Merge Python3 branch

This change upgrades ranger to use Python 3.x

Change-Id: I563661e071c56c2df7e0e1a6e365aecd4158b6cd
This commit is contained in:
hosingh000 2019-10-28 15:53:01 -05:00
parent 9700b1ad53
commit 87d644675e
210 changed files with 2004 additions and 3489 deletions

View File

@ -2,8 +2,17 @@
check:
jobs:
- openstack-tox-pep8
- openstack-tox-py27
- openstack-tox-py36
- ranger-tox-bandit
gate:
jobs:
- openstack-tox-pep8
- openstack-tox-py27
- openstack-tox-py36
- ranger-tox-bandit
- job:
name: ranger-tox-bandit
parent: openstack-tox
timeout: 600
pre-run: playbooks/run_unit_test_job.yaml
vars:
tox_envlist: bandit-baseline

2
bindep.txt Normal file
View File

@ -0,0 +1,2 @@
python3-dev [platform:dpkg test]
default-libmysqlclient-dev [platform:dpkg test]

View File

@ -37,8 +37,8 @@ source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
project = u'ranger'
copyright = u'2017, OpenStack Developers'
project = 'ranger'
copyright = '2017, OpenStack Developers'
# openstackdocstheme options
repository_name = 'openstack/ranger'
@ -73,8 +73,8 @@ htmlhelp_basename = '%sdoc' % project
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Developers', 'manual'),
'%s Documentation' % project,
'OpenStack Developers', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.

View File

@ -35,7 +35,7 @@ password = 'password'
# auth_enabled = False
[database]
connection = 'mysql://user:pass@localhost:3306/orm'
connection = 'mysql+pymysql://user:pass@localhost:3306/orm'
max_retries = 3
user_role = 'admin'

View File

@ -1,4 +1,4 @@
FROM ubuntu:16.04
FROM ubuntu:18.04
ENV DEBIAN_FRONTEND noninteractive
ENV container docker
@ -11,19 +11,19 @@ apt -y install git \
netcat \
netbase \
openssh-server \
python-minimal \
python-setuptools \
python-pip \
python-dev \
python-dateutil \
python3-minimal \
python3-setuptools \
python3-pip \
default-libmysqlclient-dev \
python3-dev \
python3-dateutil \
ca-certificates \
openstack-pkg-tools \
apache2 \
gcc \
g++ \
libffi-dev \
libssl-dev --no-install-recommends \
libmysqlclient-dev \
libssl-dev \
&& apt-get clean \
&& rm -rf \
/var/lib/apt/lists/* \
@ -33,15 +33,15 @@ libmysqlclient-dev \
/usr/share/doc \
/usr/share/doc-base
RUN pip install wheel
RUN pip3 install wheel
COPY . /tmp/ranger
WORKDIR /tmp/ranger
RUN pip install --default-timeout=100 -r requirements.txt
RUN pip3 install --default-timeout=100 -r requirements.txt
RUN python setup.py install
RUN python3 setup.py install
ARG user=ranger
@ -51,7 +51,7 @@ RUN useradd -u 1000 -ms /bin/false ${user}
# Change permissions
RUN mkdir -p /etc/ranger \
&& mkdir /var/log/ranger \
&& mkdir /home/${user}/git_repo \
&& mkdir /home/${user}/git_repo \
&& chown -R ${user}: /var/log/ranger \
&& mv /tmp/ranger /home/${user}/ranger \
&& chown -R ${user}: /home/${user} \

View File

@ -1,4 +1,4 @@
FROM ubuntu:16.04
FROM ubuntu:18.04
#ubuntu environment variables
ENV DEBIAN_FRONTEND noninteractive
@ -16,11 +16,11 @@ apt -y install git \
netcat \
netbase \
openssh-server \
python-minimal \
python-setuptools \
python-pip \
python-dev \
python-dateutil \
python3-minimal \
python3-setuptools \
python3-pip \
python3-dev \
python3-dateutil \
ca-certificates \
openstack-pkg-tools \
vim \
@ -39,15 +39,15 @@ libmysqlclient-dev \
/usr/share/doc \
/usr/share/doc-base
RUN pip install wheel
RUN pip3 install wheel
COPY . /tmp/ranger
WORKDIR /tmp/ranger
RUN pip install --default-timeout=100 -r requirements.txt
RUN pip3 install --default-timeout=100 -r requirements.txt
RUN python setup.py install
RUN python3 setup.py install
WORKDIR /tmp
# Create user

View File

@ -16,7 +16,7 @@ import os
from os.path import join
from oslo_config import cfg
from ConfigParser import ConfigParser
from configparser import ConfigParser
CONF = cfg.CONF
@ -213,7 +213,7 @@ else:
]
CONF.register_opts(FlavorGroup, flavor_group)
for key, value in flavor_dict.items():
for key, value in list(flavor_dict.items()):
if key.startswith("es_"):
autogen_es.add(value.split(': ')[0])

View File

@ -1,4 +1,3 @@
#!/usr/bin/env python
# Copyright (c) 2018 OpenStack Foundation
# All Rights Reserved.
#

View File

@ -4,14 +4,15 @@ import json
import logging
import threading
import time
import urllib2
import urllib.error
import urllib.parse
import urllib.request
from orm.common.client.audit.audit_client.api.exceptions.audit_exception import AuditException
from orm.common.client.audit.audit_client.api.model.get_audits_result import AuditsResult
from orm.common.client.audit.audit_client.api.model.transaction import Transaction
logger = logging.getLogger(__name__)
config = {
'AUDIT_SERVER_URL': None,
'NUM_OF_SEND_RETRIES': None,
@ -169,26 +170,24 @@ def _post_data(data):
# Validate that the configuration was initialized
_validate()
# Send the data
req = urllib2.Request(config['AUDIT_SERVER_URL']) # nosec
req = urllib.request.Request(config['AUDIT_SERVER_URL']) # nosec
req.add_header('Content-Type', 'application/json')
# Retry to send the data to the audit server
success = False
for retry_number in range(config['NUM_OF_SEND_RETRIES']):
try:
urllib2.urlopen(req, json.dumps(data)) # nosec
urllib.request.urlopen(req, json.dumps(data).encode('utf-8')) # nosec
success = True
break
except Exception as error:
time.sleep(config['TIME_WAIT_BETWEEN_RETRIES'])
if not success:
error_msg = "ERROR|CON{}AUDIT001|Fail to send data to [{}]. Tried " \
"a couple of times with no success. Last attempt " \
"error: [{}]".format(config['SERVICE_NAME'],
config['AUDIT_SERVER_URL'],
error.message)
logger.error(error_msg)
raise AuditException(error_msg)
error_msg = "ERROR|CON{}AUDIT001|Fail to send data to [{}]. Tried " \
"a couple of times with no success. Last attempt " \
"error: [{}]".format(config['SERVICE_NAME'],
config['AUDIT_SERVER_URL'],
str(error))
logger.error(error_msg)
raise AuditException(error_msg)
def _get_data(query):
@ -197,23 +196,21 @@ def _get_data(query):
# Send the data
audit_server_url_with_query = "{}?{}".format(config['AUDIT_SERVER_URL'],
query)
req = urllib2.Request(audit_server_url_with_query) # nosec
req = urllib.request.Request(audit_server_url_with_query) # nosec
# Retry to get the data from the audit server
success = False
response = None
error = None
for retry_number in range(config['NUM_OF_SEND_RETRIES']):
try:
response = urllib2.urlopen(req) # nosec
response = urllib.request.urlopen(req) # nosec
success = True
break
except Exception as error:
time.sleep(config['TIME_WAIT_BETWEEN_RETRIES'])
if not success:
error_msg = "Fail to get data from [{}]. Tried a couple of times " \
"with no success. Last attempt error: [{}]".\
format(audit_server_url_with_query, error.message)
logger.error(error_msg)
raise AuditException(error_msg)
else:
error_msg = "Fail to get data from [{}]. Tried a couple of times " \
"with no success. Last attempt error: [{}]".\
format(audit_server_url_with_query, str(error))
logger.error(error_msg)
raise AuditException(error_msg)
return response

View File

@ -7,3 +7,4 @@ class AuditException(Exception):
def __init__(self, error_msg):
"""init method."""
Exception.__init__(self, error_msg)
self.message = error_msg

View File

@ -1,4 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.

View File

@ -1,9 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
# Hacking already pins down pep8, pyflakes and flake8
hacking<0.11,>=0.10.0
testrepository>=0.0.18
mock<1.1.0,>=1.0
coverage>=3.6

View File

@ -1,22 +0,0 @@
[tox]
envlist = py27, cover, pep8
[testenv]
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
install_command = pip install -U {opts} {packages}
commands = python setup.py testr
[testenv:cover]
#omitting rds/api/app.py and rds/examples/api/functional_test.py
#since they have no need for unit test
commands =
python setup.py testr --slowest --coverage --omit=audit_client/examples/*
coverage report --omit=audit_client/examples/*
[testenv:pep8]
#cannot handle and 'H102 Apache 2.0 license header not found' and
#'H202 assertRaises Exception too broad'
#since it requires business code changes
commands = flake8

View File

@ -1,219 +0,0 @@
"""keystone_utils token validator unittests."""
import mock
import unittest
from keystone_utils import tokens
class MyResponse(object):
    """Canned stand-in for a ``requests`` response object.

    Exposes the two members the code under test touches: a
    ``status_code`` attribute and a ``json()`` method that returns a
    pre-built payload.
    """

    def __init__(self, status, json_result):
        self.status_code = status
        self._json_result = json_result

    def json(self):
        """Return the canned JSON payload."""
        return self._json_result
class MyKeystone(object):
    """Fake keystone tokens/roles manager whose lookups always fail.

    Both entry points raise ``NotFound`` so tests can drive the
    token-not-found and role-not-found error paths in ``tokens``.
    """

    def validate(self, a):
        # Simulates an unknown token.
        raise tokens.v3_client.exceptions.NotFound('test')

    def find(self, **kwargs):
        # Simulates an unknown role.
        raise tokens.v3_client.exceptions.NotFound('test')
class MyClient(object):
    """Fake keystone client exposing ``tokens`` and ``roles`` managers."""

    def __init__(self, set_tokens=True):
        # With set_tokens True, the tokens manager raises NotFound on
        # validate (see MyKeystone); otherwise it is a permissive mock.
        if set_tokens:
            self.tokens = MyKeystone()
        else:
            self.tokens = mock.MagicMock()
        # roles.find always raises NotFound.
        self.roles = MyKeystone()
class TokensTest(unittest.TestCase):
    """Unit tests for the keystone_utils ``tokens`` token validator.

    Most tests patch ``tokens.requests.get`` to return a canned region
    listing (see ``MyResponse``) so ``_find_keystone_ep`` resolves a
    keystone endpoint without any network access.
    """

    def setUp(self):
        # Reset the module-level keystone-client cache between tests.
        tokens._KEYSTONES = {}

    @mock.patch.object(tokens.requests, 'get', return_value=MyResponse(
        tokens.OK_CODE, {'regions': [{'endpoints': [{'publicURL': 'test',
                                                     'type': 'identity'}]}]}))
    def test_find_keystone_ep_sanity(self, mock_get):
        result = tokens._find_keystone_ep('a', 'b')
        self.assertEqual(result, 'test')

    @mock.patch.object(tokens.requests, 'get', return_value=MyResponse(
        tokens.OK_CODE + 1, {'regions': [{'endpoints': [
            {'publicURL': 'test', 'type': 'identity'}]}]}))
    def test_find_keystone_ep_bad_return_code(self, mock_get):
        # A non-OK status code means the endpoint lookup yields nothing.
        result = tokens._find_keystone_ep('a', 'b')
        self.assertIsNone(result)

    @mock.patch.object(tokens.requests, 'get', return_value=MyResponse(
        tokens.OK_CODE, {}))
    def test_find_keystone_ep_no_keystone_ep_in_response(self, mock_get):
        result = tokens._find_keystone_ep('a', 'b')
        self.assertIsNone(result)

    @mock.patch.object(tokens.requests, 'get', return_value=MyResponse(
        tokens.OK_CODE, {'regions': [{'endpoints': [{'publicURL': 'test',
                                                     'type': 'test'}]}]}))
    def test_find_keystone_ep_no_identity_in_response(self, mock_get):
        # Endpoints exist, but none of type 'identity'.
        result = tokens._find_keystone_ep('a', 'b')
        self.assertIsNone(result)

    @mock.patch.object(tokens.requests, 'get', return_value=MyResponse(
        tokens.OK_CODE, {'regions': [{'endpoints': [{'publicURL': 'test',
                                                     'type': 'identity'}]}]}))
    @mock.patch.object(tokens.v3_client, 'Client')
    def test_is_token_valid_sanity(self, mock_get, mock_client):
        self.assertTrue(tokens.is_token_valid('a', 'b', tokens.TokenConf(
            'a', 'b', 'c', 'd', '3')))

    @mock.patch.object(tokens.requests, 'get', return_value=MyResponse(
        tokens.OK_CODE, {'regions': [{'endpoints': [{'publicURL': 'test',
                                                     'type': 'identity'}]}]}))
    @mock.patch.object(tokens.v3_client, 'Client')
    def test_is_token_valid_sanity_role_required(self, mock_get, mock_client):
        user = {'user': {'id': 'test_id', 'domain': {'id': 'test'}}}
        mock_client.tokens.validate = mock.MagicMock(return_value=user)
        self.assertTrue(tokens.is_token_valid('a', 'b', tokens.TokenConf(
            'a', 'b', 'c', 'd', '3'), 'test', {'domain': 'test'}))

    @mock.patch.object(tokens.requests, 'get', return_value=MyResponse(
        tokens.OK_CODE, {'regions': [{'endpoints': [{'publicURL': 'test',
                                                     'type': 'identity'}]}]}))
    def test_is_token_valid_token_not_found(self, mock_get):
        # Swap in a client whose token validation raises NotFound,
        # then restore the real class afterwards.
        client_backup = tokens.v3_client.Client
        tokens.v3_client.Client = mock.MagicMock(return_value=MyClient())
        self.assertFalse(tokens.is_token_valid('a', 'b', tokens.TokenConf(
            'a', 'b', 'c', 'd', '3')))
        tokens.v3_client.Client = client_backup

    @mock.patch.object(tokens.requests, 'get', return_value=MyResponse(
        tokens.OK_CODE, {'regions': [{'endpoints': [{'publicURL': 'test',
                                                     'type': 'identity'}]}]}))
    def test_is_token_valid_invalid_version(self, mock_get):
        # Keystone version '4' is unsupported.
        client_backup = tokens.v3_client.Client
        tokens.v3_client.Client = mock.MagicMock(return_value=MyClient())
        self.assertRaises(ValueError, tokens.is_token_valid, 'a', 'b',
                          tokens.TokenConf('a', 'b', 'c', 'd', '4'))
        tokens.v3_client.Client = client_backup

    @mock.patch.object(tokens.requests, 'get', return_value=MyResponse(
        tokens.OK_CODE, {'regions': [{'endpoints': [{'publicURL': 'test',
                                                     'type': 'identity'}]}]}))
    def test_is_token_valid_keystone_v2(self, mock_get):
        client_backup = tokens.v2_client.Client
        tokens.v2_client.Client = mock.MagicMock()
        self.assertFalse(tokens.is_token_valid('a', 'b',
                                               tokens.TokenConf('a', 'b', 'c',
                                                                'd', '2.0'),
                                               'test',
                                               {'tenant': 'test'}))
        tokens.v2_client.Client = client_backup

    @mock.patch.object(tokens.requests, 'get', return_value=MyResponse(
        tokens.OK_CODE, {'regions': [{'endpoints': [{'publicURL': 'test',
                                                     'type': 'identity'}]}]}))
    def test_is_token_valid_keystone_v2_invalid_location(self, mock_get):
        # v2 keystone only understands a 'tenant' role location,
        # so 'domain' must be rejected.
        client_backup = tokens.v2_client.Client
        tokens.v2_client.Client = mock.MagicMock()
        self.assertRaises(ValueError, tokens.is_token_valid, 'a', 'b',
                          tokens.TokenConf('a', 'b', 'c', 'd', '2.0'), 'test',
                          {'domain': 'test'})
        tokens.v2_client.Client = client_backup

    @mock.patch.object(tokens.requests, 'get', return_value=MyResponse(
        tokens.OK_CODE + 1, {'regions': [{'endpoints': [
            {'publicURL': 'test', 'type': 'identity'}]}]}))
    def test_is_token_valid_keystone_ep_not_found(self, mock_get):
        self.assertRaises(tokens.KeystoneNotFoundError, tokens.is_token_valid,
                          'a', 'b', tokens.TokenConf('a', 'b', 'c', 'd', '3'))

    @mock.patch.object(tokens.requests, 'get', return_value=MyResponse(
        tokens.OK_CODE, {'regions': [{'endpoints': [{'publicURL': 'test',
                                                     'type': 'identity'}]}]}))
    def test_is_token_valid_no_role_location(self, mock_get):
        # A role name without a role location is invalid.
        tokens.v3_client.Client = mock.MagicMock()
        self.assertRaises(ValueError, tokens.is_token_valid, 'a', 'b',
                          tokens.TokenConf('a', 'b', 'c', 'd', '3'), 'test')

    @mock.patch.object(tokens.v3_client, 'Client')
    def test_does_user_have_role_sanity_true(self, mock_client):
        user = {'user': {'id': 'test_id', 'domain': {'id': 'test'}}}
        self.assertTrue(tokens._does_user_have_role(mock_client, '3', user,
                                                    'admin',
                                                    {'domain': 'test'}))

    @mock.patch.object(tokens.v3_client, 'Client')
    def test_does_user_have_role_sanity_false(self, mock_client):
        user = {'user': {'id': 'test_id', 'domain': {'id': 'test'}}}
        mock_client.roles.check = mock.MagicMock(
            side_effect=tokens.v3_client.exceptions.NotFound('test'))
        self.assertFalse(tokens._does_user_have_role(mock_client, '3', user,
                                                     'admin',
                                                     {'domain': 'test'}))

    @mock.patch.object(tokens.v3_client, 'Client')
    def test_does_user_have_role_invalid_user(self, mock_client):
        # An empty user record cannot hold any role.
        user = {}
        self.assertFalse(tokens._does_user_have_role(mock_client, '3', user,
                                                     'admin',
                                                     {'domain': 'test'}))

    @mock.patch.object(tokens.v3_client, 'Client')
    def test_does_user_have_role_role_does_not_exist(self, mock_client):
        user = {'user': {'id': 'test_id', 'domain': {'id': 'test'}}}
        mock_client.roles.find = mock.MagicMock(
            side_effect=tokens.v3_client.exceptions.NotFound('test'))
        self.assertRaises(tokens.v3_client.exceptions.NotFound,
                          tokens._does_user_have_role, mock_client, '3',
                          user, 'test', {'domain': 'default'})

    @mock.patch.object(tokens.requests, 'get', return_value=MyResponse(
        tokens.OK_CODE, {'regions': [{'endpoints': [{'publicURL': 'test',
                                                     'type': 'identity'}]}]}))
    def test_is_token_valid_role_does_not_exist(self, mock_get):
        # MyClient(False) leaves roles.find raising NotFound.
        tokens.v3_client.Client = mock.MagicMock(return_value=MyClient(False))
        self.assertRaises(ValueError, tokens.is_token_valid, 'a', 'b',
                          tokens.TokenConf('a', 'b', 'c', 'd', '3'), 'test',
                          {'domain': 'test'})

    def test_get_token_user_invalid_arguments(self):
        self.assertRaises(ValueError, tokens.get_token_user, 'a', 'b')

    @mock.patch.object(tokens, '_find_keystone_ep', return_value=None)
    def test_get_token_user_keystone_ep_not_found(self,
                                                  mock_find_keystone_ep):
        self.assertRaises(tokens.KeystoneNotFoundError,
                          tokens.get_token_user, 'a', mock.MagicMock(), 'c')

    def test_get_token_user_invalid_keystone_version(self):
        conf = tokens.TokenConf(None, None, None, None, None)
        self.assertRaises(ValueError, tokens.get_token_user, 'a', conf, 'c',
                          'd')

    @mock.patch.object(tokens, '_get_keystone_client')
    def test_get_token_user_token_not_found(self, mock_get_keystone_client):
        ks = mock.MagicMock()
        ks.tokens.validate.side_effect = tokens.v3_client.exceptions.NotFound()
        mock_get_keystone_client.return_value = ks
        conf = tokens.TokenConf('3', '3', '3', '3', '3')
        self.assertIsNone(tokens.get_token_user('a', conf, 'c', 'd'))

    @mock.patch.object(tokens, '_get_keystone_client')
    def test_get_token_user_success(self, mock_get_keystone_client):
        token_info = mock.MagicMock()
        token_info.token = 'a'
        token_info.user = 'test_user'
        ks = mock.MagicMock()
        ks.tokens.validate.return_value = token_info
        mock_get_keystone_client.return_value = ks
        conf = tokens.TokenConf('2.0', '2.0', '2.0', '2.0', '2.0')
        result = tokens.get_token_user('a', conf, 'c', 'd')
        self.assertEqual(result.token, 'a')
        self.assertEqual(result.user, 'test_user')

View File

@ -1,5 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
requests==2.2.1
python-keystoneclient==1.3.1

View File

@ -1,15 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
oslo.i18n==3.9.0
oslo.serialization==2.13.0
oslo.utils==3.16.0
hacking<0.11,>=0.10.0
mock<1.1.0,>=1.0
coverage>=3.6
python-subunit>=0.0.18
sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2
testrepository>=0.0.18
testscenarios==0.4
testtools==1.4.0

View File

@ -1,24 +0,0 @@
[tox]
envlist = py27,cover
skipsdist = True
[testenv]
install_command =
# constraints: {[testenv:common-constraints]install_command}
pip install -U --force-reinstall {opts} {packages}
setenv = VIRTUAL_ENV={envdir}
OS_TEST_PATH=./keystone_utils/tests/unit
PYTHONPATH = {toxinidir}/mock_keystone/:/usr/local/lib/python2.7/dist-packages/
deps = -r{toxinidir}/test-requirements.txt
-r{toxinidir}/requirements.txt
[testenv:cover]
commands =
coverage erase
python setup.py testr --coverage
coverage report --omit="keystone_utils/tests/*"
coverage html --omit="keystone_utils/tests/*"
[testenv:pep8]
commands=
py.test --pep8 -m pep8

View File

@ -27,15 +27,15 @@ class APIErrorHook(PecanHook):
result_json = err_utils.get_error_dict(401,
transaction_id,
None)
else:
dict_body = None
try:
logger.debug('error: {}'.format(state.response))
dict_body = json.loads(state.response.body)
result_json = dict_body
if 'line' in str(state.response.body) and 'column' in str(
state.response.body):
result_json = dict_body
status_code = 400
if 'faultstring' in dict_body:
result_json = err_utils.get_error_dict(status_code,
@ -43,14 +43,17 @@ class APIErrorHook(PecanHook):
dict_body['faultstring'],
"")
else:
result_json = json.loads(dict_body['faultstring'])
logger.debug('Received faultstring: {}'.format(result_json))
logger.info('Received faultstring: {}'.format(dict_body['faultstring']))
# make sure status code in header and in body are the same
if 'code' in result_json:
status_code = result_json['code']
logger.info('Received status code: {}, transaction_id: {}, tracking_id: {}'.
format(status_code, transaction_id, tracking_id))
result_json = err_utils.get_error_dict(status_code,
transaction_id,
dict_body['faultstring'],
"")
except ValueError:
msg = 'Could not read faultstring from response body!'
@ -65,6 +68,6 @@ class APIErrorHook(PecanHook):
msg,
"")
setattr(state.response, 'body', json.dumps(result_json))
state.response.body = json.dumps(result_json).encode("UTF-8")
state.response.status_code = status_code
state.response.headers.add('X-RANGER-Request-Id', tracking_id)

View File

@ -14,5 +14,5 @@ class SecurityHeadersHook(PecanHook):
'X-XSS-Protection': '1; mode=block'}
# Add all the security headers
for header, value in security_headers.items():
for header, value in list(security_headers.items()):
state.response.headers.add(header, value)

View File

@ -9,7 +9,7 @@ class TransactionIdHook(PecanHook):
try:
transaction_id = utils.make_transid()
except Exception as exc:
abort(500, headers={'faultstring': exc.message})
abort(500, headers={'faultstring': str(exc)})
tracking_id = state.request.headers['X-RANGER-Tracking-Id'] \
if 'X-RANGER-Tracking-Id' in state.request.headers else transaction_id

View File

@ -39,7 +39,7 @@ class DependencyResolver:
try:
self.resolve(resource_name)
except ProviderNotFoundError as e:
logging.error("Provider not found! Make sure the provider is available.\n %s", e)
logging.error("Provider not found! Make sure the provider is available.\n %s", str(e))
return False
else:
return True

View File

@ -39,13 +39,13 @@ class ResourceProviderRegister:
return instance
def mass_register(self, resource_names_to_providers, **kwargs):
for resource_name, provider in resource_names_to_providers.items():
for resource_name, provider in list(resource_names_to_providers.items()):
self.register_instance(resource_name, provider, **kwargs)
def load(self, other_register, allow_overrides=False):
if not allow_overrides:
own_keys = self.resource_providers.keys()
other_keys = other_register.resource_providers.keys()
own_keys = list(self.resource_providers.keys())
other_keys = list(other_register.resource_providers.keys())
common_keys = own_keys & other_keys
if common_keys:
# TODO Add new FangError sub-class?

View File

@ -1,42 +1,37 @@
import imp
import importlib
import os
from oslo_log import log
from orm.common.orm_common.injector.fang import di
from orm.common.orm_common.utils.sanitize import sanitize_symbol_name
_di = di.Di()
logger = None
LOG = log.getLogger(__name__)
def register_providers(env_variable, providers_dir_path, _logger):
global logger
logger = _logger
# TODO: change all prints to logger
logger.info('Initializing dependency injector')
logger.info('Checking {0} variable'.format(env_variable))
LOG.info('Initializing dependency injector')
LOG.info('Checking {0} variable'.format(env_variable))
env = None
if not (env_variable in os.environ):
logger.warn('No {0} variable found using `prod` injector'.format(env_variable))
LOG.warning('No {0} variable found using prod injector'.format(env_variable))
env = 'prod'
elif os.environ[env_variable] == '__TEST__':
logger.info('__TEST__ variable found, explicitly skipping provider registration!!!')
LOG.info('__TEST__ variable found, explicitly skipping provider registration!')
return
else:
env = os.environ[env_variable]
log_message = '{0} found setting injector to {1} environment'.format(sanitize_symbol_name(env_variable), env)
log_message = \
'{0} found, setting injector to {1} environment'.format(sanitize_symbol_name(env_variable), env)
log_message = log_message.replace('\n', '_').replace('\r', '_')
logger.info(log_message)
LOG.info(log_message)
logger.info('Setting injector providers')
LOG.info('Setting injector providers')
module = _import_file_by_name(env, providers_dir_path)
for provider in module.providers:
logger.info('Setting provider `{0}` to {1}'.format(provider[0], provider[1]))
_di.providers.register_instance(provider[0], provider[1])
def get_di():
return _di
@ -48,12 +43,13 @@ def override_injected_dependency(dep_tuple):
def _import_file_by_name(env, providers_dir_path):
file_path = os.path.join(providers_dir_path, '{0}_providers.py'.format(env))
try:
module = imp.load_source('fms_providers', file_path)
except IOError as ex:
logger.log_exception(
'File with providers for the {0} environment, path: {1} wasnt found! Crushing!!!'.format(env, file_path),
ex)
raise ex
spec = importlib.util.spec_from_file_location("{0}_providers".format(env), file_path)
module = importlib.util.module_from_spec(spec)
if module is not None:
spec.loader.exec_module(module)
for provider in module.providers:
LOG.info('Setting provider {0} to {1}'.format(provider[0], provider[1]))
_di.providers.register_instance(provider[0], provider[1])
else:
LOG.error('Provider for {0} environment not found, path: {1} does not exist'.format(env, file_path))
return module

View File

@ -20,7 +20,7 @@ import re
import six
import _checks
from . import _checks
LOG = logging.getLogger(__name__)
@ -61,7 +61,7 @@ class ParseStateMeta(type):
reducers = []
for key, value in cls_dict.items():
for key, value in list(cls_dict.items()):
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:

View File

@ -2,10 +2,10 @@
import logging
from . import qolicy
from orm.common.client.keystone.keystone_utils import tokens
from orm.common.orm_common.utils import api_error_utils as err_utils
from orm.common.orm_common.utils import dictator
import qolicy
from wsme.exc import ClientSideError
logger = logging.getLogger(__name__)
@ -172,13 +172,13 @@ def authorize(action, request, app_conf, keystone_ep=None):
logger.debug('The authentication service is disabled. No authentication is needed.')
is_permitted = True
except ClientSideError as e:
logger.error('Fail to validate request. due to {}.'.format(e.message))
logger.error('Fail to validate request. due to {}.'.format(str(e)))
raise err_utils.get_error('N/A', status_code=e.code)
except EnforcerError:
logger.error('The token is unauthorized according to the policy')
is_permitted = False
except Exception as e:
logger.error('Fail to validate request. due to {}.'.format(e.message))
logger.error('Fail to validate request. due to {}.'.format(str(e)))
is_permitted = False
logger.info('Authorize...end')

View File

@ -218,7 +218,7 @@ desired rule name.
import logging
import os
import _parser
from . import _parser
from orm.common.orm_common.utils import api_error_utils as err_utils
from oslo_config import cfg
@ -302,7 +302,7 @@ class Rules(dict):
# Suck in the JSON data and parse the rules
rules = {k: _parser.parse_rule(v)
for k, v in jsonutils.loads(data).items()}
for k, v in list(jsonutils.loads(data).items())}
return cls(rules, default_rule)
@ -311,7 +311,7 @@ class Rules(dict):
"""Allow loading of rule data from a dictionary."""
# Parse the rules stored in the dictionary
rules = {k: _parser.parse_rule(v) for k, v in rules_dict.items()}
rules = {k: _parser.parse_rule(v) for k, v in list(rules_dict.items())}
return cls(rules, default_rule)
@ -347,7 +347,7 @@ class Rules(dict):
# Start by building the canonical strings for the rules
out_rules = {}
for key, value in self.items():
for key, value in list(self.items()):
# Use empty string for singleton TrueCheck instances
if isinstance(value, _checks.TrueCheck):
out_rules[key] = ''

View File

@ -1,68 +0,0 @@
import json
import logging
import mock
from orm_common.hooks import api_error_hook
from unittest import TestCase
logger = logging.getLogger(__name__)
class TestAPIErrorHook(TestCase):
    """Unit tests for APIErrorHook.after()."""

    @mock.patch.object(api_error_hook, 'err_utils')
    @mock.patch.object(api_error_hook, 'json')
    def test_after_401(self, mock_json, mock_err_utils):
        # A 401 response is replaced with the canned error dict.
        a = api_error_hook.APIErrorHook()
        state = mock.MagicMock()
        mock_err_utils.get_error_dict.return_value = 'B'
        mock_json.loads = json.loads
        mock_json.dumps = json.dumps
        state.response.status_code = 401
        a.after(state)
        self.assertEqual(state.response.body,
                         json.dumps(mock_err_utils.get_error_dict.return_value))

    @mock.patch.object(api_error_hook, 'err_utils')
    def test_after_not_an_error(self, mock_err_utils):
        a = api_error_hook.APIErrorHook()
        state = mock.MagicMock()
        mock_err_utils.get_error_dict.return_value = 'B'
        state.response.body = 'AAAA'
        temp = state.response.body
        # A successful status code
        state.response.status_code = 201
        a.after(state)
        # Assert that the response body hasn't changed
        self.assertEqual(state.response.body, temp)

    @mock.patch.object(api_error_hook, 'err_utils')
    @mock.patch.object(api_error_hook.json, 'loads',
                       side_effect=ValueError('test'))
    def test_after_error(self, mock_json, mock_err_utils):
        # An unparsable body falls back to the canned error dict.
        a = api_error_hook.APIErrorHook()
        state = mock.MagicMock()
        mock_err_utils.get_error_dict.return_value = 'B'
        state.response.body = 'AAAA'
        mock_json.loads = mock.MagicMock(side_effect=ValueError('sd'))
        state.response.status_code = 402
        a.after(state)
        self.assertEqual(state.response.body,
                         json.dumps(mock_err_utils.get_error_dict.return_value))

    @mock.patch.object(api_error_hook, 'err_utils')
    @mock.patch.object(api_error_hook, 'json')
    def test_after_success(self, mock_json, mock_err_utils):
        # The nested faultstring JSON is unwrapped into the response body.
        a = api_error_hook.APIErrorHook()
        state = mock.MagicMock()
        mock_err_utils.get_error_dict.return_value = 'B'
        mock_json.loads = json.loads
        mock_json.dumps = json.dumps
        mock_json.loads = json.loads
        state.response.body = '{"debuginfo": null, "faultcode": "Client", "faultstring": "{\\"code\\": 404, \\"created\\": \\"1475768730.95\\", \\"details\\": \\"\\", \\"message\\": \\"customer: q not found\\", \\"type\\": \\"Not Found\\", \\"transaction_id\\": \\"mock_json5efa7416fb4d408cc0e30e4373cf00\\"}"}'
        state.response.status_code = 400
        a.after(state)
        self.assertEqual(json.loads(state.response.body), json.loads('{"message": "customer: q not found", "created": "1475768730.95", "type": "Not Found", "details": "", "code": 404, "transaction_id": "mock_json5efa7416fb4d408cc0e30e4373cf00"}'))

View File

@ -1,31 +0,0 @@
import mock
from orm_common.hooks import security_headers_hook
from unittest import TestCase
class MyHeaders(object):
    """Fake header collection that records every header added to it."""

    def __init__(self):
        self.headers = {}

    def add(self, key, value):
        # Last write wins, exactly like a plain dict assignment.
        self.headers[key] = value
class TestSecurityHeadersHook(TestCase):
    """Unit tests for SecurityHeadersHook.after()."""

    def test_after(self):
        s = security_headers_hook.SecurityHeadersHook()
        test_headers = MyHeaders()
        state = mock.MagicMock()
        state.response.headers = test_headers
        s.after(state)
        # Every expected security header must be added verbatim.
        security_headers = {'X-Frame-Options': 'DENY',
                            'X-Content-Type-Options': 'nosniff',
                            'Strict-Transport-Security': 'max-age=31536000; includeSubDomains',
                            'Content-Security-Policy': 'default-src \'self\'',
                            'X-Permitted-Cross-Domain-Policies': 'none',
                            'X-XSS-Protection': '1; mode=block'}
        for header in security_headers:
            self.assertEqual(security_headers[header],
                             test_headers.headers[header])

View File

@ -1,17 +0,0 @@
import logging
import mock
from orm_common.hooks import transaction_id_hook
from unittest import TestCase
logger = logging.getLogger(__name__)
class TestTransactionIdHook(TestCase):
    """Unit tests for TransactionIdHook.before()."""

    @mock.patch.object(transaction_id_hook.utils, 'make_transid',
                       return_value='test')
    def test_before_sanity(self, mock_make_transid):
        t = transaction_id_hook.TransactionIdHook()
        state = mock.MagicMock()
        t.before(state)
        # With no X-RANGER-Tracking-Id header present, both ids come
        # from make_transid().
        self.assertEqual(state.request.transaction_id, 'test')
        self.assertEqual(state.request.tracking_id, 'test')

View File

@ -1,57 +0,0 @@
import logging
import mock
from orm_common.injector import injector
import os
from unittest import TestCase
logger = logging.getLogger(__name__)
class TestInjector(TestCase):
    """Unit tests for the orm_common dependency injector."""

    def setUp(self):
        pass

    @mock.patch.object(injector, '_import_file_by_name')
    def test_register_providers(self, mock_import_file_by_name):
        os.environ['CMS_ENV'] = 'test'
        injector.register_providers('CMS_ENV', 'a/b/c', logger)

    @mock.patch.object(injector, '_import_file_by_name')
    def test_register_providers_env_not_exist(self, mock_import_file_by_name):
        # Missing env variable falls back to the 'prod' injector.
        injector.register_providers('CMS_ENV1', 'a/b/c', logger)

    @mock.patch.object(injector, '_import_file_by_name')
    def test_register_providers_env_test(self, mock_import_file_by_name):
        # '__TEST__' explicitly skips provider registration.
        os.environ['CMS_ENV2'] = '__TEST__'
        injector.register_providers('CMS_ENV2', 'a/b/c', logger)

    @mock.patch.object(injector, '_import_file_by_name')
    def test_register_providers_with_existing_provider(self, mock_import_file_by_name):
        mock_import_file_by_name.return_value = type('module', (object,), {'providers': ['a1', 'b2']})()
        os.environ['c3'] = 'test'
        injector.register_providers('c3', 'a/b/c', logger)

    def test_get_di(self):
        injector.get_di()

    @mock.patch.object(injector, 'logger')
    def test_import_file_by_name_ioerror(self, mock_logger):
        injector.logger = mock.MagicMock()
        # Calling it with ('', '.') should raise an IOError
        # (no such file or directory)
        self.assertRaises(IOError, injector._import_file_by_name, '', '.')

    @mock.patch.object(injector.imp, 'load_source', return_value='test')
    def test_import_file_by_name_sanity(self, mock_load_source):
        self.assertEqual(injector._import_file_by_name('', '.'), 'test')

    @mock.patch.object(injector._di.providers, 'register_instance')
    def test_override_injected_dependency(self, mock_di):
        injector.override_injected_dependency((1, 2,))
        self.assertTrue(mock_di.called)

    '''
    @mock.patch.object(ResourceProviderRegister, 'register_instance')
    def test_override_injected_dependency(self, mock_resourceProviderRegister):
        injector.override_injected_dependency(mock.Mock())
    '''

View File

@ -1,103 +0,0 @@
import mock
import unittest
from orm_common.policy import _checks
class TestChecks(unittest.TestCase):
    """Unit tests for the oslo.policy-style check objects in _checks."""

    def test_call_simple_checks(self):
        # FalseCheck always denies; TrueCheck always allows.
        check = _checks.FalseCheck()
        self.assertFalse(check(1, 2, 3))
        check = _checks.TrueCheck()
        self.assertTrue(check(1, 2, 3))
        # A generic 'a:b' check cannot match these opaque arguments.
        check = _checks.GenericCheck('a', 'b')
        self.assertFalse(check(1, 2, 3))

    def test_str_simple_checks(self):
        # str() forms mirror the policy-language tokens.
        check = _checks.FalseCheck()
        self.assertEqual(str(check), '!')
        check = _checks.TrueCheck()
        self.assertEqual(str(check), '@')
        check = _checks.GenericCheck('a', 'b')
        self.assertEqual(str(check), 'a:b')

    def test_call_complex_checks(self):
        first_rule = _checks.TrueCheck()
        second_rule = _checks.FalseCheck()
        # not true -> false
        check = _checks.NotCheck(first_rule)
        self.assertFalse(check(1, 2, 3))
        # true and false -> false
        check = _checks.AndCheck([first_rule])
        check.add_check(second_rule)
        self.assertFalse(check(1, 2, 3))
        # true and true -> true
        check = _checks.AndCheck([first_rule, first_rule])
        self.assertTrue(check(1, 2, 3))
        # true or false -> true
        check = _checks.OrCheck([first_rule])
        check.add_check(second_rule)
        self.assertTrue(check(1, 2, 3))
        # pop_check() returns (the OrCheck itself, the removed sub-check).
        self.assertEqual(check.pop_check(), (check, second_rule,))
        # false or false -> false
        check = _checks.OrCheck([second_rule, second_rule])
        self.assertFalse(check(1, 2, 3))

    def test_str_complex_checks(self):
        first_rule = _checks.TrueCheck()
        second_rule = _checks.FalseCheck()
        check = _checks.NotCheck(first_rule)
        self.assertEqual(str(check), 'not @')
        check = _checks.AndCheck([first_rule])
        check.add_check(second_rule)
        self.assertEqual(str(check), '(@ and !)')
        check = _checks.OrCheck([first_rule])
        check.add_check(second_rule)
        self.assertEqual(str(check), '(@ or !)')

    def test_call_custom_checks_error(self):
        check = _checks.RoleCheck('a', 'admin')
        # test no longer valid.
        # change: https://gerrit.mtn5.cci.att.com/#/c/25690/
        # removed the exception raise
        #
        # try:
        #     check(1, mock.MagicMock(), 3)
        #     self.fail('ClientSideError not raised!')
        # except ClientSideError as exc:
        #     self.assertEqual(exc.code, 403)
        for check_type in (_checks.TenantCheck,
                           _checks.DomainCheck):
            check = check_type('a', 'admin')
            # 2 is not a user, so the check will fail
            self.assertFalse(check(1, 2, 3))

    def test_call_custom_checks_success(self):
        # A credential object shaped like a Keystone token user.
        user = mock.MagicMock()
        user.user = {'roles': [{'name': 'admin'}]}
        user.tenant = {'name': 'admin'}
        user.domain = {'name': 'admin'}
        for check_type in (_checks.RoleCheck,
                           _checks.TenantCheck,
                           _checks.DomainCheck):
            check = check_type('a', 'admin')
            # all three custom checks should match the 'admin' credential
            self.assertTrue(check(1, user, 3))

    def test_call_rule_check_error(self):
        # A referenced rule that raises KeyError is treated as a failed check.
        enforcer = mock.MagicMock()
        enforcer.rules = {'test': mock.MagicMock(
            side_effect=KeyError('test'))}
        check = _checks.RuleCheck('rule', 'test')
        self.assertFalse(check(1, 2, enforcer))

    def test_call_rule_check_success(self):
        enforcer = mock.MagicMock()
        enforcer.rules = {'test': mock.MagicMock(return_value=True)}
        check = _checks.RuleCheck('rule', 'test')
        self.assertTrue(check(1, 2, enforcer))

View File

@ -1,129 +0,0 @@
import mock
import unittest
from orm_common.policy import policy
class Exception_Test(Exception):
    """Sentinel exception type returned by the mocked err_utils.get_error."""
    pass
class TestPolicy(unittest.TestCase):
    """Unit tests for the orm_common.policy.policy module."""

    def setUp(self):
        # Reset the module-level singletons between tests.
        policy._ENFORCER = None
        policy._POLICY_FILE = None
        policy._TOKEN_CONF = None

    def test_reset(self):
        policy._ENFORCER = mock.MagicMock()
        policy._POLICY_FILE = mock.MagicMock()
        policy.reset()
        self.assertIsNone(policy._ENFORCER)
        self.assertIsNone(policy._POLICY_FILE)
        # Call it a second time when they are both None and see
        # that no exception is raised
        policy.reset()
        self.assertIsNone(policy._ENFORCER)
        self.assertIsNone(policy._POLICY_FILE)

    @mock.patch.object(policy, 'open')
    @mock.patch.object(policy.qolicy, 'Enforcer')
    @mock.patch.object(policy.qolicy, 'Rules')
    def test_init_success(self, mock_rules, mock_enforcer, mock_open):
        # NOTE(review): 'qolicy' appears to be the module's alias for the
        # oslo policy module -- confirm against policy.py.
        policy_file = 'a'
        token_conf = 'b'
        mock_rules.load_json.return_value = 'c'
        policy.init(policy_file, token_conf)
        self.assertEqual(policy._POLICY_FILE, 'a')
        self.assertEqual(policy._TOKEN_CONF, 'b')

    def test_init_enforcer_already_exists(self):
        policy._ENFORCER = mock.MagicMock()
        # Nothing should happen when the enforcer already exists, so make sure
        # that no exception is raised
        policy.init('a', 'b')

    @mock.patch.object(policy, 'open')
    @mock.patch.object(policy.qolicy, 'Rules')
    @mock.patch.object(policy, '_ENFORCER')
    def test_reset_rules_no_policy_file(self, mock_enforcer,
                                        mock_rules, mock_open):
        # reset_rules() requires _POLICY_FILE to have been set by init().
        self.assertRaises(ValueError, policy.reset_rules)

    @mock.patch.object(policy, 'open')
    @mock.patch.object(policy.qolicy, 'Rules')
    @mock.patch.object(policy, '_ENFORCER')
    def test_reset_rules_success(self, mock_enforcer,
                                 mock_rules, mock_open):
        policy._POLICY_FILE = mock.MagicMock()
        policy.reset_rules()
        self.assertTrue(mock_enforcer.set_rules.called)

    @mock.patch.object(policy, 'reset_rules')
    @mock.patch.object(policy.tokens, 'get_token_user',
                       side_effect=ValueError('test'))
    @mock.patch.object(policy, '_ENFORCER')
    def test_enforce_enforcer_error(self, mock_enforcer,
                                    mock_get_token_user,
                                    mock_reset_rules):
        # EnforcerError from the underlying enforcer propagates to the caller.
        mock_enforcer.enforce.side_effect = policy.EnforcerError()
        self.assertRaises(policy.EnforcerError, policy.enforce, 'action',
                          'token', mock.MagicMock())

    @mock.patch.object(policy, 'reset_rules')
    @mock.patch.object(policy.tokens, 'get_token_user')
    @mock.patch.object(policy, '_ENFORCER')
    def test_enforce_success(self, mock_enforcer,
                             mock_get_token_user,
                             mock_reset_rules):
        mock_enforcer.enforce.return_value = True
        self.assertTrue(policy.enforce('action', 'token', mock.MagicMock()))

    def test_authorize_authorization_disabled(self):
        request = mock.MagicMock()
        app_conf = mock.MagicMock()
        app_conf.authentication.enabled = False
        # No exception should be raised
        policy.authorize('a', request, app_conf)

    @mock.patch.object(policy, 'enforce')
    def test_authorize_no_token(self, mock_enforce):
        request = mock.MagicMock()
        request.headers.get.return_value = None
        app_conf = mock.MagicMock()
        app_conf.authentication.enabled = True
        # No exception should be raised
        policy.authorize('a', request, app_conf)

    @mock.patch.object(policy, 'enforce', side_effect=policy.EnforcerError())
    @mock.patch.object(policy.err_utils, 'get_error', return_value=Exception_Test)
    def test_authorize_enforce_failed(self, mock_enforce, mock_get_error):
        # An EnforcerError is translated into the error type supplied by
        # err_utils.get_error.
        request = mock.MagicMock()
        request.headers.get.return_value = None
        app_conf = mock.MagicMock()
        app_conf.authentication.enabled = True
        self.assertRaises(Exception_Test, policy.authorize, 'a', request,
                          app_conf)

    @mock.patch.object(policy, 'enforce', side_effect=ValueError())
    @mock.patch.object(policy.err_utils, 'get_error', return_value=Exception_Test)
    def test_authorize_other_error(self, mock_enforce, mock_get_error):
        # Any unexpected error is translated the same way.
        request = mock.MagicMock()
        request.headers.get.return_value = None
        app_conf = mock.MagicMock()
        app_conf.authentication.enabled = True
        self.assertRaises(Exception_Test, policy.authorize, 'a', request,
                          app_conf)

    @mock.patch.object(policy, 'enforce')
    def test_authorize_success(self, mock_enforce):
        request = mock.MagicMock()
        request.headers.get.return_value = 'test'
        app_conf = mock.MagicMock()
        app_conf.authentication.enabled = True
        # No exception should be raised
        policy.authorize('a', request, app_conf)

View File

@ -1,14 +0,0 @@
import json
import mock
from orm_common.utils import api_error_utils
from unittest import TestCase
class TestCrossApiUtil(TestCase):
    """Unit tests for api_error_utils.get_error."""

    @mock.patch.object(api_error_utils.utils, 'get_time_human',
                       return_value=1.337)
    def test_get_error_default_message(self, mock_time):
        """A default 400 payload is produced when no message is supplied."""
        expected = {
            "details": "a",
            "message": "Incompatible JSON body",
            "created": "1.337",
            "code": 400,
            "type": "Bad Request",
            "transaction_id": "test",
        }
        actual = json.loads(api_error_utils.get_error('test', 'a').message)
        self.assertEqual(actual, expected)

View File

@ -1,74 +0,0 @@
import mock
from orm_common.utils import cross_api_utils
import time
from unittest import TestCase
class TestCrossApiUtil(TestCase):
    """Unit tests for orm_common.utils.cross_api_utils."""

    @mock.patch('pecan.conf')
    def setUp(self, mock_conf):
        self.mock_response = mock.Mock()
        cross_api_utils.conf = mock_conf

    def respond(self, value, code):
        """Shape self.mock_response like a requests.Response and return it."""
        self.mock_response.json.return_value = value
        self.mock_response.status_code = code
        return self.mock_response

    def test_set_utils_conf(self):
        cross_api_utils.set_utils_conf(None)
        self.assertEqual(cross_api_utils.conf, None)

    def test_check_conf_initialization(self):
        # An unset conf must trip the initialization assertion.
        cross_api_utils.set_utils_conf(None)
        self.assertRaises(AssertionError,
                          cross_api_utils._check_conf_initialization)

    @mock.patch('orm_common.utils.cross_api_utils.get_rms_region_group')
    def test_is_region_group_exist(self, mock_rms_region_group):
        mock_rms_region_group.return_value = 'test_group'
        exist = cross_api_utils.is_region_group_exist('test_group_name')
        self.assertEqual(exist, True)

    @mock.patch('orm_common.utils.cross_api_utils.get_rms_region_group')
    def test_is_region_group_exist_false(self, mock_rms_region_group):
        mock_rms_region_group.return_value = None
        exist = cross_api_utils.is_region_group_exist('test_group_name')
        self.assertEqual(exist, False)

    @mock.patch('orm_common.utils.cross_api_utils.get_rms_region_group')
    def test_get_regions_of_group(self, mock_rms_region_group):
        mock_rms_region_group.return_value = {'regions': 'group'}
        exist = cross_api_utils.get_regions_of_group('test_group_name')
        self.assertEqual(exist, 'group')

    @mock.patch('orm_common.utils.cross_api_utils.get_rms_region_group')
    def test_get_regions_of_group_false(self, mock_rms_region_group):
        mock_rms_region_group.return_value = None
        exist = cross_api_utils.get_regions_of_group('test_group_name')
        self.assertEqual(exist, None)

    @mock.patch('requests.get')
    def test_get_rms_region_group(self, mock_get):
        mock_get.return_value = self.respond({'result': 'success'}, 200)
        result = cross_api_utils.get_rms_region_group('test_group_name')
        self.assertEqual(result, {'result': 'success'})

    def test_get_rms_region_group_cache_used(self):
        # A fresh timestamp plus a huge cache window forces the cached path.
        cross_api_utils.prev_timestamp = time.time()
        cross_api_utils.prev_group_name = 'test_group'
        cross_api_utils.prev_resp = 'test_response'
        cross_api_utils.conf.api.rms_server.cache_seconds = 14760251830
        self.assertEqual(cross_api_utils.prev_resp,
                         cross_api_utils.get_rms_region_group(
                             cross_api_utils.prev_group_name))

    @mock.patch.object(cross_api_utils, 'logger')
    @mock.patch.object(time, 'time', side_effect=ValueError('test'))
    def test_get_rms_region_group_time_error(self, mock_time, mock_logger):
        # Fixed: this method previously reused the name
        # test_get_rms_region_group_cache_used, silently shadowing the cache
        # test above so it never ran.
        self.assertRaises(ValueError, cross_api_utils.get_rms_region_group,
                          'test')

    # @mock.patch('requests.get')
    # def test_get_rms_region_group_with_exception(self, mock_get):
    #     mock_get.side_affect = Exception('boom')
    #     self.assertRaises(Exception, cross_api_utils.get_rms_region_group, 'test_group_name')

View File

@ -1,177 +0,0 @@
import logging
import mock
from orm_common.utils import utils
import pprint
from testfixtures import log_capture
from unittest import TestCase
class TestUtil(TestCase):
    """Unit tests for orm_common.utils.utils."""

    @mock.patch('pecan.conf')
    def setUp(self, mock_conf):
        self.mock_response = mock.Mock()
        utils.conf = mock_conf

    def respond(self, value, code):
        """Shape self.mock_response like a requests.Response and return it."""
        self.mock_response.json.return_value = value
        self.mock_response.status_code = code
        return self.mock_response

    @mock.patch('requests.post')
    def test_make_uuid(self, mock_post):
        mock_post.return_value = self.respond({'uuid': '987654321'}, 200)
        uuid = utils.create_or_validate_uuid('', 'uuidtype')
        self.assertEqual(uuid, '987654321')

    @mock.patch('requests.post')
    @log_capture('orm_common.utils.utils', level=logging.INFO)
    def test_make_uuid_offline(self, mock_post, captured):
        # mock.patch is the *outer* decorator, so its mock is injected before
        # log_capture's fixture.
        mock_post.side_effect = Exception('boom')
        uuid = utils.create_or_validate_uuid('', 'uuidtype')
        self.assertEqual(uuid, None)
        captured.check(
            ('orm_common.utils.utils', 'INFO', 'Failed in make_uuid:boom'))

    @mock.patch('requests.post')
    def test_make_transid(self, mock_post):
        mock_post.return_value = self.respond({'uuid': '987654321'}, 200)
        uuid = utils.make_transid()
        self.assertEqual(uuid, '987654321')

    @mock.patch('requests.post')
    @log_capture('orm_common.utils.utils', level=logging.INFO)
    def test_make_transid_offline(self, mock_post, captured):
        mock_post.side_effect = Exception('boom')
        uuid = utils.make_transid()
        self.assertEqual(uuid, None)
        captured.check(
            ('orm_common.utils.utils', 'INFO', 'Failed in make_transid:boom'))

    @mock.patch('audit_client.api.audit.init')
    @mock.patch('audit_client.api.audit.audit')
    def test_audit_trail(self, mock_audit, mock_init):
        # Fixed: parameter names were swapped. mock.patch decorators are
        # applied bottom-up, so the 'audit' mock is injected first.
        resp = utils.audit_trail('create customer', '1234',
                                 {'X-RANGER-Client': 'Fred'}, '5678')
        self.assertEqual(resp, 200)

    @mock.patch('audit_client.api.audit.audit')
    def test_audit_trail_offline(self, mock_audit):
        # A failing audit backend degrades to a None result, not an exception.
        mock_audit.side_effect = Exception('boom')
        resp = utils.audit_trail('create customer', '1234',
                                 {'X-RANGER-Client': 'Fred'}, '5678')
        self.assertEqual(resp, None)

    @mock.patch('audit_client.api.audit.init')
    @mock.patch('audit_client.api.audit.audit')
    def test_audit_service_args_least(self, mock_audit, mock_init):
        # Only the client header is supplied; defaults fill the rest.
        utils.audit_trail('create customer', '1234',
                          {'X-RANGER-Client': 'Fred'}, '5678')
        self.assertEqual(mock_audit.call_args[0][1], 'Fred')  # application_id
        self.assertEqual(mock_audit.call_args[0][2], '1234')  # tracking_id
        self.assertEqual(mock_audit.call_args[0][3], '1234')  # transaction_id
        self.assertEqual(mock_audit.call_args[0][4],
                         'create customer')  # transaction_type
        self.assertEqual(mock_audit.call_args[0][5], '5678')  # resource_id
        # self.assertEqual(mock_audit.call_args[0][6], 'cms')  # service
        self.assertEqual(mock_audit.call_args[0][7], '')  # user_id
        self.assertEqual(mock_audit.call_args[0][8], 'NA')  # external_id
        self.assertEqual(mock_audit.call_args[0][9], '')  # event_details
        # self.assertEqual(mock_audit.call_args[0][10], 'Saved to DB')  # status

    @mock.patch('audit_client.api.audit.init')
    @mock.patch('audit_client.api.audit.audit')
    def test_audit_service_with_tracking(self, mock_audit, mock_init):
        # An explicit tracking header overrides the transaction id.
        utils.audit_trail('create customer', '1234',
                          {'X-RANGER-Client': 'Fred',
                           'X-RANGER-Tracking-Id': 'Track12'}, '5678')
        self.assertEqual(mock_audit.call_args[0][1], 'Fred')  # application_id
        self.assertEqual(mock_audit.call_args[0][2], 'Track12')  # tracking_id
        self.assertEqual(mock_audit.call_args[0][3], '1234')  # transaction_id
        self.assertEqual(mock_audit.call_args[0][4],
                         'create customer')  # transaction_type
        self.assertEqual(mock_audit.call_args[0][5], '5678')  # resource_id
        # self.assertEqual(mock_audit.call_args[0][6], 'cms')  # service
        self.assertEqual(mock_audit.call_args[0][7], '')  # user_id
        self.assertEqual(mock_audit.call_args[0][8], 'NA')  # external_id
        self.assertEqual(mock_audit.call_args[0][9], '')  # event_details
        # self.assertEqual(mock_audit.call_args[0][10], 'Saved to DB')  # status

    @mock.patch('audit_client.api.audit.init')
    @mock.patch('audit_client.api.audit.audit')
    def test_audit_service_with_requester(self, mock_audit, mock_init):
        # The requester header maps to the audit user_id field.
        utils.audit_trail('create customer', '1234',
                          {'X-RANGER-Client': 'Fred',
                           'X-RANGER-Requester': 'Req04'}, '5678')
        self.assertEqual(mock_audit.call_args[0][1], 'Fred')  # application_id
        self.assertEqual(mock_audit.call_args[0][2], '1234')  # tracking_id
        self.assertEqual(mock_audit.call_args[0][3], '1234')  # transaction_id
        self.assertEqual(mock_audit.call_args[0][4],
                         'create customer')  # transaction_type
        self.assertEqual(mock_audit.call_args[0][5], '5678')  # resource_id
        # self.assertEqual(mock_audit.call_args[0][6], 'cms')  # service
        self.assertEqual(mock_audit.call_args[0][7], 'Req04')  # user_id
        self.assertEqual(mock_audit.call_args[0][8], 'NA')  # external_id
        self.assertEqual(mock_audit.call_args[0][9], '')  # event_details
        # self.assertEqual(mock_audit.call_args[0][10], 'Saved to DB')  # status

    def test_set_utils_conf(self):
        utils.set_utils_conf('test')
        self.assertEqual(utils.conf, 'test')

    def test_check_conf_initialization(self):
        utils.set_utils_conf(None)
        self.assertRaises(AssertionError, utils._check_conf_initialization)

    @mock.patch('requests.post')
    def test_create_existing_uuid(self, mock_post):
        uuid = '987654321'
        uuid_type = 'testtype'
        mock_post.return_value = self.respond(
            {'uuid': uuid, 'uuid_type': uuid_type}, 200)
        returned_uuid = utils.create_or_validate_uuid(uuid, uuid_type)
        self.assertEqual(returned_uuid, uuid)

    @mock.patch('requests.post')
    def test_create_existing_uuid_with_exception(self, mock_post):
        mock_post.side_effect = Exception('boom')
        uuid = '987654321'
        uuid_type = 'testtype'
        returned_uuid = utils.create_or_validate_uuid(uuid, uuid_type)
        self.assertEqual(returned_uuid, None)

    @mock.patch('requests.post')
    def test_create_existing_uuid_with_400(self, mock_post):
        # Fixed: assertRaises was given the *result* of the call instead of
        # the callable, and the mocked status was 409 (which does not raise),
        # so the test passed vacuously. A 400 from the UUID server signals a
        # duplicate key and must raise TypeError.
        uuid = '987654321'
        uuid_type = 'testId'
        mock_post.return_value = self.respond(
            {'uuid': uuid, 'uuid_type': uuid_type}, 400)
        self.assertRaises(TypeError,
                          utils.create_or_validate_uuid, uuid, uuid_type)

    @mock.patch('pecan.conf')
    def test_report_config(self, mock_conf):
        expected_value = pprint.pformat(mock_conf.to_dict(), indent=4)
        returned_value = utils.report_config(mock_conf)
        self.assertEqual(expected_value, returned_value)

    @mock.patch('pecan.conf')
    def test_report_config_with_log_write(self, mock_conf):
        expected_value = pprint.pformat(mock_conf.to_dict(), indent=4)
        returned_value = utils.report_config(mock_conf, True)
        self.assertEqual(expected_value, returned_value)

    @mock.patch('requests.get')
    def test_get_resource_status_sanity(self, mock_get):
        my_response = mock.MagicMock()
        my_response.status_code = 200
        my_response.json.return_value = 'test'
        mock_get.return_value = my_response
        result = utils.get_resource_status('A')
        self.assertEqual(result, 'test')

    @mock.patch('requests.get', side_effect=ValueError())
    def test_get_resource_status_get_failed(self, mock_get):
        self.assertIsNone(utils.get_resource_status('A'))

    @mock.patch('requests.get')
    def test_get_resource_status_invalid_response(self, mock_get):
        my_response = mock.MagicMock()
        my_response.status_code = 404
        mock_get.return_value = my_response
        self.assertIsNone(utils.get_resource_status('A'))

View File

@ -19,10 +19,15 @@ def get_error_dict(status_code, transaction_id, message, error_details=""):
if not message:
message = error_message[status_code]['message']
if status_code in error_message:
error_type = error_message[status_code]['type']
else:
error_type = 'Unhandled Error'
return {
# for 'code', get integer value of status_code (e.g. from 409.2 to 409)
'code': int(status_code),
'type': error_message[status_code]['type'],
'type': error_type,
'created': '{}'.format(utils.get_time_human()),
'transaction_id': transaction_id,
'message': message,

View File

@ -1,15 +1,13 @@
import logging
from pecan import conf
import re
import requests
import string
import time
# from orm_common.logger import get_logger
# logger = get_logger(__name__)
logger = logging.getLogger(__name__)
conf = None
prev_group_name = None
def set_utils_conf(_conf):
@ -32,14 +30,14 @@ def validate_description(data_value):
allowed_punctuations = ['.', '-', ',']
# if type of data_value != 'string' then convert it to string
desc = ''
if not isinstance(data_value, str):
desc = str(data_value)
invalidChars = (string.punctuation).translate(
None, ''.join(allowed_punctuations))
invalidChars = re.sub(str(allowed_punctuations),
'', string.punctuation)
# detect any escape sequences or special characters in data string
encoded_string = desc.encode('string_escape')
encoded_string = desc.encode('unicode_escape').decode('utf-8')
if any(char in invalidChars for char in encoded_string):
return False
@ -69,9 +67,6 @@ def get_regions_of_group(group_name):
return group["regions"]
prev_group_name = None
def get_rms_region_group(group_name):
""" function to call rms api for group info
returns 200 for ok and None for error
@ -104,7 +99,7 @@ def get_rms_region_group(group_name):
logger.error(
'CRITICAL|{}| Failed in getting data from rms: connection error'.format(
nagois) + str(exp))
exp.message = 'connection error: Failed to get get data from rms: ' \
exp.args[0] = 'connection error: Failed to get get data from rms: ' \
'unable to connect to server'
raise
except Exception as e:

View File

@ -1,9 +1,7 @@
class Error(Exception):
pass
class ErrorStatus(Error):
def __init__(self, status_code, message=""):
self.message = message

View File

@ -11,7 +11,7 @@ from pecan import conf
#
conf = None
logger = logging.getLogger(__name__)
LOG = logging.getLogger(__name__)
class ResponseError(Exception):
@ -44,26 +44,26 @@ def create_or_validate_uuid(uuid, uuid_type):
url = conf.api.uuid_server.base + conf.api.uuid_server.uuids
if not uuid:
logger.debug('Requesting new UUID from URL: {}'.format(url))
LOG.debug('Requesting new UUID from URL: {}'.format(url))
else:
logger.debug('Creating UUID: {}, using URL: {}'.format(uuid, url))
LOG.debug('Creating UUID: {}, using URL: {}'.format(uuid, url))
try:
resp = requests.post(url, data={'uuid': uuid, 'uuid_type': uuid_type},
verify=conf.verify)
except requests.exceptions.ConnectionError as exp:
nagios = 'CON{}UUIDGEN001'.format(conf.server.name.upper())
logger.critical(
LOG.critical(
'CRITICAL|{}|Failed in make_uuid: connection error: {}'.format(
nagios, str(exp)))
exp.message = 'connection error: Failed to get uuid: unable to connect to server'
raise
except Exception as e:
logger.info('Failed in make_uuid:' + str(e))
LOG.info('Failed in make_uuid:' + str(e))
return None
if resp.status_code == 400:
logger.debug('Duplicate key for uuid: {}'.format(uuid))
LOG.debug('Duplicate key for uuid: {}'.format(uuid))
raise TypeError('Duplicate key for uuid: ' + str(uuid))
resp = resp.json()
@ -79,15 +79,15 @@ def make_transid():
url = conf.api.uuid_server.base + conf.api.uuid_server.uuids
try:
logger.debug('Requesting transaction ID from: {}'.format(url))
LOG.debug('Requesting transaction ID from: {}'.format(url))
resp = requests.post(url, data={'uuid_type': 'transaction'}, verify=conf.verify)
except requests.exceptions.ConnectionError as exp:
nagios = 'CON{}UUIDGEN001'.format(conf.server.name.upper())
logger.critical('CRITICAL|{}|Failed in make_transid: connection error: {}'.format(nagios, str(exp)))
LOG.critical('CRITICAL|{}|Failed in make_transid: connection error: {}'.format(nagios, str(exp)))
exp.message = 'connection error: Failed to get uuid: unable to connect to server'
raise
except Exception as e:
logger.info('Failed in make_transid:' + str(e))
LOG.info('Failed in make_transid:' + str(e))
return None
resp = resp.json()
@ -129,14 +129,14 @@ def audit_trail(cmd, transaction_id, headers, resource_id, message=None,
conf.api.audit_server.base, conf.api.audit_server.trans)
num_of_send_retries = 3
time_wait_between_retries = 1
logger.debug('Initializing Audit, using URL: {}'.format(
LOG.debug('Initializing Audit, using URL: {}'.format(
audit_server_url))
audit.init(audit_server_url, num_of_send_retries,
time_wait_between_retries, conf.server.name.upper())
audit_setup = True
try:
timestamp = long(round(time.time() * 1000))
timestamp = int(round(time.time() * 1000))
application_id = headers[
'X-RANGER-Client'] if 'X-RANGER-Client' in headers else \
'NA'
@ -151,16 +151,16 @@ def audit_trail(cmd, transaction_id, headers, resource_id, message=None,
'X-RANGER-Requester'] if 'X-RANGER-Requester' in headers else \
''
external_id = 'NA'
logger.debug('Sending to audit: timestamp: {}, application_id: {}, '
' tracking_id: {},'
' transaction_type: {}'.format(timestamp, application_id,
tracking_id,
transaction_type))
LOG.debug('Sending to audit: timestamp: {}, application_id: {}, '
' tracking_id: {},'
' transaction_type: {}'.format(timestamp, application_id,
tracking_id,
transaction_type))
audit.audit(timestamp, application_id, tracking_id, transaction_id,
transaction_type, resource_id, service_name, user_id,
external_id, event_details)
except Exception as e:
logger.exception('Failed in audit service. ' + str(e))
LOG.exception('Failed in audit service. ' + str(e))
return None
return 200
@ -175,7 +175,7 @@ def report_config(conf, dump_to_log=False, my_logger=None):
"""
ret = pprint.pformat(conf.to_dict(), indent=4)
effective_logger = my_logger if my_logger else logger
effective_logger = my_logger if my_logger else LOG
if dump_to_log:
effective_logger.info('Current Configuration:\n' + ret)
@ -190,19 +190,19 @@ def get_resource_status(resource_id):
url = "{}{}{}".format(conf.api.rds_server.base,
conf.api.rds_server.status, resource_id)
logger.debug('Getting status from: {}'.format(url))
LOG.debug('Getting status from: {}'.format(url))
try:
result = requests.get(url, verify=conf.verify)
except Exception as exception:
logger.debug('Failed to get status: {}'.format(str(exception)))
LOG.debug('Failed to get status: {}'.format(str(exception)))
return None
if result.status_code != 200:
logger.debug('Got invalid response from RDS: code {}'.format(
LOG.debug('Got invalid response from RDS: code {}'.format(
result.status_code))
return None
else:
logger.debug('Got response from RDS: {}'.format(result.json()))
LOG.debug('Got response from RDS: {}'.format(result.json()))
return result.json()

View File

@ -1,4 +1,4 @@
import config as conf
from . import config as conf
import json
import logging
import os

View File

@ -1,102 +1,102 @@
"""clean cms mpdule."""
import cli_comander as cli
import db_comander as db
import initializer
import logging
import sys
import utils
import yaml_handler as yh
log = logging.getLogger(__name__)
def _validate_service(service):
allowed_services = ['CMS', 'FMS']
if service.upper() not in allowed_services:
raise Exception("service should be one of {}".format(allowed_services))
return service.upper()
def _init():
initializer.init_log()
return
def read_csv_file(file):
log.debug("reading file {}".format(file))
return utils.read_csv_file(file)
def resource_db_clean(resource_id, service):
log.debug("cleaning {} db for resource {}".format(service, resource_id))
db.remove_resource_db(resource_id, service)
return
def check_yaml_file(resource_id):
log.debug('checking yml file if exist for resource {}'.format(resource_id))
files = yh.check_yaml_exist(resource_id)
message = 'no yaml files found for this resource'
if files:
message = "found files please remove manualy {}".format(files)
log.debug(message)
return
def get_resource_regions(resource_id, service_name):
db_regions = db.get_cms_db_resource_regions(resource_id)
orm_regions = cli.get_resource_regions(resource_id, service_name)
return orm_regions, db_regions
def clean_rds_resource_status(resource_id):
log.debug("clean rds status db for resource {}".format(resource_id))
db.remove_rds_resource_status(resource_id)
return
def _start_cleaning():
log.info('start cleaning')
file_path = sys.argv[1]
service = _validate_service(sys.argv[2])
resourses_to_clean = read_csv_file(file_path)
for resource_id in resourses_to_clean:
try:
log.debug(
'check if resource {} has any regions before clean'.format(
resource_id))
resource_regions, db_regions = get_resource_regions(resource_id,
service)
if resource_regions or db_regions:
log.error(
"got regions {} {} please clean regions from orm before"
" removing the resource {}".format(resource_regions,
db_regions,
resource_id))
raise Exception(
"got regions {} {} please clean regions from orm before"
" removing the resource {}".format(resource_regions,
db_regions,
resource_id))
log.debug('cleaning {}'.format(resource_id))
resource_db_clean(resource_id, service)
check_yaml_file(resource_id)
clean_rds_resource_status(resource_id)
except Exception as exp:
log.error("---------------{}---------------".format(exp.message))
if 'not found' not in exp.message:
log.exception(exp)
continue
return
if __name__ == '__main__':
warning_message = raw_input(
'IMPORTANT:- please note its your responsibility to backup the db'
' before runing this script... click enter before continue'
)
_init()
_start_cleaning()
from . import cli_comander as cli
from . import db_comander as db
from . import initializer
# Fixed: 'logging' and 'sys' are standard-library modules; the mechanical
# py2->py3 conversion turned them into broken relative imports
# ('from . import logging' / 'from . import sys'), which would fail at
# import time.
import logging
import sys
from . import utils
from . import yaml_handler as yh

log = logging.getLogger(__name__)


def _validate_service(service):
    """Return the upper-cased service name; reject unsupported services."""
    allowed_services = ['CMS', 'FMS']
    if service.upper() not in allowed_services:
        raise Exception("service should be one of {}".format(allowed_services))
    return service.upper()


def _init():
    """Initialize logging for the cleanup run."""
    initializer.init_log()
    return


def read_csv_file(file):
    """Read the CSV file of resource ids to clean."""
    log.debug("reading file {}".format(file))
    return utils.read_csv_file(file)


def resource_db_clean(resource_id, service):
    """Remove the resource rows from the given service's database."""
    log.debug("cleaning {} db for resource {}".format(service, resource_id))
    db.remove_resource_db(resource_id, service)
    return


def check_yaml_file(resource_id):
    """Report any leftover yaml files that must be removed manually."""
    log.debug('checking yml file if exist for resource {}'.format(resource_id))
    files = yh.check_yaml_exist(resource_id)
    message = 'no yaml files found for this resource'
    if files:
        message = "found files please remove manualy {}".format(files)
    log.debug(message)
    return


def get_resource_regions(resource_id, service_name):
    """Return (orm regions, db regions) still attached to the resource."""
    db_regions = db.get_cms_db_resource_regions(resource_id)
    orm_regions = cli.get_resource_regions(resource_id, service_name)
    return orm_regions, db_regions


def clean_rds_resource_status(resource_id):
    """Remove the resource's status rows from the RDS database."""
    log.debug("clean rds status db for resource {}".format(resource_id))
    db.remove_rds_resource_status(resource_id)
    return


def _start_cleaning():
    """Clean every resource listed in the CSV given on the command line."""
    log.info('start cleaning')
    file_path = sys.argv[1]
    service = _validate_service(sys.argv[2])
    resourses_to_clean = read_csv_file(file_path)

    for resource_id in resourses_to_clean:
        try:
            log.debug(
                'check if resource {} has any regions before clean'.format(
                    resource_id))
            resource_regions, db_regions = get_resource_regions(resource_id,
                                                                service)
            # Refuse to delete a resource that still has regions attached.
            if resource_regions or db_regions:
                log.error(
                    "got regions {} {} please clean regions from orm before"
                    " removing the resource {}".format(resource_regions,
                                                       db_regions,
                                                       resource_id))
                raise Exception(
                    "got regions {} {} please clean regions from orm before"
                    " removing the resource {}".format(resource_regions,
                                                       db_regions,
                                                       resource_id))
            log.debug('cleaning {}'.format(resource_id))
            resource_db_clean(resource_id, service)
            check_yaml_file(resource_id)
            clean_rds_resource_status(resource_id)
        except Exception as exp:
            # Fixed: Python 3 exceptions have no '.message' attribute;
            # use str(exp) instead.
            log.error("---------------{}---------------".format(exp))
            if 'not found' not in str(exp):
                log.exception(exp)
            continue
    return


if __name__ == '__main__':
    warning_message = (
        'IMPORTANT:- please note its your responsibility to backup the db'
        ' before runing this script... click enter before continue'
    )
    log.info(warning_message)
    _init()
    _start_cleaning()

View File

@ -1,4 +1,4 @@
import config as conf
from . import config as conf
import logging
import sqlalchemy

View File

@ -1,111 +1,112 @@
# Resource cleanup script (Python 2 era): removes a resource from the
# CMS/FMS/IMS databases after verifying it has no remaining regions.
import cli_comander as cli
import db_comander as db
import initializer
import logging
import sys
import utils
import yaml_handler as yh

log = logging.getLogger(__name__)


def _validate_service(service):
    """Return *service* upper-cased; raise if it is not CMS/FMS/IMS."""
    allowed_services = ['CMS', 'FMS', 'IMS']
    if service.upper() not in allowed_services:
        raise Exception("service should be one of {}".format(allowed_services))
    return service.upper()


def _init():
    """Initialize logging before any cleanup work starts."""
    initializer.init_log()
    return


def read_csv_file(file):
    """Read the CSV file listing resource ids to clean."""
    log.debug("reading file {}".format(file))
    return utils.read_csv_file(file)


def resource_db_clean(resource_id, service):
    """Delete the resource rows from the given service's database."""
    log.debug("cleaning {} db for resource {}".format(service, resource_id))
    db.remove_resource_db(resource_id, service)
    return


def check_yaml_file(resource_id):
    """Log any leftover yaml files for the resource (manual removal needed)."""
    log.debug('checking yml file if exist for resource {}'.format(resource_id))
    files = yh.check_yaml_exist(resource_id)
    message = 'no yaml files found for this resource'
    if files:
        message = "found files please remove manualy {}".format(files)
    log.debug(message)
    return


def get_resource_regions(resource_id, service_name):
    """Return (orm_regions, db_regions) for the resource in the service."""
    # Each service keeps region assignments in its own tables.
    if service_name.upper() == 'CMS':
        db_regions = db.get_cms_db_resource_regions(resource_id, service_name)
    elif service_name.upper() == 'FMS':
        db_regions = db.get_fms_db_resource_regions(resource_id, service_name)
    elif service_name.upper() == 'IMS':
        db_regions = db.get_ims_db_resource_regions(resource_id, service_name)
    # db_regions = db.get_resource_regions(resource_id, service_name)
    orm_regions = cli.get_resource_regions(resource_id, service_name)
    return orm_regions, db_regions


def clean_rds_resource_status(resource_id):
    """Remove the resource's status rows from the RDS status database."""
    log.debug("clean rds status db for resource {}".format(resource_id))
    db.remove_rds_resource_status(resource_id)
    return


def _start_cleaning():
    """Clean every resource listed in the CSV given on the command line.

    Usage: <script> <csv_file> <service>; refuses to clean a resource
    that still has regions assigned in ORM or in the service database.
    """
    log.info('start cleaning')
    file_path = sys.argv[1]
    service = _validate_service(sys.argv[2])
    resourses_to_clean = read_csv_file(file_path)
    for resource_id in resourses_to_clean:
        try:
            log.debug(
                'check if resource {} has any regions before clean'.format(
                    resource_id))
            resource_regions, db_regions = get_resource_regions(resource_id,
                                                                service)
            if resource_regions or db_regions:
                log.error(
                    "got regions {} {} please clean regions from orm before"
                    " removing the resource {}".format(resource_regions,
                                                       db_regions,
                                                       resource_id))
                raise Exception(
                    "got regions {} {} please clean regions from orm before"
                    " removing the resource {}".format(resource_regions,
                                                       db_regions,
                                                       resource_id))
            log.debug('cleaning {}'.format(resource_id))
            resource_db_clean(resource_id, service)
            check_yaml_file(resource_id)
            clean_rds_resource_status(resource_id)
            if service.upper() == "IMS":
                db.remove_rds_image_metadata(resource_id)
        except Exception as exp:
            # str(exp) instead of exp.message: .message is deprecated in
            # Python 2.6+ and absent on many exception types.
            log.error("---------------{}---------------".format(str(exp)))
            if 'not found' not in str(exp):
                log.exception(exp)
            continue
    return


if __name__ == '__main__':
    # raw_input is used only as a pause so the operator can abort
    # before the destructive cleanup starts; its return value is unused.
    warning_message = raw_input(
        'IMPORTANT:- please note its your responsibility to backup the db'
        ' before running this script... click enter before continue'
    )
    _init()
    _start_cleaning()
# Resource cleanup script (Python 3 version): removes a resource from the
# CMS/FMS/IMS databases after verifying it has no remaining regions.
from . import cli_comander as cli
from . import db_comander as db
from . import initializer
# logging and sys are standard-library modules: they must be imported
# absolutely. `from . import logging` would look for a package-local
# module named "logging" and raise ImportError at startup.
import logging
import sys
from . import utils
from . import yaml_handler as yh

log = logging.getLogger(__name__)


def _validate_service(service):
    """Return *service* upper-cased; raise if it is not CMS/FMS/IMS."""
    allowed_services = ['CMS', 'FMS', 'IMS']
    if service.upper() not in allowed_services:
        raise Exception("service should be one of {}".format(allowed_services))
    return service.upper()


def _init():
    """Initialize logging before any cleanup work starts."""
    initializer.init_log()
    return


def read_csv_file(file):
    """Read the CSV file listing resource ids to clean."""
    log.debug("reading file {}".format(file))
    return utils.read_csv_file(file)


def resource_db_clean(resource_id, service):
    """Delete the resource rows from the given service's database."""
    log.debug("cleaning {} db for resource {}".format(service, resource_id))
    db.remove_resource_db(resource_id, service)
    return


def check_yaml_file(resource_id):
    """Log any leftover yaml files for the resource (manual removal needed)."""
    log.debug('checking yml file if exist for resource {}'.format(resource_id))
    files = yh.check_yaml_exist(resource_id)
    message = 'no yaml files found for this resource'
    if files:
        message = "found files please remove manualy {}".format(files)
    log.debug(message)
    return


def get_resource_regions(resource_id, service_name):
    """Return (orm_regions, db_regions) for the resource in the service."""
    # Each service keeps region assignments in its own tables.
    if service_name.upper() == 'CMS':
        db_regions = db.get_cms_db_resource_regions(resource_id, service_name)
    elif service_name.upper() == 'FMS':
        db_regions = db.get_fms_db_resource_regions(resource_id, service_name)
    elif service_name.upper() == 'IMS':
        db_regions = db.get_ims_db_resource_regions(resource_id, service_name)
    # db_regions = db.get_resource_regions(resource_id, service_name)
    orm_regions = cli.get_resource_regions(resource_id, service_name)
    return orm_regions, db_regions


def clean_rds_resource_status(resource_id):
    """Remove the resource's status rows from the RDS status database."""
    log.debug("clean rds status db for resource {}".format(resource_id))
    db.remove_rds_resource_status(resource_id)
    return


def _start_cleaning():
    """Clean every resource listed in the CSV given on the command line.

    Usage: <script> <csv_file> <service>; refuses to clean a resource
    that still has regions assigned in ORM or in the service database.
    """
    log.info('start cleaning')
    file_path = sys.argv[1]
    service = _validate_service(sys.argv[2])
    resourses_to_clean = read_csv_file(file_path)
    for resource_id in resourses_to_clean:
        try:
            log.debug(
                'check if resource {} has any regions before clean'.format(
                    resource_id))
            resource_regions, db_regions = get_resource_regions(resource_id,
                                                                service)
            if resource_regions or db_regions:
                log.error(
                    "got regions {} {} please clean regions from orm before"
                    " removing the resource {}".format(resource_regions,
                                                       db_regions,
                                                       resource_id))
                raise Exception(
                    "got regions {} {} please clean regions from orm before"
                    " removing the resource {}".format(resource_regions,
                                                       db_regions,
                                                       resource_id))
            log.debug('cleaning {}'.format(resource_id))
            resource_db_clean(resource_id, service)
            check_yaml_file(resource_id)
            clean_rds_resource_status(resource_id)
            if service.upper() == "IMS":
                db.remove_rds_image_metadata(resource_id)
        except Exception as exp:
            # BaseException.message was removed in Python 3; str(exp)
            # yields the same text without raising AttributeError.
            log.error("---------------{}---------------".format(str(exp)))
            if 'not found' not in str(exp):
                log.exception(exp)
            continue
    return


if __name__ == '__main__':
    warning_message = (
        'IMPORTANT:- please note its your responsibility to backup the db'
        ' before running this script... click enter before continue'
    )
    log.info(warning_message)
    _init()
    _start_cleaning()

View File

@ -1,4 +1,4 @@
import config as conf
from . import config as conf
import fnmatch
import os

View File

@ -1,4 +1,3 @@
#!/usr/bin/env python
import argparse
import json
import os
@ -26,19 +25,19 @@ def sh(harg, file_name):
cmd = create_command(harg, file_name)
print '>> Starting: ' + cmd
print('>> Starting: ' + cmd)
start = time.time()
output = ''
p = subprocess.Popen(cmd.split(), shell=False, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
for line in iter(p.stdout.readline, b''):
out = line.rstrip()
print(">>> " + out)
print((">>> " + out))
output += out
end = time.time()
span = end - start
retcode = p.wait()
print '>> Ended: %s [%s, %d:%02d]' % (cmd, retcode, span / 60, span % 60)
print('>> Ended: %s [%s, %d:%02d]' % (cmd, retcode, span / 60, span % 60))
return retcode, output
@ -61,7 +60,7 @@ for file in [os.path.join(dp, f) for dp, dn, fn in
except ValueError:
continue
print f
print(f)
flavor_name = calculate_name(f)
fh, file_name = tempfile.mkstemp()
os.write(fh, json.dumps({"flavor": f}))

View File

@ -1,4 +1,3 @@
#!/usr/bin/env python
import json
import sys

View File

@ -1,4 +1,3 @@
#!/usr/bin/env python
import json
import re
import sys

View File

@ -1,4 +1,3 @@
#!/usr/bin/env python
import argparse
import ast
import json
@ -34,7 +33,7 @@ def get_region_list(regions):
result.append({'name': result_region['name'],
'designType': result_region['designType']})
else:
print 'Failed to get region %s, aborting...' % (region,)
print('Failed to get region %s, aborting...' % (region,))
exit(1)
return result
@ -75,19 +74,19 @@ def sh(cli_command):
# return status and output
cmd = create_command(cli_command)
print '>> Starting: ' + cmd
print('>> Starting: ' + cmd)
start = time.time()
output = ''
errpat = re.compile('error', re.I)
p = subprocess.Popen(cmd.split(), shell=False, stdout=subprocess.PIPE)
for line in iter(p.stdout.readline, b''):
out = line.rstrip()
print(">>> " + out)
print((">>> " + out))
output += out
end = time.time()
span = end - start
retcode = p.wait()
print '>> Ended: %s [%s, %d:%02d]' % (cmd, retcode, span / 60, span % 60)
print('>> Ended: %s [%s, %d:%02d]' % (cmd, retcode, span / 60, span % 60))
return retcode, output
@ -108,7 +107,7 @@ args = parser.parse_args()
regions = args.regions.split(',')
series_list = args.series.split(',')
if not regions:
print "Must specify at least one region"
print("Must specify at least one region")
exit(1)
# Get all regions from RMS
@ -145,7 +144,7 @@ for file in [os.path.join(dp, f) for dp, dn, fn in
if not res:
flavor = ast.literal_eval(output)
FID = flavor['flavor']['id']
print 'fid: ' + FID
print('fid: ' + FID)
res, output = sh('add_region')
os.unlink(FILE_NAME)
@ -157,5 +156,5 @@ if not any_update:
exp = 'combination of regions:[{}] and series:[{}]'.format(
args.regions, args.series)
print('No flavor was updated, please make sure that the {} matches any '
'flavor under the flavor directory'.format(exp))
print(('No flavor was updated, please make sure that the {} matches any '
'flavor under the flavor directory'.format(exp)))

View File

@ -1,4 +1,3 @@
#!/usr/bin/env python
import argparse
import json
import os
@ -25,7 +24,7 @@ def sh(cmd):
# print runtime and status
# return status and output
print '>> Starting: ' + cmd
print('>> Starting: ' + cmd)
start = time.time()
output = ''
errpat = re.compile('error', re.I)
@ -35,12 +34,12 @@ def sh(cmd):
stderr=subprocess.STDOUT)
for line in iter(p.stdout.readline, b''):
out = line.rstrip()
print(">>> " + out)
print((">>> " + out))
output += out
end = time.time()
span = end - start
retcode = p.wait()
print '>> Ended: %s [%s, %d:%02d]' % (cmd, retcode, span / 60, span % 60)
print('>> Ended: %s [%s, %d:%02d]' % (cmd, retcode, span / 60, span % 60))
return retcode, output
@ -65,7 +64,7 @@ for file in [f for f in os.listdir(IMAGE_DIR) if
isfile(join(IMAGE_DIR, f))]:
f = read_jsonfile(join(IMAGE_DIR, file))
print f
print(f)
image_name = f['name']
fh, file_name = tempfile.mkstemp()
os.write(fh, json.dumps({"image": f}))
@ -80,7 +79,7 @@ for file in [f for f in os.listdir(IMAGE_DIR) if
image_name,
'Success' if res == 0 else 'Failed'))
print "\nImage creation summary:"
print "-----------------------"
print("\nImage creation summary:")
print("-----------------------")
for s in summary:
print s
print(s)

View File

@ -1,9 +1,8 @@
#!/usr/bin/env python
import argparse
import ast
import json
import os
from os.path import isfile, join
from os.path import (isfile, join)
import re
import subprocess
import tempfile
@ -25,19 +24,19 @@ def sh(cmd):
# print runtime and status
# return status and output
print '>> Starting: ' + cmd
print('>> Starting: ' + cmd)
start = time.time()
output = ''
errpat = re.compile('error', re.I)
p = subprocess.Popen(cmd.split(), shell=False, stdout=subprocess.PIPE)
for line in iter(p.stdout.readline, b''):
out = line.rstrip()
print(">>> " + out)
print((">>> " + out))
output += out
end = time.time()
span = end - start
retcode = p.wait()
print '>> Ended: %s [%s, %d:%02d]' % (cmd, retcode, span / 60, span % 60)
print('>> Ended: %s [%s, %d:%02d]' % (cmd, retcode, span / 60, span % 60))
return retcode, output
@ -63,7 +62,7 @@ args = parser.parse_args()
regions = args.regions.split(',')
if not regions:
print "Must specify at least one region"
print("Must specify at least one region")
exit(0)
data = {'regions': [{'name': r} for r in regions]}
fh, file_name = tempfile.mkstemp()
@ -79,21 +78,21 @@ if not res:
images = ast.literal_eval(output)
for img in images['images']:
img_dict[img['name']] = img['id']
print img_dict
print(img_dict)
for file in [f for f in os.listdir(IMAGE_DIR) if
isfile(join(IMAGE_DIR, f))]:
f = read_jsonfile(join(IMAGE_DIR, file))
print f
print(f)
image_name = f['name']
if image_name in img_dict:
image_id = img_dict[image_name]
print 'image_id: ' + image_id
print('image_id: ' + image_id)
res, output = sh('python %s ims add_regions test %s %s' % (
CLI_PATH, image_id, file_name))
else:
print 'python image_name: {} does not exist. ' \
'ignore.'.format(image_name)
print('python image_name: {} does not exist. '
'ignore.'.format(image_name))
os.unlink(file_name)

View File

@ -1,4 +1,4 @@
import config
from . import config
import json
import requests
@ -65,7 +65,7 @@ def get_token(timeout, args):
message = ('ERROR: {} for token generation was not supplied. '
'Please use its command-line argument or '
'environment variable.'.format(argument))
print message
print(message)
raise MissingArgumentError(message)
keystone_ep = args.keystone_auth_url if args.keystone_auth_url else None
@ -83,9 +83,9 @@ def get_token(timeout, args):
project_domain,)
if args.verbose:
print(
print((
"Getting token:\ntimeout: %d\nheaders: %s\nurl: %s\n" % (
timeout, headers, url))
timeout, headers, url)))
try:
resp = requests.post(url, timeout=timeout, data=data, headers=headers)
if resp.status_code != 201:
@ -95,10 +95,10 @@ def get_token(timeout, args):
return resp.headers['x-subject-token']
except Exception as e:
print e.message
raise ConnectionError(e.message)
print(str(e))
raise ConnectionError(str(e))
def pretty_print_json(json_to_print):
"""Print a json without the u' prefix."""
print(json.dumps(json_to_print))
print((json.dumps(json_to_print)))

View File

@ -1,11 +1,11 @@
#!/usr/bin/python
import argparse
import cli_common
import config
import orm.base_config as base_config
import os
import requests
from . import cli_common
from . import config
class ResponseError(Exception):
pass
@ -620,7 +620,7 @@ def validate_args(args):
message = ('ERROR: {} for token generation was not supplied. '
'Please use its command-line argument or '
'environment variable.'.format(argument))
print message
print(message)
raise cli_common.MissingArgumentError(message)
@ -643,11 +643,11 @@ def run(args):
rest_cmd, cmd_url = cmd_details(args)
url = '%s/v1/orm/' % (host) + cmd_url
if args.faceless:
auth_token = auth_region = requester = client = ''
auth_key = auth_region = requester = client = ''
else:
try:
validate_args(args)
auth_token = cli_common.get_token(timeout, args)
auth_key = cli_common.get_token(timeout, args)
except Exception:
exit(1)
auth_region = globals()['auth_region']
@ -657,7 +657,7 @@ def run(args):
tracking_id = args.tracking_id if args.tracking_id else None
headers = {
'content-type': 'application/json',
'X-Auth-Token': auth_token,
'X-Auth-Token': auth_key,
'X-Auth-Region': auth_region,
'X-RANGER-Requester': requester,
'X-RANGER-Client': client,
@ -665,22 +665,22 @@ def run(args):
}
if args.verbose:
print(
print((
"Sending API:\ntimeout: %d\ndata: %s\nheaders: %s\ncmd: %s\nurl: "
"%s\n" % (
timeout, data, headers, rest_cmd.__name__, url))
timeout, data, headers, rest_cmd.__name__, url)))
try:
resp = rest_cmd(url, timeout=timeout, data=data, headers=headers,
verify=config.verify)
except Exception as e:
print e
print(e)
exit(1)
if not 200 <= resp.status_code < 300:
content = resp.content
print 'API error: %s %s (Reason: %d)\n%s' % (
rest_cmd.func_name.upper(), url, resp.status_code, content)
print('API error: %s %s (Reason: %d)\n%s' % (
rest_cmd.__name__.upper(), url, resp.status_code, content))
exit(1)
if resp.status_code == 204: # no content
@ -688,6 +688,6 @@ def run(args):
rj = resp.json()
if rj == 'Not found':
print 'No output was found'
print('No output was found')
else:
cli_common.pretty_print_json(rj)

View File

@ -1,11 +1,11 @@
#!/usr/bin/python
import argparse
import cli_common
import config
import orm.base_config as base_config
import os
import requests
from . import cli_common
from . import config
class ResponseError(Exception):
pass
@ -308,7 +308,7 @@ def validate_args(args):
message = ('ERROR: {} for token generation was not supplied. '
'Please use its command-line argument or '
'environment variable.'.format(argument))
print message
print(message)
raise cli_common.MissingArgumentError(message)
@ -332,11 +332,11 @@ def run(args):
rest_cmd, cmd_url = cmd_details(args)
url = '%s/v1/orm/flavors' % (host) + cmd_url
if args.faceless:
auth_token = auth_region = requester = client = ''
auth_key = auth_region = requester = client = ''
else:
try:
validate_args(args)
auth_token = cli_common.get_token(timeout, args)
auth_key = cli_common.get_token(timeout, args)
except Exception:
exit(1)
auth_region = globals()['auth_region']
@ -346,7 +346,7 @@ def run(args):
tracking_id = args.tracking_id if args.tracking_id else None
headers = {
'content-type': 'application/json',
'X-Auth-Token': auth_token,
'X-Auth-Token': auth_key,
'X-Auth-Region': auth_region,
'X-RANGER-Requester': requester,
'X-RANGER-Client': client,
@ -354,21 +354,20 @@ def run(args):
}
if args.verbose:
print(
print((
"Sending API:\ntimeout: %d\ndata: %s\nheaders: %s\ncmd: %s\nurl:"
" %s\n" % (
timeout, data, headers, rest_cmd.__name__, url))
timeout, data, headers, rest_cmd.__name__, url)))
try:
resp = rest_cmd(url, timeout=timeout, data=data, headers=headers,
verify=config.verify)
except Exception as e:
print e
print(e)
exit(1)
if not 200 <= resp.status_code < 300:
content = resp.content
print 'API error: %s %s (Reason: %d)\n%s' % (
rest_cmd.func_name.upper(), url, resp.status_code, content)
print('API error: %s %s (Reason: %d)\n%s' % (
rest_cmd.__name__.upper(), url, resp.status_code, content))
exit(1)
if resp.status_code == 204: # no content
@ -376,6 +375,6 @@ def run(args):
rj = resp.json()
if rj == 'Not found':
print 'No output was found'
print('No output was found')
else:
cli_common.pretty_print_json(rj)

View File

@ -1,10 +1,9 @@
#!/usr/bin/python
import argparse
import config
import orm.base_config as base_config
import os
import requests
from . import config
from orm.orm_client.ormcli import cli_common
@ -201,7 +200,7 @@ def validate_args(args):
message = ('ERROR: {} for token generation was not supplied. '
'Please use its command-line argument or '
'environment variable.'.format(argument))
print message
print(message)
raise cli_common.MissingArgumentError(message)
@ -285,11 +284,11 @@ def run(args):
rest_cmd, cmd_url = cmd_details(args)
url = '%s/v1/orm/images' % (host) + cmd_url
if args.faceless:
auth_token = auth_region = requester = client = ''
auth_key = auth_region = requester = client = ''
else:
try:
validate_args(args)
auth_token = cli_common.get_token(timeout, args)
auth_key = cli_common.get_token(timeout, args)
except Exception:
exit(1)
auth_region = globals()['auth_region']
@ -299,7 +298,7 @@ def run(args):
tracking_id = args.tracking_id if args.tracking_id else None
headers = {
'content-type': 'application/json',
'X-Auth-Token': auth_token,
'X-Auth-Token': auth_key,
'X-Auth-Region': auth_region,
'X-RANGER-Requester': requester,
'X-RANGER-Client': client,
@ -307,26 +306,26 @@ def run(args):
}
if args.verbose:
print("Sending API:\ntimeout: %d\ndata: %s\n"
print(("Sending API:\ntimeout: %d\ndata: %s\n"
"headers: %s\ncmd: %s\nurl: %s\n" % (timeout,
data,
headers,
rest_cmd.__name__,
url))
url)))
try:
resp = rest_cmd(url, timeout=timeout, data=data, headers=headers,
verify=config.verify)
except Exception as e:
print e
print(e)
exit(1)
if not 200 <= resp.status_code < 300:
content = resp.content
print 'API error: %s %s (Reason: %d)\n%s' % (
rest_cmd.func_name.upper(),
print('API error: %s %s (Reason: %d)\n%s' % (
rest_cmd.__name__.upper(),
url,
resp.status_code,
content)
content))
exit(1)
if resp.status_code == 204: # no content
@ -334,6 +333,6 @@ def run(args):
rj = resp.json()
if rj == 'Not found':
print 'No output was found'
print('No output was found')
else:
cli_common.pretty_print_json(rj)

View File

@ -1,5 +1,4 @@
#!/usr/bin/env python
#! /usr/bin/python3
import sys
import ormcli

View File

@ -1,4 +1,3 @@
#!/usr/bin/python
import argparse
from orm.orm_client.ormcli import cmscli
from orm.orm_client.ormcli import fmscli
@ -15,7 +14,7 @@ class Cli:
metavar='<service>')
self.submod = {'cms': cmscli, 'fms': fmscli, 'ims': imscli,
'rms': rmscli}
for s in self.submod.values():
for s in list(self.submod.values()):
s.add_to_parser(service_sub)
def parse(self, argv=sys.argv):
@ -23,7 +22,11 @@ class Cli:
self.args = self.parser.parse_args()
def logic(self):
self.submod[self.args.service].run(self.args)
if self.args.service:
self.submod[self.args.service].run(self.args)
else:
sys.stderr.write('too few arguments')
sys.exit(2)
def main(argv):

View File

@ -1,2 +0,0 @@
requests
argparse

View File

@ -1,11 +1,11 @@
#!/usr/bin/python
import argparse
import cli_common
import config
import orm.base_config as base_config
import os
import requests
from . import cli_common
from . import config
class ResponseError(Exception):
pass
@ -223,7 +223,7 @@ def validate_args(args):
message = ('ERROR: {} for token generation was not supplied. '
'Please use its command-line argument or '
'environment variable.'.format(argument))
print message
print(message)
raise cli_common.MissingArgumentError(message)
@ -321,11 +321,11 @@ def run(args):
rest_cmd, cmd_url = cmd_details(args)
url = '%s/%s' % (rms_base_url, url_path) + cmd_url
if args.faceless:
auth_token = auth_region = requester = client = ''
auth_key = auth_region = requester = client = ''
else:
try:
validate_args(args)
auth_token = cli_common.get_token(timeout, args)
auth_key = cli_common.get_token(timeout, args)
except Exception:
exit(1)
auth_region = globals()['auth_region']
@ -337,8 +337,8 @@ def run(args):
if args.use_version == 1:
url = '%s:%d/lcp' % (host, port) + cmd_url
elif args.use_version is not None and args.use_version != 2:
print 'API error: use_version argument - invalid value, ' \
'allowed values: 1 or 2'
print('API error: use_version argument - invalid value, '
'allowed values: 1 or 2')
exit(1)
if args.subcmd == "update_status":
@ -347,33 +347,33 @@ def run(args):
tracking_id = args.tracking_id if args.tracking_id else None
headers = {
'content-type': 'application/json',
'X-Auth-Token': auth_token,
'X-Auth-Token': auth_key,
'X-Auth-Region': auth_region,
'X-RANGER-Requester': requester,
'X-RANGER-Client': client,
'X-RANGER-Tracking-Id': tracking_id
}
if args.verbose:
print("Sending API:\ntimeout: %d\ndata: %s\n"
print(("Sending API:\ntimeout: %d\ndata: %s\n"
"headers: %s\ncmd: %s\nurl: %s\n" % (timeout,
data,
headers,
rest_cmd.__name__,
url))
url)))
try:
resp = rest_cmd(url, data=data, timeout=timeout, headers=headers,
verify=config.verify)
except Exception as e:
print e
print(str(e))
exit(1)
if not 200 <= resp.status_code < 300:
content = resp.content
print 'API error: %s %s (Reason: %d)\n%s' % (
rest_cmd.func_name.upper(),
print('API error: %s %s (Reason: %d)\n%s' % (
rest_cmd.__name__.upper(),
url,
resp.status_code,
content)
content))
exit(1)
if resp.status_code == 204: # no content
@ -382,6 +382,6 @@ def run(args):
rj = resp.json()
if rj == 'Not found':
print 'No output was found'
print('No output was found')
else:
cli_common.pretty_print_json(rj)

View File

@ -1,6 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pecan==1.0.2
requests==2.2.1

View File

@ -1,6 +1,6 @@
"""transaction controller module."""
import base
from . import base
import logging
import wsme
@ -17,7 +17,7 @@ logger = logging.getLogger(__name__)
class Transaction(base.Base):
"""transaction type."""
timestamp = wsme.wsattr(long, mandatory=True)
timestamp = wsme.wsattr(int, mandatory=True)
user_id = wsme.wsattr(wtypes.text, mandatory=False, default=None)
application_id = wsme.wsattr(wtypes.text, mandatory=True)
tracking_id = wsme.wsattr(wtypes.text, mandatory=True)
@ -72,8 +72,8 @@ class Transaction(base.Base):
class Query(base.Base):
"""query type."""
timestamp_from = wsme.wsattr(long, mandatory=False, default=None)
timestamp_to = wsme.wsattr(long, mandatory=False, default=None)
timestamp_from = wsme.wsattr(int, mandatory=False, default=None)
timestamp_to = wsme.wsattr(int, mandatory=False, default=None)
user_id = wsme.wsattr(wtypes.text, mandatory=False, default=None)
application_id = wsme.wsattr(wtypes.text, mandatory=False, default=None)
tracking_id = wsme.wsattr(wtypes.text, mandatory=False, default=None)

View File

@ -5,7 +5,6 @@ import logging
from orm.services.audit_trail_manager.audit_server.model.transaction import Model
from orm.services.audit_trail_manager.audit_server.storage import transaction
from sqlalchemy import BigInteger, Column, Integer, Text, asc, create_engine
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative.api import declarative_base
from sqlalchemy.orm import sessionmaker
@ -64,11 +63,11 @@ class Connection(transaction.Base):
service_name=transaction_record.service_name))
session.commit()
# All other exceptions will be raised
except IntegrityError as e:
except Exception as e:
# Except Exception as e:
session.rollback()
# Raise the exception only if it's not a duplicate entry exception
if 'duplicate entry' in e.message.lower():
if 'duplicate entry' in str(e).lower():
logger.warning(
"Fail to audit record - Duplicate entry: {}".format(
e))

View File

@ -1,19 +0,0 @@
[tox]
#envlist = py27, cover
envlist = py27, cover, pep8
[testenv]
setenv= PYTHONPATH={toxinidir}:{toxinidir}/audit_server/external_mock/
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
install_command = pip install -U {opts} {packages}
commands = python setup.py testr
[testenv:cover]
commands = python setup.py testr --slowest --coverage
coverage report --omit=audit_server/tests/*
[testenv:pep8]
#commands = flake8 --ignore=D100,D101,D102,D103,D104,D105
commands = flake8

View File

@ -17,7 +17,7 @@ class MetadataController(rest.RestController):
def check_metadata_values(self, metadata):
cust_metadata = CustomerMetadata()
for key, value in metadata.metadata.iteritems():
for key, value in metadata.metadata.items():
cust_metadata.validate_metadata(key, value)
@wsexpose(CustomerResultWrapper, str, body=MetadataWrapper, rest_content_types='json')
@ -34,10 +34,10 @@ class MetadataController(rest.RestController):
return res
except AttributeError as ex:
raise err_utils.get_error(request.transaction_id,
message=ex.message, status_code=409)
message=str(ex), status_code=409)
except ValueError as ex:
raise err_utils.get_error(request.transaction_id,
message=ex.message, status_code=404)
message=str(ex), status_code=404)
except ErrorStatus as ex:
LOG.log_exception("MetaDataController - Failed to add metadata", ex)
@ -48,7 +48,7 @@ class MetadataController(rest.RestController):
except LookupError as ex:
LOG.log_exception("MetaDataController - {0}".format(ex.message), ex)
raise err_utils.get_error(request.transaction_id,
message=ex.message, status_code=400)
message=str(ex), status_code=400)
except Exception as ex:
LOG.log_exception("MetaDataController - Failed to add metadata", ex)
raise err_utils.get_error(request.transaction_id,
@ -69,10 +69,10 @@ class MetadataController(rest.RestController):
return res
except AttributeError as ex:
raise err_utils.get_error(request.transaction_id,
message=ex.message, status_code=400)
message=str(ex), status_code=400)
except ValueError as ex:
raise err_utils.get_error(request.transaction_id,
message=ex.message, status_code=404)
message=str(ex), status_code=404)
except ErrorStatus as ex:
LOG.log_exception("MetaDataController - Failed to add metadata", ex)
raise err_utils.get_error(request.transaction_id,
@ -81,7 +81,7 @@ class MetadataController(rest.RestController):
except LookupError as ex:
LOG.log_exception("MetaDataController - {0}".format(ex.message), ex)
raise err_utils.get_error(request.transaction_id,
message=ex.message, status_code=400)
message=str(ex), status_code=400)
except Exception as ex:
LOG.log_exception("MetaDataController - Failed to add metadata", ex)
raise err_utils.get_error(request.transaction_id,

View File

@ -113,7 +113,7 @@ class RegionController(rest.RestController):
except ValueError as exception:
raise err_utils.get_error(request.transaction_id,
message=exception.message,
message=str(exception),
status_code=404)
except ErrorStatus as exception:
LOG.log_exception("CustomerController - Failed to delete region", exception)

View File

@ -1,5 +1,4 @@
from pecan import rest, request, response
import oslo_db
from wsmeext.pecan import wsexpose
from orm.common.orm_common.utils import api_error_utils as err_utils
@ -39,10 +38,10 @@ class CustomerController(rest.RestController):
status_code=exception.status_code)
except Exception as exception:
LOG.log_exception("CustomerController - Failed to GetCustomerDetails", exception)
LOG.log_exception("CustomerController - Failed to GetCustomerDetails", str(exception))
raise err_utils.get_error(request.transaction_id,
status_code=500,
error_details=exception.message)
error_details=str(exception))
return result
@ -63,9 +62,8 @@ class CustomerController(rest.RestController):
try:
result = customer_logic.create_customer(customer, uuid, request.transaction_id)
except oslo_db.exception.DBDuplicateEntry as exception:
raise ErrorStatus(409.2, 'Customer field {0} already exists'.format(exception.columns))
except Exception as exception:
raise ErrorStatus(409.2, 'Customer already exists.Error:{0}'.format(str(exception)))
LOG.info("CustomerController - Customer Created: " + str(result))
event_details = 'Customer {} {} created in regions: {}, with users: {}'.format(
uuid, customer.name, [r.name for r in customer.regions],
@ -78,14 +76,13 @@ class CustomerController(rest.RestController):
except ErrorStatus as exception:
LOG.log_exception("CustomerController - Failed to CreateCustomer", exception)
raise err_utils.get_error(request.transaction_id,
message=exception.message,
message=str(exception),
status_code=exception.status_code)
except Exception as exception:
LOG.log_exception("CustomerController - Failed to CreateCustomer", exception)
LOG.log_exception("CustomerController - Failed to CreateCustomer", str(exception))
raise err_utils.get_error(request.transaction_id,
status_code=500,
error_details=exception.message)
error_details=str(exception))
@wsexpose(CustomerResultWrapper, str, body=Customer, rest_content_types='json', status_code=200)
def put(self, customer_id, customer):

View File

@ -47,7 +47,7 @@ class RegionUserController(rest.RestController):
except DBDuplicateEntry as exception:
LOG.log_exception(
"DBDuplicateEntry - Group users already assigned.", exception)
print exception.message
print(exception.message)
raise err_utils.get_error(
request.transaction_id,
status_code=409,
@ -96,7 +96,7 @@ class RegionUserController(rest.RestController):
except ValueError as exception:
raise err_utils.get_error(request.transaction_id,
message=exception.message,
message=str(exception),
status_code=404)
except ErrorStatus as exception:
LOG.log_exception("ErrorStatus - Failed to delete user from group",

View File

@ -103,7 +103,7 @@ class RegionController(rest.RestController):
except ValueError as exception:
raise err_utils.get_error(request.transaction_id,
message=exception.message,
message=str(exception),
status_code=404)
except ErrorStatus as exception:
LOG.log_exception("RegionController - Failed to delete region",

View File

@ -94,7 +94,7 @@ class RoleController(rest.RestController):
except ValueError as exception:
raise err_utils.get_error(request.transaction_id,
message=exception.message,
message=str(exception),
status_code=404)
except ErrorStatus as exception:
LOG.log_exception("ErrorStatus - Failed to unassign roles",

View File

@ -45,7 +45,7 @@ class UserController(rest.RestController):
except DBDuplicateEntry as exception:
LOG.log_exception(
"DBDuplicateEntry - Group users already assigned.", exception)
print exception.message
print(exception.message)
raise err_utils.get_error(
request.transaction_id,
status_code=409,
@ -92,7 +92,7 @@ class UserController(rest.RestController):
except ValueError as exception:
raise err_utils.get_error(request.transaction_id,
message=exception.message,
message=str(exception),
status_code=404)
except ErrorStatus as exception:
LOG.log_exception("ErrorStatus - Failed to delete user from group",

View File

@ -1,4 +1,4 @@
from __future__ import absolute_import
from ..orm.configuration import ConfigurationController
from ..orm.customer.root import CustomerController

View File

@ -1,4 +1,4 @@
from orm.root import OrmController
from .orm.root import OrmController
from pecan.rest import RestController

View File

@ -25,7 +25,7 @@ from orm.services.customer_manager.cms_rest.data.sql_alchemy.models \
from orm.services.customer_manager.cms_rest.data.sql_alchemy.user_role_record \
import UserRoleRecord
from orm.services.customer_manager.cms_rest.logic.error_base import ErrorStatus
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy.enginefacade import LegacyEngineFacade
from pecan import conf
from sqlalchemy.event import listen
from sqlalchemy import or_
@ -35,7 +35,7 @@ LOG = logging.getLogger(__name__)
# event handling
def on_before_flush(session, flush_context, instances):
print("on_before_flush:", str(flush_context))
print(("on_before_flush:", str(flush_context)))
for model in session.new:
if hasattr(model, "validate"):
model.validate("new")
@ -52,8 +52,7 @@ class DataManager(object):
if not connection_string:
connection_string = conf.database.connection_string
self._engine_facade = db_session.EngineFacade(connection_string,
autocommit=False)
self._engine_facade = LegacyEngineFacade(connection_string, autocommit=False)
self._session = None
listen(self.session, 'before_flush', on_before_flush)
self.image_record = None
@ -230,8 +229,8 @@ class DataManager(object):
# FIXME: next line assumes that only one quota of each type is
# available and thus quota_by_type[0] is used
for field_key, field_value in DataManager.get_dict_from_quota(
quota_by_type[0], quota_type).items():
for field_key, field_value in list(DataManager.get_dict_from_quota(
quota_by_type[0], quota_type).items()):
sql_quota_field_detail = QuotaFieldDetail(
quota_id=sql_quota.id,
field_key=field_key,

View File

@ -1,4 +1,4 @@
from __builtin__ import int
from builtins import int
from sqlalchemy import func
@ -83,7 +83,7 @@ class CustomerRecord:
if results:
resource_status_dict = dict((id, (resource_id, region, status)) for id, resource_id, region, status in results)
# using resource_status_dict, create cust_region_dict with resource_id as key and (region, status) as value
for v in resource_status_dict.values():
for v in list(resource_status_dict.values()):
if v[0] in cust_region_dict:
cust_region_dict[v[0]].append(v[1:])
else:

View File

@ -57,7 +57,7 @@ class CustomerRegionRecord:
def delete_region_for_customer(self, customer_id, region_name):
# customer_id can be a uuid (type of string) or id (type of int)
# if customer_id is uuid I get id from uuid and use the id in the next sql command
if isinstance(customer_id, basestring):
if isinstance(customer_id, str):
customer_record = CustomerRecord(self.session)
customer_id = customer_record.get_customer_id_from_uuid(customer_id)
# get region id by the name I got (region_name)
@ -81,7 +81,7 @@ class CustomerRegionRecord:
def delete_all_regions_for_customer(self, customer_id): # not including default region which is -1
# customer_id can be a uuid (type of string) or id (type of int)
# if customer_id is uuid I get id from uuid and use the id in the next sql command
if isinstance(customer_id, basestring):
if isinstance(customer_id, str):
customer_record = CustomerRecord(self.session)
customer_id = customer_record.get_customer_id_from_uuid(customer_id)

View File

@ -1,4 +1,4 @@
from __builtin__ import int
from builtins import int
from orm.services.customer_manager.cms_rest.data.sql_alchemy.models import (
Groups,
@ -89,7 +89,7 @@ class GroupRecord:
for id, resource_id, region, status in results)
# using resource_status, create group_region with resource_id
# as key and (region, status) as value
for v in resource_status.values():
for v in list(resource_status.values()):
if v[0] in group_region:
group_region[v[0]].append(v[1:])
else:

View File

@ -138,7 +138,7 @@ class GroupsCustomerRoleRecord:
# group_id can be a uuid (type of string) or id (type of int).
# If group_id is uuid, then get id from uuid and use the id in the
# next sql command
if isinstance(group_id, basestring):
if isinstance(group_id, str):
group_record = GroupRecord(self.session)
group_id = group_record.get_group_id_from_uuid(group_id)

View File

@ -137,7 +137,7 @@ class GroupsDomainRoleRecord:
# group_id can be a uuid (type of string) or id (type of int).
# If group_id is uuid, then get id from uuid and use the id in the
# next sql command
if isinstance(group_id, basestring):
if isinstance(group_id, str):
group_record = GroupRecord(self.session)
group_id = group_record.get_group_id_from_uuid(group_id)

View File

@ -106,7 +106,7 @@ class GroupsRegionRecord:
# group_id can be a uuid (type of string) or id (type of int).
# If group_id is uuid, then get id from uuid and use the id in the
# next sql command
if isinstance(group_id, basestring):
if isinstance(group_id, str):
group_record = GroupRecord(self.session)
group_id = group_record.get_group_id_from_uuid(group_id)

View File

@ -88,7 +88,7 @@ class GroupsRoleRecord:
# group_id can be a uuid (type of string) or id (type of int).
# If group_id is uuid, then get id from uuid and use the id in the
# next sql command
if isinstance(group_id, basestring):
if isinstance(group_id, str):
group_record = GroupRecord(self.session)
group_id = group_record.get_group_id_from_uuid(group_id)

View File

@ -101,7 +101,7 @@ class GroupsUserRecord:
# Check if 'region_id' is a string - if so, get corresponding
# cms_region id value for use later to query/delete the
# corresponding group user record
if isinstance(region_id, basestring):
if isinstance(region_id, str):
region_query = region_id
region_record = RegionRecord(self.session)
region_id = region_record.get_region_id_from_name(region_id)

View File

@ -149,11 +149,11 @@ class Groups(Base, CMSBaseModel):
# Set up output using customer and domain dict
roles = []
for customer_uuid, customer_roles in unique_customer_roles.items():
for customer_uuid, customer_roles in list(unique_customer_roles.items()):
roles.append(
GroupWsmeModels.RoleAssignment(roles=customer_roles,
customer=customer_uuid))
for domain_name, domain_roles in unique_domain_roles.items():
for domain_name, domain_roles in list(unique_domain_roles.items()):
roles.append(GroupWsmeModels.RoleAssignment(roles=domain_roles,
domain=domain_name))
@ -166,7 +166,7 @@ class Groups(Base, CMSBaseModel):
else:
unique_domain[user.domain_name] = [user.user.name]
for domain, domain_user in unique_domain.items():
for domain, domain_user in list(unique_domain.items()):
users.append(GroupWsmeModels.User(id=domain_user,
domain=domain))
@ -260,7 +260,7 @@ class GroupsRegion(Base, CMSBaseModel):
else:
unique_domain[user.domain_name] = [user.user.name]
for domain, domain_user in unique_domain.items():
for domain, domain_user in list(unique_domain.items()):
users.append(GroupWsmeModels.User(id=domain_user,
domain=domain))
@ -676,7 +676,7 @@ class CustomerRegion(Base, CMSBaseModel):
quotas = {}
# The WSME can't handle existing data and shows empty values for unset new quotas
for class_name, class_value in WsmeModels.__dict__.iteritems():
for class_name, class_value in WsmeModels.__dict__.items():
if str(class_name) in "Network, Storage, Compute":
quotas[str(class_name).lower()] = {}
for field_key in dir(class_value):

View File

@ -43,18 +43,18 @@ class UserRoleRecord:
def delete_user_from_region(self, customer_id, region_id, user_id):
# customer_id can be a uuid (type of string) or id (type of int)
# if customer_id is uuid I get id from uuid and use the id in the next sql command
if isinstance(customer_id, basestring):
if isinstance(customer_id, str):
customer_record = CustomerRecord(self.session)
customer_id = customer_record.get_customer_id_from_uuid(customer_id)
if isinstance(region_id, basestring):
if isinstance(region_id, str):
region_query = region_id
region_record = RegionRecord(self.session)
region_id = region_record.get_region_id_from_name(region_id)
if region_id is None:
raise NotFound("region {} ".format(region_query))
if isinstance(user_id, basestring):
if isinstance(user_id, str):
user_query = user_id
cms_user_record = CmsUserRecord(self.session)
user_id = cms_user_record.get_cms_user_id_from_name(user_id)
@ -93,11 +93,11 @@ class UserRoleRecord:
def delete_all_users_from_region(self, customer_id, region_id):
# customer_id can be a uuid (type of string) or id (type of int)
# if customer_id is uuid I get id from uuid and use the id in the next sql command
if isinstance(customer_id, basestring):
if isinstance(customer_id, str):
customer_record = CustomerRecord(self.session)
customer_id = customer_record.get_customer_id_from_uuid(customer_id)
if isinstance(region_id, basestring):
if isinstance(region_id, str):
region_record = RegionRecord(self.session)
region_id = region_record.get_region_id_from_name(region_id)
if region_id == -1:
@ -114,5 +114,5 @@ class UserRoleRecord:
result = self.session.connection().execute(delete_query)
print "num records deleted: " + str(result.rowcount)
print("num records deleted: " + str(result.rowcount))
return result

View File

@ -41,7 +41,7 @@ class CustomerLogic(object):
datamanager.rollback()
raise
for key, value in customer.metadata.iteritems():
for key, value in customer.metadata.items():
cust_metadata.validate_metadata(key, value)
metadata = CustomerMetadata(field_key=key, field_value=value)
sql_customer.customer_metadata.append(metadata)
@ -97,7 +97,7 @@ class CustomerLogic(object):
for sql_user in existing_default_users_roles:
default_users_dic[sql_user.name] = sql_user
for user in default_users_requested:
is_default_user_exist = user.id in default_users_dic.keys()
is_default_user_exist = user.id in list(default_users_dic.keys())
if not is_default_user_exist:
sql_user = datamanager.add_user(user.id)
default_region_users.append(sql_user)
@ -138,7 +138,7 @@ class CustomerLogic(object):
# Default user will be given priority over region user
for user in users:
is_default_user_in_region = user.id in default_users_dic.keys()
is_default_user_in_region = user.id in list(default_users_dic.keys())
if not is_default_user_in_region:
sql_user = datamanager.add_user(user.id)
for role in user.role:
@ -284,8 +284,8 @@ class CustomerLogic(object):
return user_result_wrapper
except Exception as exception:
if 'Duplicate' in exception.message:
raise ErrorStatus(409, exception.message)
if 'Duplicate' in str(exception):
raise ErrorStatus(409, str(exception))
datamanager.rollback()
LOG.log_exception("Failed to add_users", exception)
raise exception
@ -413,8 +413,8 @@ class CustomerLogic(object):
except Exception as exception:
datamanager.rollback()
if 'Duplicate' in exception.message:
raise ErrorStatus(409, exception.message)
if 'Duplicate' in str(exception):
raise ErrorStatus(409, str(exception))
LOG.log_exception("Failed to add_default_users", exception)
raise
@ -758,7 +758,7 @@ class CustomerLogic(object):
resp = RdsProxy.get_status(sql_customer.uuid)
if resp.status_code == 200:
status_resp = resp.json()
if 'status' in status_resp.keys():
if 'status' in list(status_resp.keys()):
LOG.debug(
'RDS returned status: {}'.format(
status_resp['status']))

View File

@ -286,8 +286,8 @@ class GroupLogic(object):
except Exception as exception:
datamanager.rollback()
if 'Duplicate' in exception.message:
raise ErrorStatus(409, exception.message)
if 'Duplicate' in str(exception):
raise ErrorStatus(409, str(exception))
LOG.log_exception("Failed to add_group_default_users", exception)
raise
@ -357,8 +357,8 @@ class GroupLogic(object):
except Exception as exception:
datamanager.rollback()
if 'Duplicate' in exception.message:
raise ErrorStatus(409, exception.message)
if 'Duplicate' in str(exception):
raise ErrorStatus(409, str(exception))
LOG.log_exception("Failed to add_group_region_users", exception)
raise
@ -756,7 +756,7 @@ class GroupLogic(object):
uuids = [sql_group.uuid for sql_group in sql_groups
if sql_group and sql_group.uuid]
sql_in = ', '.join(list(map(lambda arg: "'%s'" % arg, uuids)))
sql_in = ', '.join(list(["'%s'" % arg for arg in uuids]))
resource_status = group_record.get_groups_status_by_uuids(sql_in)
for sql_group in sql_groups:
@ -822,7 +822,7 @@ class GroupLogic(object):
unique_customer[customer.customer.uuid] = [
customer.groups_role.role.name]
for customer, role_list in unique_customer.items():
for customer, role_list in list(unique_customer.items()):
role_result.append(
RoleResult(roles=role_list, customer=customer))
@ -835,7 +835,7 @@ class GroupLogic(object):
unique_domain[domain.domain_name] = [
domain.groups_role.role.name]
for domain, role_list in unique_domain.items():
for domain, role_list in list(unique_domain.items()):
role_result.append(RoleResult(roles=role_list, domain=domain))
return role_result
@ -885,7 +885,7 @@ class GroupLogic(object):
resp = RdsProxy.get_status(sql_group.uuid)
if resp.status_code == 200:
status_resp = resp.json()
if 'status' in status_resp.keys():
if 'status' in list(status_resp.keys()):
LOG.debug('RDS returned status: {}'.format(
status_resp['status']))
status = status_resp['status']

View File

@ -82,7 +82,7 @@ def update_customer_metadata(customer_uuid, metadata_wrapper, transaction_id):
def map_metadata(customer_id, metadata_wrapper):
sql_metadata_collection = []
for key, value in metadata_wrapper.metadata.iteritems():
for key, value in metadata_wrapper.metadata.items():
sql_metadata = CustomerMetadata()
sql_metadata.customer_id = customer_id
sql_metadata.field_key = key

View File

@ -1,19 +0,0 @@
[tox]
envlist=py27, pep8, cover
[testenv]
setenv= CMS_ENV=mock
PYTHONPATH={toxinidir}:{toxinidir}/cms_rest/extenal_mock/
deps= -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:pep8]
commands =
py.test --pep8 -m pep8
[testenv:cover]
commands=
coverage run setup.py test
coverage report
coverage html
#commands={envpython} setup.py test -v {posargs}

View File

@ -1,4 +1,3 @@
#!/usr/bin/env python
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
@ -14,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from ConfigParser import ConfigParser
from configparser import ConfigParser
from oslo_config import cfg
import re
from sqlalchemy import *

View File

@ -1,5 +1,5 @@
"""Configuration rest API input module."""
from __future__ import absolute_import
import logging
from orm.common.orm_common.utils import utils

View File

@ -1,4 +1,4 @@
from __future__ import absolute_import
from orm.common.orm_common.injector import injector
from orm.common.orm_common.utils import api_error_utils as err_utils

View File

@ -1,4 +1,4 @@
from __future__ import absolute_import
from orm.common.orm_common.injector import injector
from orm.common.orm_common.utils import api_error_utils as err_utils

View File

@ -1,6 +1,3 @@
from __future__ import absolute_import
from orm.common.orm_common.injector import injector
from orm.common.orm_common.utils import api_error_utils as err_utils
from orm.services.flavor_manager.fms_rest.data.wsme.models import RegionWrapper

View File

@ -1,4 +1,4 @@
from __future__ import absolute_import
from orm.common.orm_common.injector import injector
from orm.common.orm_common.utils import api_error_utils as err_utils

View File

@ -39,7 +39,7 @@ class LogsController(rest.RestController):
logger.info("Changing log level to [{}]".format(level))
try:
log_level = logging._levelNames.get(level.upper())
log_level = logging._nameToLevel.get(level.upper())
if log_level is not None:
self._change_log_level(log_level)
result = "Log level changed to {}.".format(level)
@ -49,7 +49,7 @@ class LogsController(rest.RestController):
"The given log level [{}] doesn't exist.".format(level))
except Exception as e:
result = "Fail to change log_level. Reason: {}".format(
e.message)
str(e))
logger.error(result)
return LogChangeResult(result)

Some files were not shown because too many files have changed in this diff Show More