Add comments, improve the fork method and add tests.
Change-Id: Ib75dc4e2e91cb34c1216ba689a297397db74a2cf
This commit is contained in:
parent
b2e011a456
commit
fa207dd7e6
|
@ -4,3 +4,9 @@ dist/
|
|||
*.egg-info/
|
||||
*.egg/
|
||||
build/
|
||||
.coverage
|
||||
.testrepository/
|
||||
.tox/
|
||||
cover/
|
||||
htmlcov/
|
||||
|
||||
|
|
|
@ -0,0 +1,4 @@
|
|||
[DEFAULT]
|
||||
test_command=${PYTHON:-python} -m subunit.run discover -t ./ ./ $LISTOPT $IDOPTION
|
||||
test_id_option=--load-list $IDFILE
|
||||
test_list_option=--list
|
|
@ -15,7 +15,9 @@ import copy
|
|||
import json
|
||||
import logging
|
||||
import requests
|
||||
from rackclient.openstack.common.gettextutils import _
|
||||
from rackclient import exceptions
|
||||
from rackclient.openstack.common import importutils
|
||||
|
||||
|
||||
class HTTPClient(object):
|
||||
|
@ -112,3 +114,23 @@ class HTTPClient(object):
|
|||
|
||||
def delete(self, url, **kwargs):
|
||||
return self.request(url, 'DELETE', **kwargs)
|
||||
|
||||
|
||||
def get_client_class(version):
|
||||
version_map = {
|
||||
'1': 'rackclient.v1.client.Client',
|
||||
}
|
||||
try:
|
||||
client_path = version_map[str(version)]
|
||||
except (KeyError, ValueError):
|
||||
msg = _("Invalid client version '%(version)s'. must be one of: "
|
||||
"%(keys)s") % {'version': version,
|
||||
'keys': ', '.join(version_map.keys())}
|
||||
raise exceptions.UnsupportedVersion(msg)
|
||||
|
||||
return importutils.import_class(client_path)
|
||||
|
||||
|
||||
def Client(version, *args, **kwargs):
|
||||
client_class = get_client_class(version)
|
||||
return client_class(*args, **kwargs)
|
||||
|
|
|
@ -11,6 +11,13 @@
|
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
class UnsupportedVersion(Exception):
|
||||
"""Indicates that the user is trying to use an unsupported
|
||||
version of the API.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class CommandError(Exception):
|
||||
pass
|
||||
|
||||
|
@ -59,7 +66,15 @@ class InternalServerError(HTTPException):
|
|||
message = "Internal Server Error"
|
||||
|
||||
|
||||
_error_classes = [BadRequest, NotFound, InternalServerError]
|
||||
class RateLimit(HTTPException):
|
||||
"""
|
||||
HTTP 413 - Too much Requests
|
||||
"""
|
||||
http_status = 413
|
||||
message = "This request was rate-limited."
|
||||
|
||||
|
||||
_error_classes = [BadRequest, NotFound, InternalServerError, RateLimit]
|
||||
_code_map = dict((c.http_status, c) for c in _error_classes)
|
||||
|
||||
|
||||
|
@ -86,17 +101,48 @@ def from_response(response, body, url, method=None):
|
|||
kwargs['message'] = message
|
||||
kwargs['details'] = details
|
||||
|
||||
cls = _code_map.get(response.status_code, BaseException)
|
||||
cls = _code_map.get(response.status_code, HTTPException)
|
||||
return cls(**kwargs)
|
||||
|
||||
|
||||
class BaseError(Exception):
|
||||
msg = "Unknown exception ocurred."
|
||||
|
||||
def __init__(self):
|
||||
super(Exception, self).__init__(self.msg)
|
||||
"""
|
||||
The base exception class for all exceptions except for HTTPException based classes.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class GetProcessContextError(Exception):
|
||||
msg = "Could not get process context."
|
||||
class ForkError(BaseError):
|
||||
pass
|
||||
|
||||
|
||||
class AMQPConnectionError(BaseError):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidDirectoryError(BaseError):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidFilePathError(BaseError):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidFSEndpointError(BaseError):
|
||||
pass
|
||||
|
||||
|
||||
class FileSystemAccessError(BaseError):
|
||||
pass
|
||||
|
||||
|
||||
class MetadataAccessError(BaseError):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidProcessError(BaseError):
|
||||
pass
|
||||
|
||||
|
||||
class ProcessInitError(BaseError):
|
||||
pass
|
||||
|
|
|
@ -1,18 +0,0 @@
|
|||
import json
|
||||
import requests
|
||||
|
||||
|
||||
def headers():
|
||||
return {"content-type": "application/json",
|
||||
"accept": "application/json"}
|
||||
|
||||
|
||||
def request(url, method, body=None):
|
||||
res = getattr(requests,method)(url, headers=headers(), data=json.dumps(body))
|
||||
return res.json()
|
||||
|
||||
|
||||
def get_metadata(resource):
|
||||
url = 'http://169.254.169.254/openstack/latest/meta_data.json'
|
||||
metadata = request(url, "get")["meta"].get(resource)
|
||||
return metadata
|
|
@ -1,79 +1,73 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import json
|
||||
import requests
|
||||
from rackclient import exceptions
|
||||
from rackclient.v1 import client
|
||||
from rackclient import client
|
||||
from rackclient.exceptions import MetadataAccessError
|
||||
|
||||
METADATA_URL = 'http://169.254.169.254/openstack/latest/meta_data.json'
|
||||
|
||||
|
||||
class ProcessContext(object):
|
||||
|
||||
def __init__(self, gid=None, ppid=None, pid=None, proxy_ip=None,
|
||||
proxy_port=8088, api_version='v1', args=None):
|
||||
self.gid = gid
|
||||
self.ppid = ppid
|
||||
self.pid = pid
|
||||
self.proxy_ip = proxy_ip
|
||||
url = 'http://%s:%d/%s' % (proxy_ip, proxy_port, api_version)
|
||||
self.proxy_url = url
|
||||
self._add_args(args)
|
||||
self.client = client.Client(rack_url=self.proxy_url)
|
||||
def __init__(self):
|
||||
self.gid = None
|
||||
self.pid = None
|
||||
self.ppid = None
|
||||
self.proxy_ip = None
|
||||
self.proxy_url = None
|
||||
self.fs_endpoint = None
|
||||
self.shm_endpoint = None
|
||||
self.ipc_endpoint = None
|
||||
self.client = None
|
||||
|
||||
def _add_args(self, args):
|
||||
for (k, v) in args.items():
|
||||
try:
|
||||
setattr(self, k, v)
|
||||
except AttributeError:
|
||||
pass
|
||||
def add_args(self, args):
|
||||
if isinstance(args, dict):
|
||||
for (k, v) in args.items():
|
||||
try:
|
||||
setattr(self, k, v)
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
def _get_process_list(self):
|
||||
return self.client.processes.list(self.gid)
|
||||
|
||||
def __getattr__(self, k):
|
||||
if k == 'process_list':
|
||||
return self._get_process_list()
|
||||
PCTXT = ProcessContext()
|
||||
|
||||
|
||||
def _get_metadata(metadata_url):
|
||||
resp = requests.get(metadata_url)
|
||||
|
||||
if resp.text:
|
||||
try:
|
||||
body = json.loads(resp.text)
|
||||
except ValueError:
|
||||
body = None
|
||||
else:
|
||||
body = None
|
||||
|
||||
if body:
|
||||
return body['meta']
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def _get_process_context():
|
||||
try:
|
||||
metadata = _get_metadata(METADATA_URL)
|
||||
pctxt = ProcessContext(
|
||||
gid=metadata.pop('gid'),
|
||||
ppid=metadata.pop('ppid') if "ppid" in metadata else None,
|
||||
pid=metadata.pop('pid'),
|
||||
proxy_ip=metadata.pop('proxy_ip'),
|
||||
args=metadata)
|
||||
resp = requests.get(metadata_url)
|
||||
except Exception as e:
|
||||
msg = "Could not get the metadata: %s" % e.message
|
||||
raise MetadataAccessError(msg)
|
||||
|
||||
proxy_info = pctxt.client.proxy.get(pctxt.gid)
|
||||
endpoints = {
|
||||
"fs_endpoint": proxy_info.fs_endpoint,
|
||||
"shm_endpoint": proxy_info.shm_endpoint,
|
||||
"ipc_endpoint": proxy_info.ipc_endpoint
|
||||
}
|
||||
pctxt._add_args(endpoints)
|
||||
return pctxt
|
||||
except Exception:
|
||||
raise exceptions.GetProcessContextError()
|
||||
body = json.loads(resp.text)
|
||||
return body['meta']
|
||||
|
||||
|
||||
try:
|
||||
PCTXT = _get_process_context()
|
||||
except exceptions.GetProcessContextError as e:
|
||||
PCTXT = None
|
||||
def init(client_version='1', proxy_port=8088, api_version='v1'):
|
||||
metadata = _get_metadata(METADATA_URL)
|
||||
|
||||
PCTXT.gid = metadata.pop('gid')
|
||||
PCTXT.pid = metadata.pop('pid')
|
||||
PCTXT.ppid = metadata.pop('ppid') if 'ppid' in metadata else None
|
||||
PCTXT.proxy_ip = metadata.pop('proxy_ip')
|
||||
PCTXT.proxy_url = 'http://%s:%d/%s' % \
|
||||
(PCTXT.proxy_ip, proxy_port, api_version)
|
||||
PCTXT.client = client.Client(client_version, rack_url=PCTXT.proxy_url)
|
||||
PCTXT.add_args(metadata)
|
||||
|
||||
proxy_info = PCTXT.client.proxy.get(PCTXT.gid)
|
||||
PCTXT.fs_endpoint = proxy_info.fs_endpoint
|
||||
PCTXT.shm_endpoint = proxy_info.shm_endpoint
|
||||
PCTXT.ipc_endpoint = proxy_info.ipc_endpoint
|
||||
|
|
|
@ -0,0 +1,36 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from rackclient import process_context
|
||||
from rackclient.v1.syscall.default import messaging
|
||||
from rackclient import exceptions
|
||||
|
||||
PCTXT = process_context.PCTXT
|
||||
|
||||
|
||||
def _is_exist():
|
||||
try:
|
||||
PCTXT.client.processes.get(PCTXT.gid, PCTXT.pid)
|
||||
except exceptions.NotFound:
|
||||
msg = "This process is not recognized by RACK"
|
||||
raise exceptions.InvalidProcessError(msg)
|
||||
|
||||
|
||||
def init():
|
||||
try:
|
||||
process_context.init()
|
||||
_is_exist()
|
||||
messaging.init()
|
||||
except Exception as e:
|
||||
msg = "Failed to initialize the process: %s." % e.message
|
||||
raise exceptions.ProcessInitError(msg)
|
|
@ -1,3 +1,17 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Command-line interface to the RACK API.
|
||||
"""
|
||||
|
@ -9,7 +23,7 @@ from oslo.utils import encodeutils
|
|||
from rackclient.openstack.common.gettextutils import _
|
||||
from rackclient.openstack.common import cliutils
|
||||
from rackclient.v1 import shell as shell_v1
|
||||
from rackclient.v1 import client as client_v1
|
||||
from rackclient import client
|
||||
from rackclient import exceptions
|
||||
|
||||
DEFAULT_RACK_API_VERSION = "1"
|
||||
|
@ -157,10 +171,7 @@ class RackShell(object):
|
|||
_("You must provide an RACK url "
|
||||
"via either --rack-url or env[RACK_URL] "))
|
||||
|
||||
if options.rack_api_version == '1':
|
||||
self.cs = client_v1.Client(rack_url=args.rack_url, http_log_debug=options.debug)
|
||||
else:
|
||||
return 0
|
||||
self.cs = client.Client(options.rack_api_version, rack_url=args.rack_url, http_log_debug=options.debug)
|
||||
|
||||
if args.func.func_name[3:].split('_')[0] != 'group' and not args.gid:
|
||||
raise exceptions.CommandError(
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
|
|
@ -0,0 +1,62 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
class FakeClient(object):
|
||||
|
||||
def assert_called(self, method, url, body=None, pos=-1):
|
||||
"""
|
||||
Assert than an API method was just called.
|
||||
"""
|
||||
expected = (method, url)
|
||||
called = self.client.callstack[pos][0:2]
|
||||
|
||||
assert self.client.callstack, \
|
||||
"Expected %s %s but no calls were made." % expected
|
||||
|
||||
assert expected == called, \
|
||||
'Expected %s %s; got %s %s' % (expected + called)
|
||||
|
||||
if body is not None:
|
||||
if self.client.callstack[pos][2] != body:
|
||||
raise AssertionError('%r != %r' %
|
||||
(self.client.callstack[pos][2], body))
|
||||
|
||||
def assert_called_anytime(self, method, url, body=None):
|
||||
"""
|
||||
Assert than an API method was called anytime in the test.
|
||||
"""
|
||||
expected = (method, url)
|
||||
|
||||
assert self.client.callstack, \
|
||||
"Expected %s %s but no calls were made." % expected
|
||||
|
||||
found = False
|
||||
for entry in self.client.callstack:
|
||||
if expected == entry[0:2]:
|
||||
found = True
|
||||
break
|
||||
|
||||
assert found, 'Expected %s; got %s' % (expected, self.client.callstack)
|
||||
if body is not None:
|
||||
try:
|
||||
assert entry[2] == body
|
||||
except AssertionError:
|
||||
print(entry[2])
|
||||
print("!=")
|
||||
print(body)
|
||||
raise
|
||||
|
||||
self.client.callstack = []
|
||||
|
||||
def clear_callstack(self):
|
||||
self.client.callstack = []
|
|
@ -0,0 +1,128 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import json
|
||||
import logging
|
||||
import mock
|
||||
import fixtures
|
||||
import requests
|
||||
from rackclient import client
|
||||
from rackclient.tests import utils
|
||||
|
||||
class ClientTest(utils.TestCase):
|
||||
|
||||
def test_log_req(self):
|
||||
self.logger = self.useFixture(
|
||||
fixtures.FakeLogger(
|
||||
name=client.__name__,
|
||||
format="%(message)s",
|
||||
level=logging.DEBUG,
|
||||
nuke_handlers=True
|
||||
)
|
||||
)
|
||||
cs = client.HTTPClient('rack_url', True)
|
||||
|
||||
cs.http_log_req('GET', '/foo', {'headers': {}})
|
||||
cs.http_log_req('GET', '/foo', {'headers': {
|
||||
'Content-Type': 'application/json',
|
||||
'User-Agent': 'python-rackclient'
|
||||
}})
|
||||
|
||||
data = {'group': {
|
||||
'name': 'group1',
|
||||
'description': 'This is group1'
|
||||
}}
|
||||
cs.http_log_req('POST', '/foo', {
|
||||
'headers': {
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
'data': json.dumps(data)
|
||||
})
|
||||
|
||||
output = self.logger.output.split('\n')
|
||||
self.assertIn("REQ: curl -i '/foo' -X GET", output)
|
||||
self.assertIn("REQ: curl -i '/foo' -X GET "
|
||||
'-H "Content-Type: application/json" '
|
||||
'-H "User-Agent: python-rackclient"',
|
||||
output)
|
||||
self.assertIn("REQ: curl -i '/foo' -X POST "
|
||||
'-H "Content-Type: application/json" '
|
||||
'-d \'{"group": {"name": "group1", '
|
||||
'"description": "This is group1"}}\'',
|
||||
output)
|
||||
|
||||
def test_log_resp(self):
|
||||
self.logger = self.useFixture(
|
||||
fixtures.FakeLogger(
|
||||
name=client.__name__,
|
||||
format="%(message)s",
|
||||
level=logging.DEBUG,
|
||||
nuke_handlers=True
|
||||
)
|
||||
)
|
||||
cs = client.HTTPClient('rack_url', True)
|
||||
|
||||
text = '{"group": {"name": "group1", "description": "This is group1"}}'
|
||||
resp = utils.TestResponse({'status_code': 200, 'headers': {},
|
||||
'text': text})
|
||||
cs.http_log_resp(resp)
|
||||
|
||||
output = self.logger.output.split('\n')
|
||||
self.assertIn("RESP: [200] {}", output)
|
||||
self.assertIn('RESP BODY: {"group": {"name": "group1", '
|
||||
'"description": "This is group1"}}', output)
|
||||
|
||||
def test_request(self):
|
||||
cs = client.HTTPClient('http://www.foo.com', False)
|
||||
data = (
|
||||
'{"group": { "gid": "11111111",'
|
||||
'"user_id": "4ffc664c198e435e9853f253lkbcd7a7",'
|
||||
'"project_id": "9sac664c198e435e9853f253lkbcd7a7",'
|
||||
'"name": "group1",'
|
||||
'"description": "This is group1",'
|
||||
'"status": "ACTIVE"}}'
|
||||
)
|
||||
|
||||
mock_request = mock.Mock()
|
||||
mock_request.return_value = requests.Response()
|
||||
mock_request.return_value.status_code = 201
|
||||
mock_request.return_value._content = data
|
||||
|
||||
with mock.patch('requests.request', mock_request):
|
||||
resp, body = cs.post('/groups', body=data)
|
||||
kwargs = {
|
||||
'headers': {
|
||||
'User-Agent': 'python-rackclient',
|
||||
'Accept': 'application/json',
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
'data': json.dumps(data)
|
||||
}
|
||||
mock_request.assert_called_with('POST',
|
||||
'http://www.foo.com/groups',
|
||||
**kwargs)
|
||||
|
||||
def test_request_raise_exception(self):
|
||||
cs = client.HTTPClient('http://www.foo.com', False)
|
||||
|
||||
mock_request = mock.Mock()
|
||||
mock_request.return_value = requests.Response()
|
||||
mock_request.return_value.status_code = 404
|
||||
|
||||
mock_exec = mock.Mock()
|
||||
mock_exec.return_value = Exception('Not Found')
|
||||
|
||||
with mock.patch('requests.request', mock_request):
|
||||
with mock.patch('rackclient.exceptions.from_response',
|
||||
mock_exec):
|
||||
self.assertRaises(Exception, cs.get, '/groups')
|
|
@ -0,0 +1,67 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from mock import patch
|
||||
import requests
|
||||
from rackclient import process_context as pctxt
|
||||
from rackclient.tests import utils
|
||||
from rackclient.v1.proxy import ProxyManager, Proxy
|
||||
from rackclient import exceptions
|
||||
|
||||
|
||||
class ProcessContextTest(utils.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(ProcessContextTest, self).setUp()
|
||||
|
||||
patcher = patch('requests.get')
|
||||
self.addCleanup(patcher.stop)
|
||||
self.mock_request = patcher.start()
|
||||
self.mock_request.return_value = requests.Response()
|
||||
self.mock_request.return_value._content = \
|
||||
('{"meta": {"gid": "11111111", '
|
||||
'"pid": "22222222", "ppid": "33333333", '
|
||||
'"proxy_ip": "10.0.0.2", "opt1": "value1", '
|
||||
'"opt2": "value2"}}')
|
||||
|
||||
@patch.object(ProxyManager, 'get')
|
||||
def test_init(self, mock_proxy_get):
|
||||
proxy = {
|
||||
'fs_endpoint': 'fs_endpoint',
|
||||
'shm_endpoint': 'shm_endpoint',
|
||||
'ipc_endpoint': 'ipc_endpoint'
|
||||
}
|
||||
mock_proxy_get.return_value = Proxy(None, proxy)
|
||||
|
||||
pctxt.init()
|
||||
d = pctxt.PCTXT.__dict__
|
||||
d.pop('client')
|
||||
|
||||
expected = {
|
||||
'ipc_endpoint': 'ipc_endpoint',
|
||||
'fs_endpoint': 'fs_endpoint',
|
||||
'proxy_url': 'http://10.0.0.2:8088/v1',
|
||||
'pid': '22222222',
|
||||
'gid': '11111111',
|
||||
'proxy_ip': '10.0.0.2',
|
||||
'shm_endpoint': 'shm_endpoint',
|
||||
'ppid': '33333333',
|
||||
'opt1': 'value1',
|
||||
'opt2': 'value2'
|
||||
}
|
||||
self.assertEqual(expected, d)
|
||||
mock_proxy_get.assert_called_with('11111111')
|
||||
|
||||
def test_init_metadata_error(self):
|
||||
self.mock_request.side_effect = Exception()
|
||||
self.assertRaises(exceptions.MetadataAccessError, pctxt.init)
|
|
@ -0,0 +1,128 @@
|
|||
import re
|
||||
import StringIO
|
||||
import sys
|
||||
import fixtures
|
||||
import mock
|
||||
from testtools import matchers
|
||||
from rackclient import exceptions
|
||||
import rackclient.shell
|
||||
from rackclient.tests import utils
|
||||
|
||||
FAKE_ENV = {'RACK_URL': 'http://www.example.com:8088/v1',
|
||||
'RACK_GID': '11111111'}
|
||||
|
||||
class ShellTest(utils.TestCase):
|
||||
|
||||
def make_env(self, exclude=None, fake_env=FAKE_ENV):
|
||||
env = dict((k, v) for k, v in fake_env.items() if k != exclude)
|
||||
self.useFixture(fixtures.MonkeyPatch('os.environ', env))
|
||||
|
||||
def shell(self, argstr, exitcodes=(0,)):
|
||||
orig = sys.stdout
|
||||
orig_stderr = sys.stderr
|
||||
try:
|
||||
sys.stdout = StringIO.StringIO()
|
||||
sys.stderr = StringIO.StringIO()
|
||||
_shell = rackclient.shell.RackShell()
|
||||
_shell.main(argstr.split())
|
||||
except SystemExit:
|
||||
exc_type, exc_value, exc_traceback = sys.exc_info()
|
||||
self.assertIn(exc_value.code, exitcodes)
|
||||
finally:
|
||||
stdout = sys.stdout.getvalue()
|
||||
sys.stdout.close()
|
||||
sys.stdout = orig
|
||||
stderr = sys.stderr.getvalue()
|
||||
sys.stderr.close()
|
||||
sys.stderr = orig_stderr
|
||||
return (stdout, stderr)
|
||||
|
||||
def test_help(self):
|
||||
required = [
|
||||
'.*?^usage: ',
|
||||
'.*?^\s+process-show\s+Show details about the given process',
|
||||
'.*?^See "rack help COMMAND" for help on a specific command',
|
||||
]
|
||||
stdout, stderr = self.shell('help')
|
||||
for r in required:
|
||||
self.assertThat((stdout + stderr),
|
||||
matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE))
|
||||
|
||||
def test_help_unknown_command(self):
|
||||
self.assertRaises(exceptions.CommandError, self.shell, 'help foofoo')
|
||||
|
||||
def test_help_on_subcommand(self):
|
||||
required = [
|
||||
'.*?^usage: ',
|
||||
'.*?^Show details about the given process',
|
||||
'.*?^positional arguments:',
|
||||
]
|
||||
stdout, stderr = self.shell('help process-show')
|
||||
for r in required:
|
||||
self.assertThat((stdout + stderr),
|
||||
matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE))
|
||||
|
||||
def test_help_no_options(self):
|
||||
required = [
|
||||
'.*?^usage: ',
|
||||
'.*?^\s+process-show\s+Show details about the given process',
|
||||
'.*?^See "rack help COMMAND" for help on a specific command',
|
||||
]
|
||||
stdout, stderr = self.shell('')
|
||||
for r in required:
|
||||
self.assertThat((stdout + stderr),
|
||||
matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE))
|
||||
|
||||
def test_no_url(self):
|
||||
required = ('You must provide an RACK url '
|
||||
'via either --rack-url or env[RACK_URL] ')
|
||||
self.make_env(exclude='RACK_URL')
|
||||
try:
|
||||
self.shell('process-list')
|
||||
except exceptions.CommandError as message:
|
||||
self.assertEqual(required, message.args[0])
|
||||
else:
|
||||
self.fail('CommandError not raised')
|
||||
|
||||
def test_no_gid(self):
|
||||
required = ('You must provide a gid '
|
||||
'via either --gid or env[RACK_GID] ')
|
||||
self.make_env(exclude='RACK_GID')
|
||||
try:
|
||||
self.shell('process-list')
|
||||
except exceptions.CommandError as message:
|
||||
self.assertEqual(required, message.args[0])
|
||||
else:
|
||||
self.fail('CommandError not raised')
|
||||
|
||||
@mock.patch('sys.argv', ['rack'])
|
||||
@mock.patch('sys.stdout', StringIO.StringIO())
|
||||
@mock.patch('sys.stderr', StringIO.StringIO())
|
||||
def test_main_noargs(self):
|
||||
# Ensure that main works with no command-line arguments
|
||||
try:
|
||||
rackclient.shell.main()
|
||||
except SystemExit:
|
||||
self.fail('Unexpected SystemExit')
|
||||
|
||||
# We expect the normal usage as a result
|
||||
self.assertIn('Command-line interface to the RACK API',
|
||||
sys.stdout.getvalue())
|
||||
|
||||
@mock.patch('sys.stderr', StringIO.StringIO())
|
||||
@mock.patch.object(rackclient.shell.RackShell, 'main')
|
||||
def test_main_exception(self, mock_rack_shell):
|
||||
mock_rack_shell.side_effect = exceptions.CommandError('error message')
|
||||
try:
|
||||
rackclient.shell.main()
|
||||
except SystemExit as ex:
|
||||
self.assertEqual(ex.code, 1)
|
||||
self.assertIn('ERROR (CommandError): error message', sys.stderr.getvalue())
|
||||
|
||||
@mock.patch.object(rackclient.shell.RackShell, 'main')
|
||||
def test_main_keyboard_interrupt(self, mock_rack_shell):
|
||||
mock_rack_shell.side_effect = KeyboardInterrupt()
|
||||
try:
|
||||
rackclient.shell.main()
|
||||
except SystemExit as ex:
|
||||
self.assertEqual(ex.code, 1)
|
|
@ -0,0 +1,84 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from fixtures import fixture
|
||||
import requests
|
||||
import testtools
|
||||
from rackclient import process_context
|
||||
from rackclient import client
|
||||
|
||||
PCTXT = process_context.PCTXT
|
||||
|
||||
|
||||
class PContextFixture(fixture.Fixture):
|
||||
|
||||
def setUp(self):
|
||||
super(PContextFixture, self).setUp()
|
||||
self.attrs = dir(PCTXT)
|
||||
self.addCleanup(self.cleanup_pctxt)
|
||||
|
||||
PCTXT.gid = 'gid'
|
||||
PCTXT.pid = 'pid'
|
||||
PCTXT.ppid = None
|
||||
PCTXT.proxy_ip = '10.0.0.2'
|
||||
PCTXT.proxy_url = 'http://10.0.0.2:8088/v1'
|
||||
PCTXT.fs_endpoint = None
|
||||
PCTXT.shm_endpoint = None
|
||||
PCTXT.ipc_endpoint = None
|
||||
PCTXT.client = client.Client('1', rack_url=PCTXT.proxy_url)
|
||||
|
||||
def cleanup_pctxt(self):
|
||||
attrs = dir(PCTXT)
|
||||
for attr in attrs:
|
||||
if attr not in self.attrs: delattr(PCTXT, attr)
|
||||
|
||||
|
||||
class TestCase(testtools.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestCase, self).setUp()
|
||||
# if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
|
||||
# os.environ.get('OS_STDOUT_CAPTURE') == '1'):
|
||||
# stdout = self.useFixture(fixtures.StringStream('stdout')).stream
|
||||
# self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
|
||||
# if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
|
||||
# os.environ.get('OS_STDERR_CAPTURE') == '1'):
|
||||
# stderr = self.useFixture(fixtures.StringStream('stderr')).stream
|
||||
# self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
|
||||
|
||||
self.useFixture(PContextFixture())
|
||||
|
||||
|
||||
class TestResponse(requests.Response):
|
||||
"""
|
||||
Class used to wrap requests.Response and provide some
|
||||
convenience to initialize with a dict
|
||||
"""
|
||||
|
||||
def __init__(self, data):
|
||||
super(TestResponse, self).__init__()
|
||||
self._text = None
|
||||
if isinstance(data, dict):
|
||||
self.status_code = data.get('status_code')
|
||||
self.headers = data.get('headers')
|
||||
# Fake the text attribute to streamline Response creation
|
||||
self._text = data.get('text')
|
||||
else:
|
||||
self.status_code = data
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.__dict__ == other.__dict__
|
||||
|
||||
@property
|
||||
def text(self):
|
||||
return self._text
|
|
@ -0,0 +1,359 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import urlparse
|
||||
from rackclient import client as base_client
|
||||
from rackclient.tests import fakes
|
||||
from rackclient.tests import utils
|
||||
from rackclient.v1 import client
|
||||
|
||||
|
||||
class FakeClient(fakes.FakeClient, client.Client):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
client.Client.__init__(self, 'rack_rul', 'http_log_debug')
|
||||
self.client = FakeHTTPClient()
|
||||
|
||||
|
||||
class FakeHTTPClient(base_client.HTTPClient):
    """A fake HTTP client that serves canned responses for unit tests.

    Instead of issuing real HTTP requests, :meth:`request` dispatches to
    a local handler method whose name is derived from the HTTP verb and
    the URL (e.g. ``GET /groups`` -> ``get_groups``).  Each handler
    returns a ``(status, headers, body)`` tuple of fixture data.
    """

    def __init__(self, **kwargs):
        # Deliberately does not call the parent __init__: the fake needs
        # no real connection settings.
        self.rack_url = 'rack_url'
        self.http_log_debug = 'http_log_debug'
        # Records every dispatched call as (method, url, body) so tests
        # can assert which API calls were made, and in what order.
        self.callstack = []

    def request(self, url, method, **kwargs):
        """Dispatch *method* *url* to the matching fake handler.

        :raises AssertionError: when no handler exists for the request,
            surfacing unexpected API calls in tests.
        """
        if method in ['GET', 'DELETE']:
            assert 'body' not in kwargs
        elif method == 'PUT':
            assert 'body' in kwargs

        # Fold the query string into kwargs, then normalize the URL path
        # into a legal Python identifier (e.g. /groups/1.x -> groups_1_x).
        args = urlparse.parse_qsl(urlparse.urlparse(url)[4])
        kwargs.update(args)
        munged_url = url.rsplit('?', 1)[0]
        munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_')
        munged_url = munged_url.replace('-', '_')
        munged_url = munged_url.replace(' ', '_')

        callback = "%s_%s" % (method.lower(), munged_url)

        if not hasattr(self, callback):
            raise AssertionError('Called unknown API method: %s %s, '
                                 'expected fakes method name: %s' %
                                 (method, url, callback))

        self.callstack.append((method, url, kwargs.get('body')))

        status, headers, body = getattr(self, callback)(**kwargs)
        r = utils.TestResponse({
            "status_code": status,
            "text": body,
            "headers": headers,
        })
        return r, body

    #
    # groups
    #

    def get_groups(self, **kw):
        # Fixture: two groups owned by the same user/project.
        groups = {'groups': [
            {
                'gid': '11111111',
                "user_id": '4ffc664c198e435e9853f253lkbcd7a7',
                "project_id": '9sac664c198e435e9853f253lkbcd7a7',
                "name": 'group1',
                "description": 'This is group1',
                "status": 'ACTIVE'
            },
            {
                'gid': '22222222',
                "user_id": '4ffc664c198e435e9853f253lkbcd7a7',
                "project_id": '9sac664c198e435e9853f253lkbcd7a7',
                "name": 'group2',
                "description": 'This is group2',
                "status": 'ACTIVE'
            }
        ]}
        return (200, {}, groups)

    def get_groups_11111111(self, **kw):
        group = {'group': self.get_groups()[2]["groups"][0]}
        return (200, {}, group)

    def post_groups(self, body, **kw):
        group = {'group': self.get_groups()[2]["groups"][0]}
        return (201, {}, group)

    def put_groups_11111111(self, body, **kw):
        group = {'group': self.get_groups()[2]["groups"][0]}
        return (200, {}, group)

    def delete_groups_11111111(self, **kw):
        return (204, {}, None)

    #
    # keypairs
    #

    def get_groups_11111111_keypairs(self, **kw):
        keypairs = {'keypairs': [
            {
                'keypair_id': 'aaaaaaaa',
                'nova_keypair_id': 'keypair1',
                'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
                'project_id': '9sac664c198e435e9853f253lkbcd7a7',
                'gid': '11111111',
                'name': 'keypair1',
                'private_key': '1234',
                'is_default': True,
                'status': 'Exist'
            },
            {
                'keypair_id': 'bbbbbbbb',
                'nova_keypair_id': 'keypair2',
                'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
                'project_id': '9sac664c198e435e9853f253lkbcd7a7',
                'gid': '11111111',
                'name': 'keypair2',
                'private_key': '5678',
                'is_default': False,
                'status': 'Exist'
            }
        ]}
        return (200, {}, keypairs)

    def get_groups_11111111_keypairs_aaaaaaaa(self, **kw):
        keypair = {'keypair': self.get_groups_11111111_keypairs()[2]['keypairs'][0]}
        return (200, {}, keypair)

    def post_groups_11111111_keypairs(self, body, **kw):
        keypair = {'keypair': self.get_groups_11111111_keypairs()[2]['keypairs'][0]}
        return (201, {}, keypair)

    def put_groups_11111111_keypairs_aaaaaaaa(self, body, **kw):
        keypair = {'keypair': self.get_groups_11111111_keypairs()[2]['keypairs'][0]}
        return (200, {}, keypair)

    def delete_groups_11111111_keypairs_aaaaaaaa(self, **kw):
        return (204, {}, None)

    #
    # securitygroups
    #

    def get_groups_11111111_securitygroups(self, **kw):
        securitygroups = {'securitygroups': [
            {
                'securitygroup_id': 'aaaaaaaa',
                'neutron_securitygroup_id': 'pppppppp',
                'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
                'project_id': '9sac664c198e435e9853f253lkbcd7a7',
                'gid': '11111111',
                'name': 'securitygroup1',
                'is_default': True,
                'status': 'Exist'
            },
            {
                'securitygroup_id': 'bbbbbbbb',
                'neutron_securitygroup_id': 'qqqqqqqq',
                'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
                'project_id': '9sac664c198e435e9853f253lkbcd7a7',
                'gid': '11111111',
                'name': 'securitygroup2',
                'is_default': False,
                'status': 'Exist'
            }
        ]}
        return (200, {}, securitygroups)

    def get_groups_11111111_securitygroups_aaaaaaaa(self, **kw):
        securitygroup = {'securitygroup': self.get_groups_11111111_securitygroups()[2]['securitygroups'][0]}
        return (200, {}, securitygroup)

    def post_groups_11111111_securitygroups(self, body, **kw):
        securitygroup = {'securitygroup': self.get_groups_11111111_securitygroups()[2]['securitygroups'][0]}
        return (201, {}, securitygroup)

    def put_groups_11111111_securitygroups_aaaaaaaa(self, body, **kw):
        securitygroup = {'securitygroup': self.get_groups_11111111_securitygroups()[2]['securitygroups'][0]}
        return (200, {}, securitygroup)

    def delete_groups_11111111_securitygroups_aaaaaaaa(self, **kw):
        return (204, {}, None)

    #
    # networks
    #

    def get_groups_11111111_networks(self, **kw):
        networks = {'networks': [
            {
                'network_id': 'aaaaaaaa',
                'neutron_network_id': 'pppppppp',
                'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
                'project_id': '9sac664c198e435e9853f253lkbcd7a7',
                'gid': '11111111',
                'name': 'network1',
                'is_admin': True,
                'ext_router_id': 'rrrrrrrr',
                'status': 'Exist'
            },
            {
                'network_id': 'bbbbbbbb',
                'neutron_network_id': 'qqqqqqqq',
                'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
                'project_id': '9sac664c198e435e9853f253lkbcd7a7',
                'gid': '11111111',
                'name': 'network2',
                'is_admin': False,
                'ext_router_id': 'rrrrrrrr',
                'status': 'Exist'
            }
        ]}
        return (200, {}, networks)

    def get_groups_11111111_networks_aaaaaaaa(self, **kw):
        network = {'network': self.get_groups_11111111_networks()[2]['networks'][0]}
        return (200, {}, network)

    def post_groups_11111111_networks(self, body, **kw):
        network = {'network': self.get_groups_11111111_networks()[2]['networks'][0]}
        return (201, {}, network)

    def delete_groups_11111111_networks_aaaaaaaa(self, **kw):
        return (204, {}, None)

    #
    # processes
    #

    def get_groups_11111111_processes(self, **kw):
        # NOTE(review): the first entry lacks a 'process_id' key while the
        # second entry has one — confirm whether that is intentional.
        processes = {'processes': [
            {
                'nova_instance_id': 'pppppppp',
                'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
                'project_id': '9sac664c198e435e9853f253lkbcd7a7',
                'gid': '11111111',
                'pid': 'aaaaaaaa',
                'ppid': None,
                'name': 'process1',
                'glance_image_id': 'xxxxxxxx',
                'nova_flavor_id': 'yyyyyyyy',
                'keypair_id': 'iiiiiiii',
                'securitygroup_ids': [
                    'jjjjjjjj', 'kkkkkkkk'
                ],
                'networks': [
                    {'network_id': 'mmmmmmmm',
                     'fixed': '10.0.0.2',
                     'floating': '1.1.1.1'}
                ],
                'app_status': 'ACTIVE',
                'userdata': 'IyEvYmluL3NoICBlY2hvICJIZWxsbyI=',
                'status': 'ACTIVE',
                'args': {
                    'key1': 'value1',
                    'key2': 'value2'
                }
            },
            {
                'process_id': 'bbbbbbbb',
                'nova_instance_id': 'qqqqqqqq',
                'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
                'project_id': '9sac664c198e435e9853f253lkbcd7a7',
                'gid': '11111111',
                'pid': 'bbbbbbbb',
                'ppid': 'aaaaaaaa',
                'name': 'process2',
                'glance_image_id': 'xxxxxxxx',
                'nova_flavor_id': 'yyyyyyyy',
                'keypair_id': 'iiiiiiii',
                'securitygroup_ids': [
                    'jjjjjjjj', 'kkkkkkkk'
                ],
                'networks': [
                    {'network_id': 'mmmmmmmm',
                     'fixed': '10.0.0.3',
                     'floating': '2.2.2.2'}
                ],
                'app_status': 'ACTIVE',
                'userdata': 'IyEvYmluL3NoICBlY2hvICJIZWxsbyI=',
                'status': 'ACTIVE',
                'args': {
                    'key1': 'value1',
                    'key2': 'value2'
                }
            }
        ]}
        return (200, {}, processes)

    def get_groups_11111111_processes_aaaaaaaa(self, **kw):
        process = {'process': self.get_groups_11111111_processes()[2]['processes'][0]}
        return (200, {}, process)

    def post_groups_11111111_processes(self, body, **kw):
        process = {'process': self.get_groups_11111111_processes()[2]['processes'][0]}
        return (202, {}, process)

    def put_groups_11111111_processes_aaaaaaaa(self, body, **kw):
        process = {'process': self.get_groups_11111111_processes()[2]['processes'][0]}
        return (200, {}, process)

    def delete_groups_11111111_processes_aaaaaaaa(self, **kw):
        return (204, {}, None)

    #
    # proxy
    #

    def get_groups_11111111_proxy(self, **kw):
        proxy = {'proxy': {
            'nova_instance_id': 'pppppppp',
            'user_id': '4ffc664c198e435e9853f253lkbcd7a7',
            'project_id': '9sac664c198e435e9853f253lkbcd7a7',
            'gid': '11111111',
            'pid': 'aaaaaaaa',
            'ppid': None,
            'name': 'proxy',
            'glance_image_id': 'xxxxxxxx',
            'nova_flavor_id': 'yyyyyyyy',
            'keypair_id': 'iiiiiiii',
            'securitygroup_ids': [
                'jjjjjjjj', 'kkkkkkkk'
            ],
            'networks': [
                {'network_id': 'mmmmmmmm',
                 'fixed': '10.0.0.2',
                 'floating': '1.1.1.1'}
            ],
            'app_status': 'ACTIVE',
            'userdata': 'IyEvYmluL3NoICBlY2hvICJIZWxsbyI=',
            'status': 'ACTIVE',
            'args': {
                'key1': 'value1',
                'key2': 'value2'
            },
            'ipc_endpoint': 'ipc_endpoint',
            'shm_endpoint': 'shm_endpoint',
            'fs_endpoint': 'fs_endpoint'
        }}
        return (200, {}, proxy)

    def post_groups_11111111_proxy(self, body, **kw):
        proxy = {'proxy': self.get_groups_11111111_proxy()[2]['proxy']}
        return (202, {}, proxy)

    def put_groups_11111111_proxy(self, body, **kw):
        proxy = {'proxy': self.get_groups_11111111_proxy()[2]['proxy']}
        return (200, {}, proxy)
|
|
@ -0,0 +1,163 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from mock import patch
|
||||
from swiftclient import exceptions as swift_exc
|
||||
from rackclient.tests import utils
|
||||
from rackclient.v1.syscall.default import file as rackfile
|
||||
from rackclient import process_context
|
||||
from rackclient.exceptions import InvalidFSEndpointError
|
||||
from rackclient.exceptions import InvalidDirectoryError
|
||||
from rackclient.exceptions import InvalidFilePathError
|
||||
from rackclient.exceptions import FileSystemAccessError
|
||||
|
||||
PCTXT = process_context.PCTXT
|
||||
|
||||
|
||||
class FileTest(utils.TestCase):
    """Tests for the Swift-backed file syscall module.

    All tests mock ``swiftclient.client.Connection`` so no real Swift
    endpoint is contacted.
    """

    def setUp(self):
        super(FileTest, self).setUp()
        # Replace the Swift connection class; each test gets a fresh mock
        # whose get_auth() yields dummy (url, token) credentials.
        patcher = patch('swiftclient.client.Connection', autospec=True)
        self.addCleanup(patcher.stop)
        self.mock_conn = patcher.start()
        self.mock_conn.return_value.get_auth.return_value = 'fake', 'fake'

    def test_get_swift_client(self):
        # Without a PCTXT.fs_endpoint, the built-in defaults are used.
        # (The get_auth stub is already set up in setUp.)
        rackfile._get_swift_client()
        expected = {
            "user": "rack:admin",
            "key": "admin",
            "authurl": "http://10.0.0.2:8080/auth/v1.0"
        }
        self.mock_conn.assert_any_call(**expected)
        self.mock_conn.assert_any_call(preauthurl='fake', preauthtoken='fake')

    def test_get_swift_client_fs_endpoint(self):
        # JSON credentials in PCTXT.fs_endpoint take precedence over the
        # built-in defaults.
        endpoint = ('{"os_username": "user", '
                    '"os_password": "password", '
                    '"os_tenant_name": "tenant", '
                    '"os_auth_url": "http://www.example.com:5000/v2.0"}')
        PCTXT.fs_endpoint = endpoint
        rackfile._get_swift_client()
        expected = {
            "user": 'user',
            "key": 'password',
            "tenant_name": 'tenant',
            "authurl": 'http://www.example.com:5000/v2.0',
            "auth_version": "2"
        }
        self.mock_conn.assert_any_call(**expected)

    def test_get_swift_client_invalid_fs_endpoint_error(self):
        # A non-JSON endpoint string must be rejected.
        PCTXT.fs_endpoint = 'invalid'
        self.assertRaises(InvalidFSEndpointError, rackfile._get_swift_client)

    def test_listdir(self):
        self.mock_conn.return_value.get_container.return_value = \
            None, [{'name': 'file1'}, {'name': 'file2'}]
        files = rackfile.listdir('/dir')

        # The leading slash is stripped to form the container name.
        self.mock_conn.return_value.get_container.assert_called_with('dir')
        self.assertEqual('/dir/file1', files[0].path)
        self.assertEqual('/dir/file2', files[1].path)

    def test_listdir_invalid_directory_error(self):
        # 404 from Swift maps to InvalidDirectoryError.
        self.mock_conn.return_value.get_container.side_effect = \
            swift_exc.ClientException('', http_status=404)
        self.assertRaises(InvalidDirectoryError, rackfile.listdir, 'dir')

    def test_listdir_filesystem_error(self):
        # Any other Swift error maps to FileSystemAccessError.
        self.mock_conn.return_value.get_container.side_effect = \
            swift_exc.ClientException('', http_status=500)
        self.assertRaises(FileSystemAccessError, rackfile.listdir, 'dir')

    def test_file_read_mode(self):
        self.mock_conn.return_value.get_object.return_value = \
            None, 'example text'
        f = rackfile.File('/dir1/dir2/file.txt')
        f.load()

        # First path segment is the container, the rest the object name.
        self.mock_conn.return_value.get_object.assert_called_with(
            'dir1', 'dir2/file.txt', None)
        self.assertEqual('example text', f.read())

        # A second load() must be a no-op once the content is cached.
        f.load()
        call_count = self.mock_conn.return_value.get_object.call_count
        self.assertEqual(1, call_count)

        f.close()

    def test_file_read_mode_with_chunk_size(self):
        def _content():
            # Simulates Swift streaming the object in two chunks.
            for i in ['11111111', '22222222']:
                yield i

        self.mock_conn.return_value.get_object.return_value = \
            None, _content()
        f = rackfile.File('/dir1/dir2/file.txt')
        f.load(8)

        self.mock_conn.return_value.get_object.assert_called_with(
            'dir1', 'dir2/file.txt', 8)
        self.assertEqual('1111111122222222', f.read())
        f.close()

    def test_file_load_invalid_file_path_error(self):
        # 404 from Swift maps to InvalidFilePathError.
        self.mock_conn.return_value.get_object.side_effect = \
            swift_exc.ClientException('', http_status=404)
        f = rackfile.File('/dir1/dir2/file.txt')
        self.assertRaises(InvalidFilePathError, f.load)

    def test_file_load_filesystem_error(self):
        self.mock_conn.return_value.get_object.side_effect = \
            swift_exc.ClientException('')
        f = rackfile.File('/dir1/dir2/file.txt')
        self.assertRaises(FileSystemAccessError, f.load)

    def test_file_write_mode(self):
        # close() in write mode uploads the buffered content to Swift.
        f = rackfile.File('/dir1/dir2/file.txt', mode='w')
        f.write('example text')
        f.close()

        self.mock_conn.return_value.put_container.assert_called_with('dir1')
        self.mock_conn.return_value.put_object.assert_called_with(
            'dir1', 'dir2/file.txt', f.file)

    def test_file_close_invalid_directory_error(self):
        self.mock_conn.return_value.put_object.side_effect = \
            swift_exc.ClientException('', http_status=404)
        f = rackfile.File('/dir1/dir2/file.txt', mode='w')
        f.write('example text')
        self.assertRaises(InvalidDirectoryError, f.close)

    def test_file_close_invalid_filesystem_error(self):
        self.mock_conn.return_value.put_container.side_effect = \
            swift_exc.ClientException('')
        f = rackfile.File('/dir1/dir2/file.txt', mode='w')
        f.write('example text')
        self.assertRaises(FileSystemAccessError, f.close)

    def test_file_attribute_error(self):
        # Unknown attributes must raise, not be silently swallowed.
        # (Replaces a hand-rolled try/except/else with assertRaises and
        # fixes the 'attriute' typo in the test name.)
        f = rackfile.File('/dir1/dir2/file.txt')
        self.assertRaises(AttributeError, getattr, f, 'invalid')

    def test_file_invalid_mode(self):
        # Only recognized open modes are accepted.
        self.assertRaises(ValueError, rackfile.File, '/dir1/dir2/file.txt',
                          'invalid_mode')
|
|
@ -0,0 +1,213 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from mock import patch, Mock
|
||||
from rackclient import exceptions
|
||||
from rackclient import process_context
|
||||
from rackclient.tests import utils
|
||||
from rackclient.v1.syscall.default import messaging as rack_ipc
|
||||
|
||||
import copy
|
||||
import cPickle
|
||||
import pika
|
||||
|
||||
PCTXT = process_context.PCTXT
|
||||
|
||||
|
||||
class MessagingTest(utils.TestCase):
    """Tests for the AMQP messaging syscall module.

    All tests mock ``pika.BlockingConnection`` so no real broker is
    contacted.
    """

    def setUp(self):
        super(MessagingTest, self).setUp()
        self.mock_connection = Mock()
        self.mock_channel = Mock()
        self.mock_receive = Mock(spec=rack_ipc.Messaging.Receive)
        self.patch_pika_blocking = patch('pika.BlockingConnection',
                                         autospec=True)
        self.addCleanup(self.patch_pika_blocking.stop)
        self.mock_pika_blocking = self.patch_pika_blocking.start()
        self.mock_pika_blocking.return_value = self.mock_connection
        self.mock_connection.channel.return_value = self.mock_channel

    def test_declare_queue(self):
        queue_name = 'test_queue_name'
        msg = rack_ipc.Messaging()
        msg.declare_queue(queue_name)

        # The exchange is the group id; the queue is bound with the
        # '<gid>.<queue>' routing key.
        self.mock_channel.\
            exchange_declare.assert_called_with(exchange=PCTXT.gid,
                                                type='topic')
        self.mock_channel.queue_declare.assert_called_with(queue=queue_name)
        r_key = PCTXT.gid + '.' + queue_name
        self.mock_channel.queue_bind.assert_called_with(exchange=PCTXT.gid,
                                                        queue=queue_name,
                                                        routing_key=r_key)

    @patch('rackclient.v1.syscall.default.messaging.Messaging.Receive')
    def test_receive_all_msg(self, mock_receive):
        timeout_limit = 123
        msg = rack_ipc.Messaging()
        msg_list = msg.receive_all_msg(timeout_limit=timeout_limit)

        self.mock_connection.add_timeout.\
            assert_called_with(deadline=timeout_limit,
                               callback_method=mock_receive().time_out)
        self.mock_channel.\
            basic_consume.assert_called_with(mock_receive().get_all_msg,
                                             queue=PCTXT.pid,
                                             no_ack=False)
        self.mock_channel.start_consuming.assert_called_with()
        self.assertEqual(msg_list, mock_receive().message_list)

    @patch('rackclient.v1.syscall.default.messaging.Messaging.Receive')
    def test_receive_msg(self, mock_receive):
        timeout_limit = 123
        msg = rack_ipc.Messaging()
        message = msg.receive_msg(timeout_limit=timeout_limit)

        self.mock_connection.add_timeout.\
            assert_called_with(deadline=timeout_limit,
                               callback_method=mock_receive().time_out)
        self.mock_channel.\
            basic_consume.assert_called_with(mock_receive().get_msg,
                                             queue=PCTXT.pid,
                                             no_ack=False)
        self.mock_channel.start_consuming.assert_called_with()
        self.assertEqual(message, mock_receive().message)

    def test_send_msg(self):
        message = 'test_msg'
        target = 'test_pid'
        msg = rack_ipc.Messaging()
        msg.send_msg(target,
                     message=message)
        # The payload is a pickled dict tagging the sender pid.
        routing_key = PCTXT.gid + '.' + target
        send_dict = {'pid': PCTXT.pid,
                     'message': message}
        expected_body = cPickle.dumps(send_dict)
        self.mock_channel.\
            basic_publish.assert_called_with(exchange=PCTXT.gid,
                                             routing_key=routing_key,
                                             body=expected_body)

    def test_send_msg_no_message(self):
        msg = rack_ipc.Messaging()
        target = 'test_pid'
        msg.send_msg(target)
        routing_key = PCTXT.gid + '.' + target
        send_dict = {'pid': PCTXT.pid}
        expected_body = cPickle.dumps(send_dict)

        self.mock_channel.\
            basic_publish.assert_called_with(exchange=PCTXT.gid,
                                             routing_key=routing_key,
                                             body=expected_body)

    def test_receive_get_all_msg(self):
        ch = Mock()
        method = Mock()
        properties = Mock()
        receive_msg = 'receive_msg'
        body = cPickle.dumps(receive_msg)
        ch_object = {'delivery_tag': 'delivery_tag'}
        method.configure_mock(**ch_object)

        msg = rack_ipc.Messaging()
        receive = msg.Receive()
        receive.get_all_msg(ch, method, properties, body)

        ch.basic_ack.assert_called_with(delivery_tag=ch_object['delivery_tag'])
        self.assertEqual(receive.message_list[0], receive_msg)

    def test_receive_get_all_msg_count_limit(self):
        ch = Mock()
        method = Mock()
        properties = Mock()
        message_list = [{'pid': 'child_pid1'},
                        {'pid': 'child_pid2'}]
        expected_message_list = copy.deepcopy(message_list)
        receive_msg = {'pid': 'child_pid3'}
        expected_message_list.append(receive_msg)
        body = cPickle.dumps(receive_msg)
        ch_object = {'delivery_tag': 'delivery_tag'}
        method.configure_mock(**ch_object)
        msg = rack_ipc.Messaging()
        receive = msg.Receive()
        receive.message_list = message_list
        receive.msg_count_limit = 3

        receive.get_all_msg(ch, method, properties, body)

        # Reaching msg_count_limit must stop the consumer.
        ch.basic_ack.assert_called_with(delivery_tag=ch_object['delivery_tag'])
        ch.stop_consuming.assert_called_with()
        self.assertEqual(receive.message_list, expected_message_list)

    def test_receive_get_msg(self):
        ch = Mock()
        method = Mock()
        properties = Mock()
        receive_msg = 'receive_msg'
        body = cPickle.dumps(receive_msg)
        ch_object = {'delivery_tag': 'delivery_tag'}
        method.configure_mock(**ch_object)

        msg = rack_ipc.Messaging()
        receive = msg.Receive()
        receive.get_msg(ch, method, properties, body)

        ch.basic_ack.assert_called_with(delivery_tag=ch_object['delivery_tag'])
        # Was 'assert_call_with', a typo that a Mock accepts silently
        # without verifying anything.
        ch.stop_consuming.assert_called_with()
        self.assertEqual(receive.message, receive_msg)

    def test_receive_timeout(self):
        msg = rack_ipc.Messaging()
        receive = msg.Receive()
        receive.channel = self.mock_channel
        receive.time_out()
        self.mock_channel.stop_consuming.assert_called_with()

    @patch('rackclient.v1.syscall.default.messaging.Messaging',
           autospec=True)
    def test_init(self, msg):
        rack_ipc.init()
        msg.assert_called_with()

    @patch('rackclient.v1.syscall.default.messaging.Messaging',
           autospec=True)
    def test_init_child(self, msg):
        # A child process (ppid set) must notify its parent and wait for
        # the acknowledgement.
        PCTXT.ppid = 'PPID'
        receive_msg = {'pid': PCTXT.ppid}
        mock_messaging = Mock()
        msg.return_value = mock_messaging
        mock_messaging.receive_msg.return_value = receive_msg
        rack_ipc.init()
        # Was 'asset_called_with' / 'assert_called_onece_with' — typos
        # that made these assertions no-ops.
        mock_messaging.send_msg.assert_called_with(PCTXT.ppid)
        mock_messaging.receive_msg.assert_called_once_with()

    @patch('pika.ConnectionParameters', autospec=True)
    def test_create_connection(self, mock_pika_connection_param):
        rack_ipc._create_connection()
        mock_pika_connection_param.assert_called_with(PCTXT.proxy_ip)

    @patch('pika.ConnectionParameters', autospec=True)
    def test_create_connection_ipc_endpoint(self, mock_pika_connection_param):
        # An explicit ipc_endpoint overrides the proxy IP.
        ipc_ip = 'ipc_ip'
        PCTXT.ipc_endpoint = ipc_ip

        rack_ipc._create_connection()
        mock_pika_connection_param.assert_called_with(ipc_ip)

    def test_create_connection_amqp_connection_error(self):
        self.mock_pika_blocking.side_effect = pika.\
            exceptions.AMQPConnectionError()
        self.assertRaises(exceptions.AMQPConnectionError,
                          rack_ipc._create_connection)
|
|
@ -0,0 +1,232 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from mock import patch
|
||||
from rackclient.tests import utils
|
||||
from rackclient.v1.syscall.default import pipe
|
||||
from rackclient import process_context
|
||||
import datetime
|
||||
|
||||
PCTXT = process_context.PCTXT
|
||||
|
||||
class PipeTest(utils.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(PipeTest, self).setUp()
|
||||
patcher = patch('redis.StrictRedis')
|
||||
self.addCleanup(patcher.stop)
|
||||
self.mock_redis=patcher.start()
|
||||
self.ins_redis = self.mock_redis.return_value
|
||||
|
||||
def test_init_default(self):
|
||||
self.ins_redis.keys.return_value = "data"
|
||||
self.ins_redis.get.return_value = "parent"
|
||||
self.ins_redis.hget.side_effect = ["r","w"]
|
||||
real = pipe.Pipe()
|
||||
self.assertEquals("10.0.0.2", real.host)
|
||||
self.assertEquals(6379, real.port)
|
||||
self.assertEquals("parent", real.name)
|
||||
self.assertFalse(real.is_named)
|
||||
self.assertEquals("r",real.read_state)
|
||||
self.assertEquals("w", real.write_state)
|
||||
self.assertTrue(self.ins_redis.hset.call_count == 2)
|
||||
|
||||
def test_init_param_read_write_child(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
real = pipe.Pipe(read="read",write="write")
|
||||
self.assertEquals("10.0.0.2", real.host)
|
||||
self.assertEquals(6379, real.port)
|
||||
self.assertEquals("pid", real.name)
|
||||
self.assertFalse(real.is_named)
|
||||
self.assertTrue(isinstance(real.read_state, datetime.datetime))
|
||||
self.assertTrue(isinstance(real.write_state, datetime.datetime))
|
||||
self.assertTrue(self.ins_redis.hset.call_count == 2)
|
||||
|
||||
def test_init_param_read_write_parent(self):
|
||||
self.ins_redis.keys.return_value = "data"
|
||||
self.ins_redis.get.return_value = "parent"
|
||||
real = pipe.Pipe(read="read",write="write")
|
||||
self.assertEquals("10.0.0.2", real.host)
|
||||
self.assertEquals(6379, real.port)
|
||||
self.assertEquals("parent", real.name)
|
||||
self.assertFalse(real.is_named)
|
||||
self.assertTrue(isinstance(real.read_state, datetime.datetime))
|
||||
self.assertTrue(isinstance(real.write_state, datetime.datetime))
|
||||
self.assertTrue(self.ins_redis.hset.call_count == 2)
|
||||
|
||||
def test_init_param_read_write_not_none(self):
|
||||
self.ins_redis.keys.return_value = "data"
|
||||
self.ins_redis.get.return_value = "parent"
|
||||
real = pipe.Pipe(read="",write="")
|
||||
self.assertEquals("10.0.0.2", real.host)
|
||||
self.assertEquals(6379, real.port)
|
||||
self.assertEquals("parent", real.name)
|
||||
self.assertFalse(real.is_named)
|
||||
self.assertEquals("close", real.read_state)
|
||||
self.assertEquals("close", real.write_state)
|
||||
self.assertTrue(self.ins_redis.hset.call_count == 2)
|
||||
|
||||
def test_init_param_name(self):
|
||||
real = pipe.Pipe("test")
|
||||
self.assertEquals("10.0.0.2", real.host)
|
||||
self.assertEquals(6379, real.port)
|
||||
self.assertTrue(real.is_named)
|
||||
self.assertEquals("test", real.name)
|
||||
self.assertTrue(isinstance(real.read_state, datetime.datetime))
|
||||
self.assertTrue(isinstance(real.write_state, datetime.datetime))
|
||||
self.assertTrue(self.ins_redis.hset.call_count == 2)
|
||||
|
||||
def test_read(self):
|
||||
self.ins_redis.lpop.return_value = "data"
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
self.assertEquals("data", real.read())
|
||||
|
||||
def test_read_none(self):
|
||||
self.ins_redis.lpop.side_effect = [None,"data"]
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
self.assertEquals("data", real.read())
|
||||
|
||||
def test_read_EndOfFile(self):
|
||||
self.ins_redis.lpop.return_value = None
|
||||
self.ins_redis.hvals.return_value = ["close","close"]
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
self.assertRaises(pipe.EndOfFile, real.read)
|
||||
|
||||
def test_read_NoReadDescriptor(self):
|
||||
self.ins_redis.lpop.return_value = None
|
||||
self.ins_redis.hvals.return_value = ["close","close"]
|
||||
real = pipe.Pipe(read="", write="")
|
||||
self.assertRaises(pipe.NoReadDescriptor, real.read)
|
||||
|
||||
def test_write(self):
|
||||
real = pipe.Pipe(read="read",write="write")
|
||||
self.ins_redis.hvals.return_value = []
|
||||
self.assertTrue("data", real.write("data"))
|
||||
self.assertTrue(self.ins_redis.rpush.call_count == 1)
|
||||
|
||||
def test_write_NoReadDescriptor(self):
|
||||
real = pipe.Pipe(read="read",write="write")
|
||||
self.ins_redis.hvals.return_value = ["close","close"]
|
||||
self.assertRaises(pipe.NoReadDescriptor, real.write, "data")
|
||||
self.assertTrue(self.ins_redis.rpush.call_count == 1)
|
||||
|
||||
def test_write_NoWriteDescriptor(self):
|
||||
real = pipe.Pipe(read="",write="")
|
||||
self.ins_redis.hvals.return_value = ["close","close"]
|
||||
self.assertRaises(pipe.NoWriteDescriptor, real.write, "data")
|
||||
self.assertTrue(self.ins_redis.rpush.call_count == 0)
|
||||
|
||||
def test_close_reader(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
real.close_reader()
|
||||
self.ins_redis.hset.assert_any_call("pid_read", "pid", "close")
|
||||
|
||||
def test_close_write(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
real.close_writer()
|
||||
self.ins_redis.hset.assert_any_call("pid_write", "pid", "close")
|
||||
|
||||
def test_has_reader_no_states(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
self.ins_redis.hvals.return_value= []
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
self.assertTrue(real.has_reader())
|
||||
|
||||
def test_has_reader_states_not_close(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
self.ins_redis.hvals.return_value= ["open", "opne"]
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
self.assertTrue(real.has_reader())
|
||||
|
||||
def test_has_reader_false(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
self.ins_redis.hvals.return_value= ["close", "close"]
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
self.assertFalse(real.has_reader())
|
||||
|
||||
def test_has_writer_no_states(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
self.ins_redis.hvals.return_value= []
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
self.assertTrue(real.has_writer())
|
||||
|
||||
def test_has_writer_states_not_close(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
self.ins_redis.hvals.return_value= ["open", "opne"]
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
self.assertTrue(real.has_writer())
|
||||
|
||||
def test_has_write_false(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
self.ins_redis.hvals.return_value= ["close", "close"]
|
||||
real = pipe.Pipe(read="read",write="write")
|
||||
self.assertFalse(real.has_writer())
|
||||
|
||||
def test_flush_not_named(self):
|
||||
self.ins_redis.keys.side_effect = ["", ["abc"]]
|
||||
real = pipe.Pipe(read="read", write="write")
|
||||
real.flush()
|
||||
keys = ["pid", "pid_read", "pid_write", "abc"]
|
||||
self.ins_redis.delete.assert_any_call(*tuple(keys))
|
||||
|
||||
def test_flush_named(self):
|
||||
self.ins_redis.keys.return_value = ""
|
||||
real = pipe.Pipe(name="name", read="read",write="write")
|
||||
real.flush()
|
||||
keys = ["name", "name_read", "name_write"]
|
||||
self.ins_redis.delete.assert_any_call(*tuple(keys))
|
||||
|
||||
def test_flush_by_pid(self):
|
||||
self.ins_redis.keys.return_value = ["abc"]
|
||||
pipe.Pipe.flush_by_pid("pid")
|
||||
keys = ["pid", "pid_read", "pid_write", "abc"]
|
||||
self.ins_redis.delete.assert_any_call(*tuple(keys))
|
||||
|
||||
def test_flush_by_name(self):
|
||||
pipe.Pipe.flush_by_name("name")
|
||||
keys = ["name", "name_read", "name_write"]
|
||||
self.ins_redis.delete.assert_any_call(*tuple(keys))
|
||||
|
||||
def test_share_keys_exitst_states_close(self):
    """share() copies an existing pipe's entries for the child pid.

    When keys already exist and both parent states are 'close', the
    child inherits the name mapping and the closed read/write states.
    """
    self.ins_redis.keys.return_value = ["value"]
    self.ins_redis.get.return_value = "name"
    # Parent's read state, then write state.
    self.ins_redis.hget.side_effect = ["close", "close"]

    self.assertTrue(pipe.Pipe.share("ppid", "pid"))

    # The child gets its own name mapping and mirrored states.
    self.ins_redis.set.assert_any_call("name:pid", "name")
    self.ins_redis.hset.assert_any_call("name_read", "pid", "close")
    self.ins_redis.hset.assert_any_call("name_write", "pid", "close")
|
||||
|
||||
def test_share_keys_not_exist_states_not_close_ppid_exsits(self):
    """share() registers the child with timestamps when the parent's
    pipe exists and its states are not 'close'.

    ``pipe.datetime`` is patched so the exact timestamp written by
    share() can be asserted.
    """
    mydatetime = datetime.datetime(2015, 1, 1, 0, 0)

    class FakeDateTime(datetime.datetime):
        # Freeze now() so the hset timestamps are deterministic.
        @classmethod
        def now(cls):
            return mydatetime

    patcher = patch("rackclient.v1.syscall.default.pipe.datetime",
                    FakeDateTime)
    # BUG FIX: the patch was started but never stopped, leaking the
    # fake datetime into every test that runs afterwards. Register the
    # stop as a cleanup, matching the setUp convention in this file.
    self.addCleanup(patcher.stop)
    patcher.start()

    # No keys for the child itself, but the parent's pipe exists.
    self.ins_redis.keys.side_effect = [[], ["data"]]
    # Parent read/write states are not 'close'.
    self.ins_redis.hget.side_effect = ["read", "write"]

    self.assertTrue(pipe.Pipe.share("ppid", "pid"))

    self.ins_redis.set.assert_any_call("ppid:pid", "ppid")
    self.ins_redis.hset.assert_any_call("ppid_read", "pid", mydatetime)
    self.ins_redis.hset.assert_any_call("ppid_write", "pid", mydatetime)
|
||||
|
||||
def test_share_false(self):
    """share() returns False when neither the child's nor the
    parent's pipe keys exist."""
    self.ins_redis.keys.side_effect = [[], []]

    self.assertFalse(pipe.Pipe.share("ppid", "pid"))
|
||||
|
||||
def test_NoDescriptor_str_(self):
    """NoDescriptor stringifies to its fixed error message."""
    self.assertEquals("Descriptor Not Found", str(pipe.NoDescriptor()))
|
||||
|
|
@ -0,0 +1,70 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from mock import patch
|
||||
from rackclient.tests import utils
|
||||
from rackclient.v1.syscall.default import shm
|
||||
from rackclient import process_context
|
||||
|
||||
PCTXT = process_context.PCTXT
|
||||
|
||||
class ShmTest(utils.TestCase):
    """Unit tests for the redis-backed Shm shared-memory wrapper.

    ``redis.StrictRedis`` is patched for the whole test, so every Shm
    operation is asserted against the mocked connection.
    """

    def setUp(self):
        super(ShmTest, self).setUp()
        patcher = patch('redis.StrictRedis')
        self.addCleanup(patcher.stop)
        self.mock_redis = patcher.start()

    def test_read(self):
        """read() proxies to redis GET and returns its value."""
        redis_conn = self.mock_redis.return_value
        redis_conn.get.return_value = 'value'

        target = shm.Shm()

        self.assertEquals('value', target.read("key"))
        redis_conn.get.assert_called_once_with("key")

    def test_write(self):
        """write() proxies to redis SET."""
        redis_conn = self.mock_redis.return_value
        redis_conn.set.return_value = 'value'

        target = shm.Shm()

        self.assertEquals('value', target.write("key", "value"))
        redis_conn.set.assert_called_once_with("key", "value")

    def test_list_read(self):
        """list_read() fetches the whole list via LRANGE 0..LLEN."""
        redis_conn = self.mock_redis.return_value
        redis_conn.llen.return_value = 1
        redis_conn.lrange.return_value = "value"

        target = shm.Shm()

        self.assertEquals('value', target.list_read("key"))
        redis_conn.lrange.assert_called_once_with("key", 0, 1)

    def test_list_write(self):
        """list_write() appends via RPUSH."""
        redis_conn = self.mock_redis.return_value
        redis_conn.rpush.return_value = 'value'

        target = shm.Shm()

        self.assertEquals('value', target.list_write("key", "value"))
        redis_conn.rpush.assert_called_once_with("key", "value")

    def test_list_delete_value(self):
        """list_delete_value() removes one matching element via LREM."""
        redis_conn = self.mock_redis.return_value
        redis_conn.lrem.return_value = 'value'

        target = shm.Shm()

        self.assertEquals('value', target.list_delete_value("key", "value"))
        redis_conn.lrem.assert_called_once_with("key", 1, "value")

    def test_delete(self):
        """delete() proxies to redis DEL."""
        redis_conn = self.mock_redis.return_value
        redis_conn.delete.return_value = 'value'

        target = shm.Shm()

        self.assertEquals('value', target.delete("key"))
        redis_conn.delete.assert_called_once_with("key")
|
|
@ -0,0 +1,117 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from mock import patch, Mock
|
||||
from rackclient import process_context
|
||||
from rackclient.tests import utils
|
||||
from rackclient.v1.syscall.default import signal
|
||||
import copy
|
||||
|
||||
PCTXT = process_context.PCTXT
|
||||
|
||||
|
||||
class SignalTest(utils.TestCase):
    """Unit tests for SignalManager's websocket send/receive helpers."""

    def setUp(self):
        super(SignalTest, self).setUp()

    @patch('websocket.WebSocketApp')
    def test_receive(self, mock_websocket_websocketapp):
        """receive() opens a WebSocketApp against <url>/receive using
        the process' own PID header and runs it until closed."""
        mock_app = Mock()
        mock_websocket_websocketapp.return_value = mock_app

        s = signal.SignalManager()
        on_msg_func = 'on_msg_func'
        # NOTE: "excepted" is a long-standing typo for "expected".
        excepted_on_msg_func = copy.deepcopy(on_msg_func)
        s.receive(on_msg_func)

        mock_websocket_websocketapp.\
            assert_called_with(url=s.url + '/receive',
                               header=['PID: ' + PCTXT.pid],
                               on_message=s.on_message,
                               on_error=s.on_error,
                               on_close=s.on_close)
        mock_app.run_forever.assert_called_with()
        # The callback must be stored unchanged on the manager.
        self.assertEqual(s.on_msg_func, excepted_on_msg_func)

    @patch('websocket.WebSocketApp')
    def test_receive_pid_specified(self, mock_websocket_websocketapp):
        """receive(pid=...) uses the explicit PID header and the
        manager strips a trailing slash off a custom url."""
        mock_app = Mock()
        mock_websocket_websocketapp.return_value = mock_app

        url = '/test_url/'
        expected_url = url.rstrip('/')
        s = signal.SignalManager(url=url)
        on_msg_func = 'on_msg_func'
        excepted_on_msg_func = copy.deepcopy(on_msg_func)
        pid = 'singnal_pid'
        s.receive(on_msg_func, pid=pid)

        self.assertEqual(s.url, expected_url)
        mock_websocket_websocketapp.assert_called_with(url=s.url + '/receive',
                                                       header=['PID: ' + pid],
                                                       on_message=s.on_message,
                                                       on_error=s.on_error,
                                                       on_close=s.on_close)
        mock_app.run_forever.assert_called_with()
        self.assertEqual(s.on_msg_func, excepted_on_msg_func)

    # NOTE(review): method name is missing the underscore after "test"
    # (still discovered by unittest's default "test*" prefix match), and
    # PCTXT.pid is set to None without being restored afterwards, which
    # can leak into later tests — worth confirming/cleaning up.
    @patch('websocket.WebSocketApp')
    def teston_msg_func_receive_pid_specified(self, mock_websocket_websocketapp):
        """receive() raises when neither an explicit pid nor PCTXT.pid
        is available."""
        mock_app = Mock()
        mock_websocket_websocketapp.return_value = mock_app

        s = signal.SignalManager()
        on_msg_func = 'on_msg_func'
        PCTXT.pid = None
        self.assertRaises(Exception, s.receive, on_msg_func)

    def test_on_message(self):
        """on_message() forwards the payload to the stored callback and
        then closes the websocket."""
        on_msg_func = Mock()
        ws = Mock()
        s = signal.SignalManager()
        s.on_msg_func = on_msg_func
        message = 'test_msg'
        excepted_message = copy.deepcopy(message)
        s.on_message(ws, message)

        on_msg_func.assert_called_with(excepted_message)
        ws.close.assert_called_with()

    def test_on_error(self):
        """on_error() closes the websocket and re-raises the error."""
        ws = Mock()
        s = signal.SignalManager()
        error = 'test_error'

        self.assertRaises(Exception, s.on_error, ws, error)
        ws.close.assert_called_with()

    @patch('websocket.create_connection')
    def test_send(self, mock_create_connection):
        """send() opens <url>/send with the target's PID header, sends
        the message once and closes the connection."""
        target_id = 'target_id'
        expected_target_id = copy.deepcopy(target_id)
        message = 'test_msg'
        expected_message = copy.deepcopy(message)
        url = '/test_url/'
        # A trailing slash on the configured url must not produce '//send'.
        expected_url = url.rstrip('/') + '/send'
        ws = Mock()
        mock_create_connection.return_value = ws

        s = signal.SignalManager(url=url)
        s.send(target_id, message)

        mock_create_connection.\
            assert_called_with(expected_url,
                               header=['PID: ' + expected_target_id])
        ws.send.assert_called_with(expected_message)
        ws.close.assert_called_with()
|
|
@ -0,0 +1,217 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from rackclient import process_context
|
||||
from rackclient.tests import utils
|
||||
from rackclient.v1 import processes
|
||||
from rackclient.v1.syscall.default import file
|
||||
from rackclient.v1.syscall.default import messaging
|
||||
from rackclient.v1.syscall.default import pipe
|
||||
from rackclient.v1.syscall.default import syscall
|
||||
|
||||
from mock import call
|
||||
from mock import Mock
|
||||
|
||||
PCTXT = process_context.PCTXT
|
||||
|
||||
|
||||
class SyscallTest(utils.TestCase):
    """Unit tests for the default syscall module (fork/pipe/fopen).

    NOTE(review): these tests replace module attributes directly
    (``pipe.Pipe = Mock()``, ``messaging.Messaging = Mock(...)``,
    ``file.File = Mock()``, ``PCTXT.client.processes.create = Mock(...)``)
    and never restore them, so the mocks leak into any test that runs
    afterwards in the same process. Converting these to
    ``mock.patch``/``addCleanup`` would isolate them — confirm before
    relying on test ordering.
    """

    def setUp(self):
        super(SyscallTest, self).setUp()

    def test_fork(self):
        """fork() creates one child per arg dict, shares pipes with each
        child and sends each a 'start' message."""
        def create_process(gid, ppid, **kwargs):
            # Fake processes.create: mint pid1/pid2/pid3 from the mock's
            # call count and echo the metadata back into args/kwargs.
            count = PCTXT.client.processes.create.call_count
            d = {'ppid': ppid,
                 'pid': 'pid' + str(count),
                 'gid': gid,
                 'proxy_ip': PCTXT.proxy_ip}
            args = kwargs['args']
            args.update(d)
            kwargs.update(d)
            return_process = processes.Process(PCTXT.client, kwargs)
            return return_process

        PCTXT.client.processes.create = Mock(side_effect=create_process)
        # messaging mock: each child reports back via receive_all_msg.
        mock_messaging = Mock()
        messaging.Messaging = Mock(return_value=mock_messaging)
        msg_list = [{'pid': 'pid1'}, {'pid': 'pid2'}, {'pid': 'pid3'}]
        mock_messaging.receive_all_msg.return_value = msg_list
        # pipe mock
        pipe.Pipe = Mock()

        # call fork
        arg1 = {'test': 'test1'}
        arg2 = {'test': 'test2'}
        arg3 = {'test': 'test3'}
        arg_list = [{'args': arg1},
                    {'args': arg2},
                    {'args': arg3}]
        process_list = syscall.fork(arg_list)

        # check: pipes shared with each child in order...
        expected_pipe_share = [call('pid', 'pid1'),
                               call('pid', 'pid2'),
                               call('pid', 'pid3')]
        self.assertEqual(expected_pipe_share, pipe.Pipe.share.call_args_list)
        # ...and each child told to start.
        expected_msg_send = [call(message='start', target='pid1'),
                             call(message='start', target='pid2'),
                             call(message='start', target='pid3')]
        self.assertEqual(expected_msg_send,
                         mock_messaging.send_msg.call_args_list)
        # Every returned process carries one of the original arg dicts
        # exactly once, plus parent pid/gid metadata.
        expected_arg_list = [arg1, arg2, arg3]
        for process in process_list:
            self.assertTrue(process.args in expected_arg_list)
            self.assertEqual(process.ppid, PCTXT.pid)
            self.assertEqual(process.gid, PCTXT.gid)
            expected_arg_list.remove(process.args)

    def test_bulk_fork_check_connection_recoverable_error(self):
        """fork() retries after a single create failure: the failed
        child is deleted and replacements are created."""
        # setup
        def create_process(gid, ppid, **kwargs):
            count = PCTXT.client.processes.create.call_count
            # Second create call fails; fork must recover from it.
            if count == 2:
                raise Exception()
            d = {'ppid': ppid,
                 'pid': 'pid' + str(count),
                 'gid': gid,
                 'proxy_ip': PCTXT.proxy_ip}
            args = kwargs['args']
            args.update(d)
            kwargs.update(d)
            return_process = processes.Process(PCTXT.client, kwargs)
            return return_process

        PCTXT.client.processes.create = Mock(side_effect=create_process)
        PCTXT.client.processes.delete = Mock()

        # messaging mock: only the retried children check in.
        mock_messaging = Mock()
        messaging.Messaging = Mock(return_value=mock_messaging)
        msg_list = [{'pid': 'pid3'}, {'pid': 'pid4'}, {'pid': 'pid5'}]
        mock_messaging.receive_all_msg.return_value = msg_list

        # pipe mock
        pipe.Pipe = Mock()

        # call fork
        arg1 = {'test': 'test1'}
        arg2 = {'test': 'test2'}
        arg3 = {'test': 'test3'}
        arg_list = [{'args': arg1},
                    {'args': arg2},
                    {'args': arg3}]
        process_list = syscall.fork(arg_list)

        # check: pid1 never connected, so it must be deleted.
        PCTXT.client.processes.delete.assert_called_with(PCTXT.gid, 'pid1')
        expected_pipe_share = [call('pid', 'pid3'),
                               call('pid', 'pid4'),
                               call('pid', 'pid5')]
        self.assertEqual(expected_pipe_share, pipe.Pipe.share.call_args_list)
        expected_msg_send = [call(message='start', target='pid3'),
                             call(message='start', target='pid4'),
                             call(message='start', target='pid5')]
        self.assertEqual(expected_msg_send,
                         mock_messaging.send_msg.call_args_list)
        expected_arg_list = [arg1, arg2, arg3]
        for process in process_list:
            self.assertTrue(process.args in expected_arg_list)
            self.assertEqual(process.ppid, PCTXT.pid)
            self.assertEqual(process.gid, PCTXT.gid)
            expected_arg_list.remove(process.args)

    def test_bulk_fork_error_no_child_process_is_created(self):
        """fork() raises when every processes.create call fails."""
        PCTXT.client.processes.create = Mock(side_effect=Exception)
        # call fork
        arg1 = {'test': 'test1'}
        arg2 = {'test': 'test2'}
        arg3 = {'test': 'test3'}
        arg_list = [{'args': arg1},
                    {'args': arg2},
                    {'args': arg3}]
        self.assertRaises(Exception, syscall.fork, arg_list)

    def test_check_connection_error_no_child_process_is_active(self):
        """fork() raises and deletes all created children when none of
        the expected pids ever report back."""
        # setup
        def create_process(gid, ppid, **kwargs):
            count = PCTXT.client.processes.create.call_count
            d = {'ppid': ppid,
                 'pid': 'pid' + str(count),
                 'gid': gid,
                 'proxy_ip': PCTXT.proxy_ip}
            args = kwargs['args']
            args.update(d)
            kwargs.update(d)
            return_process = processes.Process(PCTXT.client, kwargs)
            return return_process

        PCTXT.client.processes.create = Mock(side_effect=create_process)
        PCTXT.client.processes.delete = Mock()
        # messaging mock: a pid that was never created reports in, so no
        # created child is considered active.
        mock_messaging = Mock()
        messaging.Messaging = Mock(return_value=mock_messaging)
        msg_list = [{'pid': 'pid6'}]
        mock_messaging.receive_all_msg.return_value = msg_list

        # call fork
        arg_list = [{'args': {'test': 'test1'}},
                    {'args': {'test': 'test2'}},
                    {'args': {'test': 'test3'}}]
        self.assertRaises(Exception, syscall.fork, arg_list)
        expected_processes_delete = [call(PCTXT.gid, 'pid1'),
                                     call(PCTXT.gid, 'pid2'),
                                     call(PCTXT.gid, 'pid3')]
        self.assertEqual(expected_processes_delete,
                         PCTXT.client.processes.delete.call_args_list)

    def test_pipe_no_arg(self):
        """pipe() without a name returns Pipe()'s result unchanged."""
        pipe.Pipe = Mock()
        return_value = 'pipe'
        pipe.Pipe.return_value = return_value

        pipe_obj = syscall.pipe()
        self.assertEqual(pipe_obj, return_value)

    def test_pipe(self):
        """pipe(name) forwards the name to the Pipe constructor."""
        return_value = 'pipe'
        side_effect = lambda value: return_value + value
        pipe.Pipe = Mock(side_effect=side_effect)

        name = 'pipe_name'
        pipe_obj = syscall.pipe(name)
        self.assertEqual(pipe_obj, return_value + name)

    def test_fopen(self):
        """fopen(path, mode) forwards both arguments to File."""
        file.File = Mock()
        return_value = 'file_obj'
        file.File.return_value = return_value

        file_path = 'file_path'
        mode = 'w'
        file_obj = syscall.fopen(file_path, mode=mode)
        self.assertEqual(return_value, file_obj)
        file.File.assert_called_once_with(file_path, mode)

    def test_fopen_no_mode(self):
        """fopen(path) defaults to read mode 'r'."""
        file.File = Mock()
        return_value = 'file_obj'
        file.File.return_value = return_value

        file_path = 'file_path'
        mode = 'r'
        file_obj = syscall.fopen(file_path)
        self.assertEqual(return_value, file_obj)
        file.File.assert_called_once_with(file_path, mode)
|
|
@ -0,0 +1,2 @@
|
|||
key1=value1
|
||||
key2=value2
|
|
@ -0,0 +1,70 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from rackclient.tests import utils
|
||||
from rackclient.tests.v1 import fakes
|
||||
from rackclient.v1 import groups
|
||||
|
||||
|
||||
class GroupsTest(utils.TestCase):
    """Tests for the v1 groups API bindings against the fake client."""

    def setUp(self):
        super(GroupsTest, self).setUp()
        self.cs = fakes.FakeClient()
        self.group_type = groups.Group

    def test_list(self):
        """list() issues GET /groups and yields Group objects."""
        # Local renamed to avoid shadowing the imported `groups` module.
        group_list = self.cs.groups.list()
        self.cs.assert_called('GET', '/groups')
        for fetched in group_list:
            self.assertIsInstance(fetched, self.group_type)

    def test_get(self):
        """get() issues GET /groups/<gid> and maps all fields."""
        fetched = self.cs.groups.get('11111111')
        self.cs.assert_called('GET', '/groups/11111111')
        self.assertEqual('11111111', fetched.gid)
        self.assertEqual('4ffc664c198e435e9853f253lkbcd7a7', fetched.user_id)
        self.assertEqual('9sac664c198e435e9853f253lkbcd7a7', fetched.project_id)
        self.assertEqual('group1', fetched.name)
        self.assertEqual('This is group1', fetched.description)
        self.assertEqual('ACTIVE', fetched.status)

    def _create_body(self, name, description):
        """Build the request body create()/update() are expected to send."""
        return {
            'group': {
                'name': name,
                'description': description
            }
        }

    def test_create(self):
        """create() POSTs the group body and returns a Group."""
        name = 'group1'
        description = 'This is group1'
        created = self.cs.groups.create(name, description)
        expected_body = self._create_body(name, description)
        self.cs.assert_called('POST', '/groups', expected_body)
        self.assertIsInstance(created, self.group_type)

    def test_update(self):
        """update() PUTs the group body to /groups/<gid>."""
        gid = '11111111'
        name = 'group1'
        description = 'This is group1'
        updated = self.cs.groups.update(gid, name, description)
        expected_body = self._create_body(name, description)
        self.cs.assert_called('PUT', '/groups/11111111', expected_body)
        self.assertIsInstance(updated, self.group_type)

    def test_delete(self):
        """delete() issues DELETE /groups/<gid>."""
        gid = '11111111'
        self.cs.groups.delete(gid)
        self.cs.assert_called('DELETE', '/groups/11111111')
|
|
@ -0,0 +1,2 @@
|
|||
key1=value1
|
||||
key2:value2
|
|
@ -0,0 +1,97 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from rackclient import exceptions as exc
|
||||
from rackclient.tests import utils
|
||||
from rackclient.tests.v1 import fakes
|
||||
from rackclient.v1 import keypairs
|
||||
|
||||
|
||||
class KeypairsTest(utils.TestCase):
    """Tests for the v1 keypairs API bindings against the fake client."""

    def setUp(self):
        super(KeypairsTest, self).setUp()
        self.cs = fakes.FakeClient()
        self.keypair_type = keypairs.Keypair
        # Fixture ids baked into the fake client's canned responses.
        self.gid = '11111111'
        self.user_id = '4ffc664c198e435e9853f253lkbcd7a7'
        self.project_id = '9sac664c198e435e9853f253lkbcd7a7'

    def test_list(self):
        """list() issues GET /groups/<gid>/keypairs and yields Keypairs."""
        keypairs = self.cs.keypairs.list(self.gid)
        self.cs.assert_called('GET', '/groups/%s/keypairs' % self.gid)
        for keypair in keypairs:
            self.assertIsInstance(keypair, self.keypair_type)

    def test_get(self):
        """get() issues GET on the keypair URL and maps all fields."""
        keypair_id = 'aaaaaaaa'
        keypair = self.cs.keypairs.get(self.gid, keypair_id)
        self.cs.assert_called('GET', '/groups/%s/keypairs/%s' % (self.gid, keypair_id))
        self.assertEqual(self.gid, keypair.gid)
        self.assertEqual(self.user_id, keypair.user_id)
        self.assertEqual(self.project_id, keypair.project_id)
        self.assertEqual(keypair_id, keypair.keypair_id)
        self.assertEqual('keypair1', keypair.nova_keypair_id)
        self.assertEqual('keypair1', keypair.name)
        self.assertEqual('1234', keypair.private_key)
        self.assertEqual(True, keypair.is_default)
        self.assertEqual('Exist', keypair.status)

    def _create_body(self, name, is_default):
        """Build the request body create() is expected to send."""
        return {
            'keypair': {
                'name': name,
                'is_default': is_default
            }
        }

    def test_create(self):
        """create() POSTs the keypair body and returns a Keypair."""
        name = 'keypair1'
        is_default = True
        keypair = self.cs.keypairs.create(self.gid, name, is_default)
        body = self._create_body(name, is_default)
        self.cs.assert_called('POST', '/groups/%s/keypairs' % self.gid, body)
        self.assertIsInstance(keypair, self.keypair_type)

    def test_create_invalid_parameters(self):
        """create() rejects a non-boolean is_default with CommandError."""
        name = 'keypair1'
        is_default = 'invalid'
        self.assertRaises(exc.CommandError, self.cs.keypairs.create,
                          self.gid, name, is_default)

    def _update_body(self, is_default):
        """Build the request body update() is expected to send."""
        return {
            'keypair': {
                'is_default': is_default
            }
        }

    def test_update(self):
        """update() PUTs the is_default flag to the keypair URL."""
        is_default = True
        keypair_id = 'aaaaaaaa'
        keypair = self.cs.keypairs.update(self.gid,
                                          keypair_id, is_default)
        body = self._update_body(is_default)
        self.cs.assert_called('PUT', '/groups/%s/keypairs/%s' % (self.gid, keypair_id), body)
        self.assertIsInstance(keypair, self.keypair_type)

    def test_update_invalid_parameters(self):
        """update() rejects a non-boolean is_default with CommandError."""
        is_default = 'invalid'
        keypair_id = 'aaaaaaaa'
        self.assertRaises(exc.CommandError, self.cs.keypairs.update,
                          self.gid, keypair_id, is_default)

    def test_delete(self):
        """delete() issues DELETE on the keypair URL."""
        keypair_id = 'aaaaaaaa'
        self.cs.keypairs.delete(self.gid, keypair_id)
        self.cs.assert_called('DELETE', '/groups/%s/keypairs/%s' % (self.gid, keypair_id))
|
|
@ -0,0 +1,101 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from rackclient import exceptions as exc
|
||||
from rackclient.tests import utils
|
||||
from rackclient.tests.v1 import fakes
|
||||
from rackclient.v1 import networks
|
||||
|
||||
|
||||
class NetworksTest(utils.TestCase):
    """Tests for the v1 networks API bindings against the fake client."""

    def setUp(self):
        super(NetworksTest, self).setUp()
        self.cs = fakes.FakeClient()
        self.network_type = networks.Network
        # Fixture ids baked into the fake client's canned responses.
        self.gid = '11111111'
        self.user_id = '4ffc664c198e435e9853f253lkbcd7a7'
        self.project_id = '9sac664c198e435e9853f253lkbcd7a7'

    def test_list(self):
        """list() issues GET /groups/<gid>/networks and yields Networks."""
        networks = self.cs.networks.list(self.gid)
        self.cs.assert_called('GET', '/groups/%s/networks' % self.gid)
        for network in networks:
            self.assertIsInstance(network, self.network_type)

    def test_get(self):
        """get() issues GET on the network URL and maps all fields."""
        network_id = 'aaaaaaaa'
        network = self.cs.networks.get(self.gid, network_id)
        self.cs.assert_called('GET', '/groups/%s/networks/%s' % (self.gid, network_id))
        self.assertEqual(self.gid, network.gid)
        self.assertEqual(self.user_id, network.user_id)
        self.assertEqual(self.project_id, network.project_id)
        self.assertEqual(network_id, network.network_id)
        self.assertEqual('pppppppp', network.neutron_network_id)
        self.assertEqual('network1', network.name)
        self.assertEqual(True, network.is_admin)
        self.assertEqual('rrrrrrrr', network.ext_router_id)
        self.assertEqual('Exist', network.status)

    def _create_body(self, cidr, name=None, is_admin=False, gateway=None, dns_nameservers=None, ext_router_id=None):
        """Build the request body create() is expected to send."""
        return {
            'network': {
                'cidr': cidr,
                'name': name,
                'is_admin': is_admin,
                'gateway': gateway,
                'dns_nameservers': dns_nameservers,
                'ext_router_id': ext_router_id
            }
        }

    def test_create(self):
        """create() POSTs the full network body and returns a Network."""
        cidr = '10.0.0.0/24'
        name = 'network1'
        is_admin = True
        dns_nameservers = ['8.8.8.8', '8.8.4.4']
        gateway = '10.0.0.254'
        ext_router_id = 'rrrrrrrr'

        network = self.cs.networks.create(
            self.gid, cidr, name, is_admin, gateway,
            dns_nameservers, ext_router_id)
        body = self._create_body(
            cidr, name, is_admin, gateway,
            dns_nameservers, ext_router_id)
        self.cs.assert_called('POST', '/groups/%s/networks' % self.gid, body)
        self.assertIsInstance(network, self.network_type)

    def test_create_invalid_parameters(self):
        """create() rejects each malformed argument with CommandError:
        bad cidr, cidr without a prefix, non-bool is_admin, bad gateway,
        and a non-list dns_nameservers."""
        name = 'network1'
        ext_router_id = 'rrrrrrrr'
        self.assertRaises(exc.CommandError, self.cs.networks.create,
                          self.gid, 'invalid', name, True, '10.0.0.254',
                          ['8.8.8.8', '8.8.4.4'], ext_router_id)
        self.assertRaises(exc.CommandError, self.cs.networks.create,
                          self.gid, '10.0.0.0', name, True, '10.0.0.254',
                          ['8.8.8.8', '8.8.4.4'], ext_router_id)
        self.assertRaises(exc.CommandError, self.cs.networks.create,
                          self.gid, '10.0.0.0/24', name, 'invalid', '10.0.0.254',
                          ['8.8.8.8', '8.8.4.4'], ext_router_id)
        self.assertRaises(exc.CommandError, self.cs.networks.create,
                          self.gid, '10.0.0.0/24', name, True, 'invalid',
                          ['8.8.8.8', '8.8.4.4'], ext_router_id)
        self.assertRaises(exc.CommandError, self.cs.networks.create,
                          self.gid, '10.0.0.0/24', name, True, '10.0.0.254',
                          {}, ext_router_id)

    def test_delete(self):
        """delete() issues DELETE on the network URL."""
        network_id = 'aaaaaaaa'
        self.cs.networks.delete(self.gid, network_id)
        self.cs.assert_called('DELETE', '/groups/%s/networks/%s' % (self.gid, network_id))
|
|
@ -0,0 +1,135 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import base64
|
||||
import tempfile
|
||||
from rackclient import exceptions as exc
|
||||
from rackclient.tests import utils
|
||||
from rackclient.tests.v1 import fakes
|
||||
from rackclient.v1 import processes
|
||||
|
||||
|
||||
class ProcesssTest(utils.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(ProcesssTest, self).setUp()
|
||||
self.cs = fakes.FakeClient()
|
||||
self.process_type = processes.Process
|
||||
self.gid = '11111111'
|
||||
self.user_id = '4ffc664c198e435e9853f253lkbcd7a7'
|
||||
self.project_id = '9sac664c198e435e9853f253lkbcd7a7'
|
||||
|
||||
def test_list(self):
|
||||
processes = self.cs.processes.list(self.gid)
|
||||
self.cs.assert_called('GET', '/groups/%s/processes' % self.gid)
|
||||
for process in processes:
|
||||
self.assertIsInstance(process, self.process_type)
|
||||
|
||||
def test_get(self):
|
||||
pid = 'aaaaaaaa'
|
||||
process = self.cs.processes.get(self.gid, pid)
|
||||
self.cs.assert_called('GET', '/groups/%s/processes/%s' % (self.gid, pid))
|
||||
self.assertEqual(self.gid, process.gid)
|
||||
self.assertEqual(self.user_id, process.user_id)
|
||||
self.assertEqual(self.project_id, process.project_id)
|
||||
self.assertEqual(pid, process.pid)
|
||||
self.assertEqual(None, process.ppid)
|
||||
self.assertEqual('pppppppp', process.nova_instance_id)
|
||||
self.assertEqual('process1', process.name)
|
||||
self.assertEqual('xxxxxxxx', process.glance_image_id)
|
||||
self.assertEqual('yyyyyyyy', process.nova_flavor_id)
|
||||
self.assertEqual('iiiiiiii', process.keypair_id)
|
||||
self.assertEqual(['jjjjjjjj', 'kkkkkkkk'], process.securitygroup_ids)
|
||||
networks = [{
|
||||
'network_id': 'mmmmmmmm',
|
||||
'fixed': '10.0.0.2',
|
||||
'floating': '1.1.1.1'
|
||||
}]
|
||||
self.assertEqual(networks, process.networks)
|
||||
self.assertEqual('ACTIVE', process.app_status)
|
||||
self.assertEqual('ACTIVE', process.status)
|
||||
self.assertEqual('IyEvYmluL3NoICBlY2hvICJIZWxsbyI=', process.userdata)
|
||||
args = {
|
||||
'key1': 'value1',
|
||||
'key2': 'value2'
|
||||
}
|
||||
self.assertEqual(args, process.args)
|
||||
|
||||
|
||||
def _create_body(self, ppid=None, name=None, nova_flavor_id=None,
|
||||
glance_image_id=None, keypair_id=None,
|
||||
securitygroup_ids=None, userdata=None, args=None):
|
||||
return {
|
||||
'process': {
|
||||
'ppid': ppid,
|
||||
'name': name,
|
||||
'nova_flavor_id': nova_flavor_id,
|
||||
'glance_image_id': glance_image_id,
|
||||
'keypair_id': keypair_id,
|
||||
'securitygroup_ids': securitygroup_ids,
|
||||
'userdata': userdata,
|
||||
'args': args
|
||||
}
|
||||
}
|
||||
|
||||
def test_create(self):
|
||||
userdata = '#!/bin/sh echo "Hello"'
|
||||
f = tempfile.TemporaryFile()
|
||||
f.write(userdata)
|
||||
f.seek(0)
|
||||
params = {
|
||||
'ppid': '11111111',
|
||||
'name':'process1',
|
||||
'nova_flavor_id': 1,
|
||||
'glance_image_id': '22222222',
|
||||
'keypair_id': '33333333',
|
||||
'securitygroup_ids': ['44444444', '55555555'],
|
||||
'userdata': f,
|
||||
'args': {
|
||||
"key1": "value1",
|
||||
"key2": "value2"
|
||||
}
|
||||
}
|
||||
process = self.cs.processes.create(self.gid, **params)
|
||||
body = self._create_body(**params)
|
||||
body['process']['userdata'] = base64.b64encode(userdata)
|
||||
self.cs.assert_called('POST', '/groups/%s/processes' % self.gid, body)
|
||||
self.assertIsInstance(process, self.process_type)
|
||||
|
||||
def test_create_invalid_parameters(self):
|
||||
ppid = 'aaaaaaaa'
|
||||
self.assertRaises(exc.CommandError, self.cs.processes.create,
|
||||
self.gid, ppid=ppid, securitygroup_ids='invalid')
|
||||
self.assertRaises(exc.CommandError, self.cs.processes.create,
|
||||
self.gid, ppid=ppid, args='invalid')
|
||||
|
||||
def _update_body(self, app_status):
|
||||
return {
|
||||
'process': {
|
||||
'app_status': app_status
|
||||
}
|
||||
}
|
||||
|
||||
def test_update(self):
    """Update a process's app_status and verify the PUT request."""
    pid = 'aaaaaaaa'
    app_status = 'ACTIVE'
    result = self.cs.processes.update(self.gid, pid, app_status)
    expected = self._update_body(app_status)
    self.cs.assert_called(
        'PUT', '/groups/%s/processes/%s' % (self.gid, pid), expected)
    self.assertIsInstance(result, self.process_type)
|
||||
|
||||
def test_delete(self):
    """Delete a process and verify the DELETE request."""
    pid = 'aaaaaaaa'
    self.cs.processes.delete(self.gid, pid)
    self.cs.assert_called(
        'DELETE', '/groups/%s/processes/%s' % (self.gid, pid))
|
|
@ -0,0 +1,130 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import base64
|
||||
import tempfile
|
||||
from rackclient import exceptions as exc
|
||||
from rackclient.tests import utils
|
||||
from rackclient.tests.v1 import fakes
|
||||
from rackclient.v1 import proxy
|
||||
|
||||
|
||||
class ProxyTest(utils.TestCase):
    """Tests for the v1 proxy manager, run against the fake HTTP client."""

    def setUp(self):
        super(ProxyTest, self).setUp()
        self.cs = fakes.FakeClient()
        self.proxy_type = proxy.Proxy
        self.gid = '11111111'
        self.user_id = '4ffc664c198e435e9853f253lkbcd7a7'
        self.project_id = '9sac664c198e435e9853f253lkbcd7a7'

    def test_get(self):
        """GET the group proxy and check every attribute of the result."""
        # Named 'got' (not 'proxy') to avoid shadowing the imported module.
        got = self.cs.proxy.get(self.gid)
        self.cs.assert_called('GET', '/groups/%s/proxy' % self.gid)
        self.assertEqual(self.gid, got.gid)
        self.assertEqual(self.user_id, got.user_id)
        self.assertEqual(self.project_id, got.project_id)
        self.assertEqual(None, got.ppid)
        self.assertEqual('pppppppp', got.nova_instance_id)
        self.assertEqual('proxy', got.name)
        self.assertEqual('xxxxxxxx', got.glance_image_id)
        self.assertEqual('yyyyyyyy', got.nova_flavor_id)
        self.assertEqual('iiiiiiii', got.keypair_id)
        self.assertEqual(['jjjjjjjj', 'kkkkkkkk'], got.securitygroup_ids)
        expected_networks = [{
            'network_id': 'mmmmmmmm',
            'fixed': '10.0.0.2',
            'floating': '1.1.1.1'
        }]
        self.assertEqual(expected_networks, got.networks)
        self.assertEqual('ACTIVE', got.app_status)
        self.assertEqual('ACTIVE', got.status)
        self.assertEqual('IyEvYmluL3NoICBlY2hvICJIZWxsbyI=', got.userdata)
        self.assertEqual({'key1': 'value1', 'key2': 'value2'}, got.args)
        self.assertEqual('ipc_endpoint', got.ipc_endpoint)
        self.assertEqual('shm_endpoint', got.shm_endpoint)
        self.assertEqual('fs_endpoint', got.fs_endpoint)

    def _create_body(self, name=None, nova_flavor_id=None,
                     glance_image_id=None, keypair_id=None,
                     securitygroup_ids=None, userdata=None, args=None):
        """Build the expected POST body for a proxy-create request."""
        return {
            'proxy': {
                'name': name,
                'nova_flavor_id': nova_flavor_id,
                'glance_image_id': glance_image_id,
                'keypair_id': keypair_id,
                'securitygroup_ids': securitygroup_ids,
                'userdata': userdata,
                'args': args
            }
        }

    def test_create(self):
        """Create the proxy and verify the generated POST body."""
        userdata = '#!/bin/sh echo "Hello"'
        f = tempfile.TemporaryFile()
        f.write(userdata)
        f.seek(0)
        params = {
            'name': 'proxy',
            'nova_flavor_id': 1,
            'glance_image_id': '22222222',
            'keypair_id': '33333333',
            'securitygroup_ids': ['44444444', '55555555'],
            'userdata': f,
            'args': {
                'key1': 'value1',
                'key2': 'value2'
            }
        }
        got = self.cs.proxy.create(self.gid, **params)
        body = self._create_body(**params)
        # The manager is expected to base64-encode the userdata file.
        body['proxy']['userdata'] = base64.b64encode(userdata)
        self.cs.assert_called('POST', '/groups/%s/proxy' % self.gid, body)
        self.assertIsInstance(got, self.proxy_type)

    def test_create_invalid_parameters(self):
        """A non-list securitygroup_ids or non-dict args raises CommandError."""
        for bad_kwargs in ({'securitygroup_ids': 'invalid'},
                           {'args': 'invalid'}):
            self.assertRaises(exc.CommandError, self.cs.proxy.create,
                              self.gid, **bad_kwargs)

    def _update_body(self, ipc_endpoint=None, shm_endpoint=None,
                     fs_endpoint=None, app_status=None):
        """Build the expected PUT body for a proxy update."""
        return {
            'proxy': {
                'ipc_endpoint': ipc_endpoint,
                'shm_endpoint': shm_endpoint,
                'fs_endpoint': fs_endpoint,
                'app_status': app_status
            }
        }

    def test_update(self):
        """Update the proxy endpoints/status and verify the PUT request."""
        ipc_endpoint = 'ipc_endpoint'
        shm_endpoint = 'shm_endpoint'
        fs_endpoint = 'fs_endpoint'
        app_status = 'ACTIVE'
        # NOTE(review): the positional order here (shm, ipc, fs) differs
        # from the body ordering below; it presumably matches
        # ProxyManager.update's signature -- confirm against the manager.
        got = self.cs.proxy.update(self.gid, shm_endpoint,
                                   ipc_endpoint, fs_endpoint,
                                   app_status)
        body = self._update_body(ipc_endpoint, shm_endpoint,
                                 fs_endpoint, app_status)
        self.cs.assert_called('PUT', '/groups/%s/proxy' % self.gid, body)
        self.assertIsInstance(got, self.proxy_type)
|
|
@ -0,0 +1,111 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from rackclient import exceptions as exc
|
||||
from rackclient.tests import utils
|
||||
from rackclient.tests.v1 import fakes
|
||||
from rackclient.v1 import securitygroups
|
||||
|
||||
|
||||
class SecuritygroupsTest(utils.TestCase):
    """Tests for the v1 securitygroup manager via the fake HTTP client."""

    def setUp(self):
        super(SecuritygroupsTest, self).setUp()
        self.cs = fakes.FakeClient()
        self.securitygroup_type = securitygroups.Securitygroup
        self.gid = '11111111'
        self.user_id = '4ffc664c198e435e9853f253lkbcd7a7'
        self.project_id = '9sac664c198e435e9853f253lkbcd7a7'

    def test_list(self):
        """GET the collection; every element must be a Securitygroup."""
        # Named 'got' to avoid shadowing the imported module.
        got = self.cs.securitygroups.list(self.gid)
        self.cs.assert_called('GET', '/groups/%s/securitygroups' % self.gid)
        for sg in got:
            self.assertIsInstance(sg, self.securitygroup_type)

    def test_get(self):
        """GET one securitygroup and check each attribute."""
        securitygroup_id = 'aaaaaaaa'
        sg = self.cs.securitygroups.get(self.gid, securitygroup_id)
        self.cs.assert_called(
            'GET',
            '/groups/%s/securitygroups/%s' % (self.gid, securitygroup_id))
        self.assertEqual(self.gid, sg.gid)
        self.assertEqual(self.user_id, sg.user_id)
        self.assertEqual(self.project_id, sg.project_id)
        self.assertEqual(securitygroup_id, sg.securitygroup_id)
        self.assertEqual('pppppppp', sg.neutron_securitygroup_id)
        self.assertEqual('securitygroup1', sg.name)
        self.assertEqual(True, sg.is_default)
        self.assertEqual('Exist', sg.status)

    def _create_body(self, name, is_default, rules):
        """Build the expected POST body for securitygroup creation."""
        return {
            'securitygroup': {
                'name': name,
                'is_default': is_default,
                'securitygrouprules': rules
            }
        }

    def test_create(self):
        """Create a securitygroup and verify the POST body."""
        name = 'securitygroup1'
        is_default = True
        rules = [{
            'protocol': 'tcp',
            'port_range_max': '80',
            'port_range_min': '80',
            'remote_ip_prefix': '0.0.0.0/0'
        }]
        sg = self.cs.securitygroups.create(self.gid, name, is_default,
                                           rules)
        body = self._create_body(name, is_default, rules)
        self.cs.assert_called(
            'POST', '/groups/%s/securitygroups' % self.gid, body)
        self.assertIsInstance(sg, self.securitygroup_type)

    def test_create_invalid_parameters(self):
        """A non-boolean is_default or non-list rules raises CommandError."""
        name = 'securitygroup1'
        rules = [{
            'protocol': 'tcp',
            'port_range_max': '80',
            'port_range_min': '80',
            'remote_ip_prefix': '0.0.0.0/0'
        }]
        self.assertRaises(exc.CommandError, self.cs.securitygroups.create,
                          self.gid, name, 'invalid', rules)
        self.assertRaises(exc.CommandError, self.cs.securitygroups.create,
                          self.gid, name, True, {})

    def _update_body(self, is_default):
        """Build the expected PUT body for a securitygroup update."""
        return {
            'securitygroup': {
                'is_default': is_default
            }
        }

    def test_update(self):
        """Update is_default and verify the PUT request."""
        is_default = True
        securitygroup_id = 'aaaaaaaa'
        sg = self.cs.securitygroups.update(self.gid, securitygroup_id,
                                           is_default)
        body = self._update_body(is_default)
        self.cs.assert_called(
            'PUT',
            '/groups/%s/securitygroups/%s' % (self.gid, securitygroup_id),
            body)
        self.assertIsInstance(sg, self.securitygroup_type)

    def test_update_invalid_parameters(self):
        """A non-boolean is_default raises CommandError."""
        self.assertRaises(exc.CommandError, self.cs.securitygroups.update,
                          self.gid, 'aaaaaaaa', 'invalid')

    def test_delete(self):
        """Delete a securitygroup and verify the DELETE request."""
        securitygroup_id = 'aaaaaaaa'
        self.cs.securitygroups.delete(self.gid, securitygroup_id)
        self.cs.assert_called(
            'DELETE',
            '/groups/%s/securitygroups/%s' % (self.gid, securitygroup_id))
|
|
@ -0,0 +1,486 @@
|
|||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from ConfigParser import NoOptionError
|
||||
import StringIO
|
||||
import copy
|
||||
import fixtures
|
||||
import mock
|
||||
from mock import mock_open
|
||||
import os
|
||||
import re
|
||||
from testtools import matchers
|
||||
from rackclient import exceptions
|
||||
import rackclient.shell
|
||||
from rackclient.v1.proxy import ProxyManager
|
||||
from rackclient.tests.v1 import fakes
|
||||
from rackclient.tests import utils
|
||||
|
||||
|
||||
class BaseShellTest(utils.TestCase):
    """Shared fixture: a RackShell wired to the fake client via env vars."""

    FAKE_ENV = {
        'RACK_URL': 'http://www.example.com:8088/v1',
        'RACK_GID': '11111111',
    }

    def setUp(self):
        """Run before each test."""
        super(BaseShellTest, self).setUp()
        # Export the fake environment so the shell picks up URL and GID.
        for key, value in self.FAKE_ENV.items():
            self.useFixture(fixtures.EnvironmentVariable(key, value))
        self.shell = rackclient.shell.RackShell()
        # Route every client lookup to the fake implementation.
        self.useFixture(fixtures.MonkeyPatch(
            'rackclient.client.get_client_class',
            lambda *_: fakes.FakeClient))

    def assert_called(self, method, url, body=None, **kwargs):
        """Delegate the request assertion to the fake client."""
        self.shell.cs.assert_called(method, url, body, **kwargs)

    def assert_called_anytime(self, method, url, body=None):
        """Assert the request happened at any point in the call history."""
        self.shell.cs.assert_called_anytime(method, url, body)

    @mock.patch('sys.stdout', new_callable=StringIO.StringIO)
    def run_command(self, cmd, mock_stdout):
        """Run a CLI command string (or argv list); return captured stdout."""
        argv = cmd if isinstance(cmd, list) else cmd.split()
        self.shell.main(argv)
        return mock_stdout.getvalue()
|
||||
|
||||
|
||||
class ShellTest(BaseShellTest):
    """End-to-end tests of the CLI commands against the fake client."""

    def _assert_command_error(self, cmd, required):
        """Run *cmd*, expecting a CommandError matching regex *required*.

        Unlike the previous inline try/except blocks, this fails the test
        explicitly when no exception is raised at all (the old pattern
        silently passed in that case).
        """
        try:
            self.run_command(cmd)
        except exceptions.CommandError as e:
            self.assertThat(str(e.message),
                            matchers.MatchesRegex(required,
                                                  re.DOTALL | re.MULTILINE))
        else:
            self.fail('CommandError was not raised')

    def test_group_list(self):
        self.run_command('group-list')
        self.assert_called('GET', '/groups')

    def test_group_show(self):
        self.run_command('group-show 11111111')
        # group-show fans out to every sub-resource; check the call order.
        self.assert_called('GET', '/groups/11111111', pos=-6)
        self.assert_called('GET', '/groups/11111111/keypairs', pos=-5)
        self.assert_called('GET', '/groups/11111111/securitygroups', pos=-4)
        self.assert_called('GET', '/groups/11111111/networks', pos=-3)
        self.assert_called('GET', '/groups/11111111/processes', pos=-2)
        self.assert_called('GET', '/groups/11111111/proxy', pos=-1)

    @mock.patch.object(ProxyManager, 'get', side_effect=Exception())
    def test_group_show_no_proxy(self, mock_proxy_manager):
        """If the proxy lookup fails, the pid column is rendered empty."""
        stdout = self.run_command('group-show 11111111')
        required = '.*?^\|\s+proxy \(pid\)\s+\|\s+\|'
        self.assertThat(stdout,
                        matchers.MatchesRegex(required,
                                              re.DOTALL | re.MULTILINE))

    def test_group_create(self):
        self.run_command('group-create --description detail group1')
        self.assert_called('POST', '/groups')

    def test_group_update(self):
        self.run_command('group-update --name group2 '
                         '--description detail2 11111111')
        self.assert_called('PUT', '/groups/11111111')

    def test_group_delete(self):
        self.run_command('group-delete 11111111')
        self.assert_called('DELETE', '/groups/11111111')

    def test_keypair_list(self):
        self.run_command('keypair-list')
        self.assert_called('GET', '/groups/11111111/keypairs')

    def test_keypair_show(self):
        self.run_command('keypair-show aaaaaaaa')
        self.assert_called('GET', '/groups/11111111/keypairs/aaaaaaaa')

    def test_keypair_create(self):
        self.run_command('keypair-create --name keypair1 '
                         '--is_default true')
        self.assert_called('POST', '/groups/11111111/keypairs')

    def test_keypair_update(self):
        self.run_command('keypair-update --is_default false aaaaaaaa')
        self.assert_called('PUT', '/groups/11111111/keypairs/aaaaaaaa')

    def test_keypair_delete(self):
        self.run_command('keypair-delete aaaaaaaa')
        self.assert_called('DELETE', '/groups/11111111/keypairs/aaaaaaaa')

    def test_securitygroup_list(self):
        self.run_command('securitygroup-list')
        self.assert_called('GET', '/groups/11111111/securitygroups')

    def test_securitygroup_show(self):
        self.run_command('securitygroup-show aaaaaaaa')
        self.assert_called('GET', '/groups/11111111/securitygroups/aaaaaaaa')

    def test_securitygroup_create(self):
        self.run_command('securitygroup-create --name securitygroup1 '
                         '--is_default true '
                         '--rule protocol=tcp,port_range_max=80,'
                         'port_range_min=80,remote_ip_prefix=10.0.0.0/24 '
                         '--rule protocol=icmp,remote_ip_prefix=10.0.0.0/24')
        self.assert_called('POST', '/groups/11111111/securitygroups')

    def test_securitygroup_update(self):
        self.run_command('securitygroup-update --is_default false aaaaaaaa')
        self.assert_called('PUT', '/groups/11111111/securitygroups/aaaaaaaa')

    def test_securitygroup_delete(self):
        self.run_command('securitygroup-delete aaaaaaaa')
        self.assert_called('DELETE',
                           '/groups/11111111/securitygroups/aaaaaaaa')

    def test_network_list(self):
        self.run_command('network-list')
        self.assert_called('GET', '/groups/11111111/networks')

    def test_network_show(self):
        self.run_command('network-show aaaaaaaa')
        self.assert_called('GET', '/groups/11111111/networks/aaaaaaaa')

    def test_network_create(self):
        self.run_command('network-create --name network1 '
                         '--is_admin true '
                         '--gateway_ip 10.0.0.254 '
                         '--dns_nameserver 8.8.8.8 '
                         '--dns_nameserver 8.8.4.4 '
                         '--ext_router_id rrrrrrrr '
                         '10.0.0.0/24')
        self.assert_called('POST', '/groups/11111111/networks')

    def test_network_delete(self):
        self.run_command('network-delete aaaaaaaa')
        self.assert_called('DELETE', '/groups/11111111/networks/aaaaaaaa')

    def test_process_list(self):
        self.run_command('process-list')
        self.assert_called('GET', '/groups/11111111/processes')

    def test_process_show(self):
        self.run_command('process-show aaaaaaaa')
        self.assert_called('GET', '/groups/11111111/processes/aaaaaaaa')

    def test_process_create(self):
        test_userdata = os.path.join(os.path.dirname(__file__),
                                     'test_userdata.txt')
        self.run_command('process-create --ppid aaaaaaaa '
                         '--name process1 '
                         '--nova_flavor_id yyyyyyyy '
                         '--glance_image_id xxxxxxxx '
                         '--keypair_id iiiiiiii '
                         '--securitygroup_id jjjjjjjj '
                         '--securitygroup_id kkkkkkkk '
                         '--userdata %s '
                         '--args key1=value1,key2=value2' % test_userdata)
        self.assert_called('POST', '/groups/11111111/processes')

    def test_process_create_with_no_option(self):
        self.run_command('process-create')
        self.assert_called('POST', '/groups/11111111/processes')

    def test_process_could_not_open_userdata_file(self):
        self._assert_command_error('process-create --ppid aaaaaaaa '
                                   '--userdata not_exists.txt',
                                   ".*?^Can't open 'not_exists.txt'")

    def test_process_with_args_file(self):
        test_args = os.path.join(os.path.dirname(__file__), 'test_args.txt')
        self.run_command('process-create --ppid aaaaaaaa '
                         '--args %s' % test_args)
        self.assert_called('POST', '/groups/11111111/processes')

    @mock.patch('__builtin__.open', side_effect=IOError())
    def test_process_could_not_open_args_file(self, m_open):
        test_args = os.path.join(os.path.dirname(__file__), 'test_args.txt')
        self._assert_command_error(
            'process-create --ppid aaaaaaaa --args %s' % test_args,
            ".*?^Can't open '.*?rackclient/tests/v1/test_args.txt'")

    def test_process_invalid_args_file(self):
        invalid_args = os.path.join(os.path.dirname(__file__),
                                    'test_invalid_args.txt')
        self._assert_command_error(
            'process-create --ppid aaaaaaaa --args %s' % invalid_args,
            '.*?rackclient/tests/v1/test_invalid_args.txt '
            'is not the format of key=value lines')

    def test_process_invalid_args(self):
        self._assert_command_error(
            'process-create --ppid aaaaaaaa --args key1value1',
            '.*?^\'key1value1\' is not in the format of key=value')

    def test_process_update(self):
        self.run_command('process-update --app_status ACTIVE aaaaaaaa')
        self.assert_called('PUT', '/groups/11111111/processes/aaaaaaaa')

    def test_process_delete(self):
        self.run_command('process-delete aaaaaaaa')
        self.assert_called('DELETE', '/groups/11111111/processes/aaaaaaaa')

    def test_proxy_show(self):
        self.run_command('proxy-show')
        self.assert_called('GET', '/groups/11111111/proxy')

    def test_proxy_create(self):
        test_userdata = os.path.join(os.path.dirname(__file__),
                                     'test_userdata.txt')
        self.run_command('proxy-create '
                         '--name proxy '
                         '--nova_flavor_id yyyyyyyy '
                         '--glance_image_id xxxxxxxx '
                         '--keypair_id iiiiiiii '
                         '--securitygroup_id jjjjjjjj '
                         '--securitygroup_id kkkkkkkk '
                         '--userdata %s '
                         '--args key1=value1,key2=value2' % test_userdata)
        self.assert_called('POST', '/groups/11111111/proxy')

    def test_proxy_create_with_no_option(self):
        self.run_command('proxy-create')
        self.assert_called('POST', '/groups/11111111/proxy')

    # Renamed from 'test_procexy_...' (typo in the original method name).
    def test_proxy_could_not_open_userdata_file(self):
        self._assert_command_error('proxy-create --userdata not_exists.txt',
                                   ".*?^Can't open 'not_exists.txt'")

    def test_proxy_with_args_file(self):
        test_args = os.path.join(os.path.dirname(__file__), 'test_args.txt')
        self.run_command('proxy-create '
                         '--args %s' % test_args)
        self.assert_called('POST', '/groups/11111111/proxy')

    @mock.patch('__builtin__.open', side_effect=IOError())
    def test_proxy_could_not_open_args_file(self, m_open):
        test_args = os.path.join(os.path.dirname(__file__), 'test_args.txt')
        self._assert_command_error(
            'proxy-create --args %s' % test_args,
            ".*?^Can't open '.*?rackclient/tests/v1/test_args.txt'")

    def test_proxy_invalid_args_file(self):
        invalid_args = os.path.join(os.path.dirname(__file__),
                                    'test_invalid_args.txt')
        self._assert_command_error(
            'proxy-create --args %s' % invalid_args,
            '.*?rackclient/tests/v1/test_invalid_args.txt '
            'is not the format of key=value lines')

    def test_proxy_invalid_args(self):
        self._assert_command_error(
            'proxy-create --args key1value1',
            '.*?^\'key1value1\' is not in the format of key=value')

    def test_proxy_update(self):
        self.run_command('proxy-update --app_status ACTIVE '
                         '--shm_endpoint shm_endpoint '
                         '--ipc_endpoint ipc_endpoint '
                         '--fs_endpoint fs_endpoint')
        self.assert_called('PUT', '/groups/11111111/proxy')
|
||||
|
||||
|
||||
class ShellGroupInitTest(BaseShellTest):
    """Tests for 'group-init', driven by a mocked ConfigParser."""

    # Baseline config; individual tests copy and mutate it.
    CONFIG = {
        'group': {
            'name': 'group1',
            'description': 'This is group1'
        },
        'keypair': {
            'name': 'keypair1',
            'is_default': 't'
        },
        'securitygroup': {
            'name': 'securitygroup1',
            'is_default': 't',
            'rules': 'protocol=tcp,port_range_max=80,'
                     'port_range_min=80,remote_ip_prefix=10.0.0.0/24 '
                     'protocol=icmp,remote_ip_prefix=10.0.0.0/24'
        },
        'network': {
            'cidr': '10.0.0.0/24',
            'name': 'network1',
            'is_admin': 't',
            'gateway_ip': '10.0.0.254',
            'dns_nameservers': '8.8.8.8 8.8.4.4',
            'ext_router_id': 'rrrrrrrr'
        },
        'proxy': {
            'name': 'proxy',
            'nova_flavor_id': 'yyyyyyyy',
            'glance_image_id': 'xxxxxxxx',
            'args': 'key1=value1,key2=value2'
        }
    }

    def setUp(self):
        super(ShellGroupInitTest, self).setUp()
        self.config = copy.deepcopy(self.CONFIG)
        self.patcher = mock.patch('rackclient.v1.shell.ConfigParser')
        self.mock_config = self.patcher.start()

    def tearDown(self):
        # Fixed: the original called super(BaseShellTest, self), which
        # skips a level of the MRO and bypasses BaseShellTest's cleanup.
        super(ShellGroupInitTest, self).tearDown()
        self.patcher.stop()

    def _fake_get(self, section, key):
        """Mimic ConfigParser.get over the in-memory config dict."""
        try:
            return self.config[section][key]
        except KeyError:
            raise NoOptionError(key, section)

    def _assert_group_init_error(self, required):
        """Run group-init, expecting a CommandError matching *required*.

        Fails explicitly when no exception is raised (the previous inline
        try/except pattern silently passed in that case).
        """
        try:
            self.run_command('group-init /path/to/group.conf')
        except exceptions.CommandError as e:
            self.assertThat(
                str(e.message),
                matchers.MatchesRegex(required,
                                      re.DOTALL | re.MULTILINE))
        else:
            self.fail('CommandError was not raised')

    def test_group_init(self):
        test_userdata = os.path.join(os.path.dirname(__file__),
                                     'test_userdata.txt')
        self.config['proxy']['userdata'] = test_userdata
        self.mock_config.return_value.get = self._fake_get
        self.run_command('group-init /path/to/group.conf')

    def test_group_init_with_required(self):
        """group-init succeeds with only the required options present."""
        self.config['group'].pop('description')
        self.config['keypair'].pop('name')
        self.config['keypair'].pop('is_default')
        self.config['securitygroup'].pop('name')
        self.config['securitygroup'].pop('is_default')
        self.config['securitygroup'].pop('rules')
        self.config['network'].pop('name')
        self.config['network'].pop('is_admin')
        self.config['network'].pop('gateway_ip')
        self.config['network'].pop('dns_nameservers')
        self.config['network'].pop('ext_router_id')
        self.config['proxy'].pop('name')
        self.config['proxy'].pop('args')
        self.mock_config.return_value.get = self._fake_get
        self.run_command('group-init /path/to/group.conf')

    def test_group_init_without_group_name(self):
        self.config['group'].pop('name')
        self.mock_config.return_value.get = self._fake_get
        self._assert_group_init_error('.*?^Group name is required.')

    def test_group_init_invalid_securitygroup_rules(self):
        self.config['securitygroup']['rules'] = 'invalid'
        self.mock_config.return_value.get = self._fake_get
        self._assert_group_init_error(
            '.*?^Could not create a securitygroup: '
            'securitygroup rules are not valid formart: '
            '\'.*\' is not in the format of key=value')

    def test_group_init_without_network_cidr(self):
        self.config['network'].pop('cidr')
        self.mock_config.return_value.get = self._fake_get
        self._assert_group_init_error('.*?^Network cidr is required.')

    def test_group_init_without_proxy_nova_flavor_id(self):
        self.config['proxy'].pop('nova_flavor_id')
        self.mock_config.return_value.get = self._fake_get
        self._assert_group_init_error('.*?^Flavor id is required.')

    def test_group_init_without_proxy_glance_image_id(self):
        self.config['proxy'].pop('glance_image_id')
        self.mock_config.return_value.get = self._fake_get
        self._assert_group_init_error('.*?^Image id is required.')

    @mock.patch('__builtin__.open', side_effect=IOError())
    def test_group_init_could_not_open_userdata(self, m_open):
        self.config['proxy']['userdata'] = 'fake_userdata.txt'
        self.mock_config.return_value.get = self._fake_get
        self._assert_group_init_error('.*?^Can\'t open fake_userdata.txt.')

    def test_group_init_invalid_args(self):
        self.config['proxy']['args'] = 'key1:value1'
        self.mock_config.return_value.get = self._fake_get
        self._assert_group_init_error('.*?^\'key1:value1\' is not '
                                      'in the format of key=value')
|
|
@ -0,0 +1,2 @@
|
|||
#!/bin/sh
|
||||
echo Hello
|
|
@ -21,7 +21,21 @@ from rackclient.v1.securitygroups import SecuritygroupManager
|
|||
|
||||
|
||||
class Client(object):
|
||||
"""
|
||||
Top-level Object to access the rack API.
|
||||
|
||||
Create an rackclient instance::
|
||||
|
||||
>>> from rackclient.v1 import client
|
||||
>>> client = client.Client()
|
||||
|
||||
Then call methods on its managers::
|
||||
|
||||
>>> client.processes.list()
|
||||
...
|
||||
>>> client.groups.list()
|
||||
...
|
||||
"""
|
||||
def __init__(self, rack_url=None, http_log_debug=False):
|
||||
self.rack_url = rack_url
|
||||
self.http_log_debug = http_log_debug
|
||||
|
|
|
@ -25,9 +25,20 @@ class GroupManager(base.Manager):
|
|||
resource_class = Group
|
||||
|
||||
def list(self):
    """
    Get a list of all groups.

    :rtype: list of Group.
    """
    return self._list("/groups", "groups")
|
||||
|
||||
def get(self, gid):
    """
    Get a group.

    :param gid: ID of the group to get.
    :rtype: Group.
    """
    return self._get("/groups/%s" % gid, "group")
|
||||
|
||||
def _build_body(self, name, description=None):
|
||||
|
@ -39,12 +50,30 @@ class GroupManager(base.Manager):
|
|||
}
|
||||
|
||||
def create(self, name, description=None):
    """
    Create a group.

    :param name: Name of the group.
    :param description: Description of the group.
    """
    body = self._build_body(name, description)
    return self._create("/groups", body, "group")
|
||||
|
||||
def update(self, gid, name, description=None):
    """
    Update the name or the description of the group.

    :param gid: ID of the group.
    :param name: Name of the group to update.
    :param description: Description of the group to update.
    """
    body = self._build_body(name, description)
    return self._update("/groups/%s" % gid, body, "group")
|
||||
|
||||
def delete(self, gid):
    """
    Delete a group.

    :param gid: ID of the group to delete.
    """
    # Docstring placed first; the rendered span showed the _delete call
    # duplicated around a mid-body string literal (a no-op statement).
    self._delete("/groups/%s" % gid)
|
||||
|
|
|
@ -27,12 +27,32 @@ class KeypairManager(base.Manager):
|
|||
resource_class = Keypair
|
||||
|
||||
def list(self, gid):
    """
    Get a list of all keypairs in the specified group.

    :param gid: ID of the group.
    :rtype: list of Keypair.
    """
    return self._list("/groups/%s/keypairs" % gid, "keypairs")
|
||||
|
||||
def get(self, gid, keypair_id):
    """
    Get a keypair.

    :param gid: ID of the group.
    :param keypair_id: ID of the keypair to get.
    :rtype: Keypair.
    """
    return self._get("/groups/%s/keypairs/%s" % (gid, keypair_id),
                     "keypair")
|
||||
|
||||
def create(self, gid, name=None, is_default=False):
|
||||
"""
|
||||
Create a keypair.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param name: Name of the keypair.
|
||||
:param is_default: Set to the default keypair of the group.
|
||||
"""
|
||||
try:
|
||||
is_default = strutils.bool_from_string(is_default, True)
|
||||
except Exception:
|
||||
|
@ -47,6 +67,13 @@ class KeypairManager(base.Manager):
|
|||
return self._create("/groups/%s/keypairs" % gid, body, "keypair")
|
||||
|
||||
def update(self, gid, keypair_id, is_default):
|
||||
"""
|
||||
Update the status of keypair.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param keypair_id: ID of the keypair to update.
|
||||
:param is_default: Set to the default keypair of the group.
|
||||
"""
|
||||
try:
|
||||
is_default = strutils.bool_from_string(is_default, True)
|
||||
except Exception:
|
||||
|
@ -60,4 +87,10 @@ class KeypairManager(base.Manager):
|
|||
return self._update("/groups/%s/keypairs/%s" % (gid, keypair_id), body, "keypair")
|
||||
|
||||
def delete(self, gid, keypair_id):
|
||||
self._delete("/groups/%s/keypairs/%s" % (gid, keypair_id))
|
||||
"""
|
||||
Delete a keypair.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param keypair_id: ID of the keypair to delete.
|
||||
"""
|
||||
self._delete("/groups/%s/keypairs/%s" % (gid, keypair_id))
|
||||
|
|
|
@ -28,12 +28,36 @@ class NetworkManager(base.Manager):
|
|||
resource_class = Network
|
||||
|
||||
def list(self, gid):
|
||||
"""
|
||||
Get a list of all networks in the specified group.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:rtype: list of Network.
|
||||
"""
|
||||
return self._list("/groups/%s/networks" % gid, "networks")
|
||||
|
||||
def get(self, gid, network_id):
|
||||
"""
|
||||
Get a network.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param network_id: ID of the network to get.
|
||||
:rtype: Network.
|
||||
"""
|
||||
return self._get("/groups/%s/networks/%s" % (gid, network_id), "network")
|
||||
|
||||
def create(self, gid, cidr, name=None, is_admin=False, gateway=None, dns_nameservers=None, ext_router_id=None):
|
||||
"""
|
||||
Create a network.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param cidr: CIDR of the new network.
|
||||
:param name: Name of the new network.
|
||||
:param is_admin: is_admin.
|
||||
:param gateway: Gateway ip address of the new network.
|
||||
:param list dns_nameservers: List of DNS servers for the new network.
|
||||
:param ext_router_id: Router id the new network connect to.
|
||||
"""
|
||||
def _is_valid_cidr(address):
|
||||
try:
|
||||
netaddr.IPNetwork(address)
|
||||
|
@ -75,4 +99,10 @@ class NetworkManager(base.Manager):
|
|||
return self._create("/groups/%s/networks" % gid, body, "network")
|
||||
|
||||
def delete(self, gid, network_id):
|
||||
self._delete("/groups/%s/networks/%s" % (gid, network_id))
|
||||
"""
|
||||
Delete a network.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param network_id: ID of the network to delete.
|
||||
"""
|
||||
self._delete("/groups/%s/networks/%s" % (gid, network_id))
|
||||
|
|
|
@ -27,9 +27,22 @@ class ProcessManager(base.Manager):
|
|||
resource_class = Process
|
||||
|
||||
def list(self, gid):
|
||||
"""
|
||||
Get a list of all processes in the specified group.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:rtype: list of Process.
|
||||
"""
|
||||
return self._list("/groups/%s/processes" % gid, "processes")
|
||||
|
||||
def get(self, gid, pid):
|
||||
"""
|
||||
Get a server.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param pid: ID of the process to get.
|
||||
:rtype: Process.
|
||||
"""
|
||||
return self._get("/groups/%s/processes/%s" % (gid, pid), "process")
|
||||
|
||||
def create(self, gid, ppid=None, **kwargs):
|
||||
|
@ -42,13 +55,13 @@ class ProcessManager(base.Manager):
|
|||
|
||||
Parameters in kwargs:
|
||||
|
||||
:param name: string
|
||||
:param nova_flavor_id: string
|
||||
:param glance_image_id: string
|
||||
:param keypair_id: string
|
||||
:param securitygroup_ids: a list of strings
|
||||
:param userdata: file type object or string
|
||||
:param dict args: a dict of key-value pairs to be stored as metadata
|
||||
:param name: Name of the new process
|
||||
:param nova_flavor_id: ID of a flavor
|
||||
:param glance_image_id: ID of a glance image
|
||||
:param keypair_id: ID of a keypair
|
||||
:param list securitygroup_ids: List of IDs of securitygroups
|
||||
:param userdata: file type object or string of script
|
||||
:param dict args: Dict of key-value pairs to be stored as metadata
|
||||
'''
|
||||
|
||||
securitygroup_ids = kwargs.get('securitygroup_ids')
|
||||
|
@ -80,6 +93,13 @@ class ProcessManager(base.Manager):
|
|||
return self._create("/groups/%s/processes" % gid, body, "process")
|
||||
|
||||
def update(self, gid, pid, app_status):
|
||||
"""
|
||||
Update status of process.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param pid: ID of the process.
|
||||
:param app_status: Application layer status of the process.
|
||||
"""
|
||||
body = {
|
||||
"process": {
|
||||
"app_status": app_status
|
||||
|
@ -88,4 +108,10 @@ class ProcessManager(base.Manager):
|
|||
return self._update("/groups/%s/processes/%s" % (gid, pid), body, "process")
|
||||
|
||||
def delete(self, gid, pid):
|
||||
self._delete("/groups/%s/processes/%s" % (gid, pid))
|
||||
"""
|
||||
Delete a process.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param pid: ID of the process to delete.
|
||||
"""
|
||||
self._delete("/groups/%s/processes/%s" % (gid, pid))
|
||||
|
|
|
@ -27,21 +27,27 @@ class ProxyManager(base.Manager):
|
|||
resource_class = Proxy
|
||||
|
||||
def get(self, gid):
|
||||
"""
|
||||
Get a rack-proxy process information.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:rtype: Process
|
||||
"""
|
||||
return self._get("/groups/%s/proxy" % (gid), "proxy")
|
||||
|
||||
def create(self, gid, name=None, nova_flavor_id=None, glance_image_id=None, keypair_id=None,
|
||||
securitygroup_ids=None, userdata=None, args=None):
|
||||
"""
|
||||
Create a RACK proxy.
|
||||
Create a rack-proxy process.
|
||||
|
||||
:param gid: string
|
||||
:param name: string
|
||||
:param nova_flavor_id: string
|
||||
:param glance_image_id: string
|
||||
:param keypair_id: string
|
||||
:param securitygroup_ids: a list of strings
|
||||
:param userdata: file type object or string
|
||||
:param dict args: a dict of key-value pairs to be stored as metadata
|
||||
:param gid: ID of a group
|
||||
:param name: Name of the rack-proxy process
|
||||
:param nova_flavor_id: ID of a flavor
|
||||
:param glance_image_id: ID of a glance image
|
||||
:param keypair_id: ID of a keypair
|
||||
:param securitygroup_ids: List of IDs of securitygroups
|
||||
:param userdata: file type object or string of script
|
||||
:param dict args: Dict of key-value pairs to be stored as metadata
|
||||
"""
|
||||
|
||||
if securitygroup_ids is not None and not isinstance(securitygroup_ids, list):
|
||||
|
@ -70,13 +76,13 @@ class ProxyManager(base.Manager):
|
|||
|
||||
def update(self, gid, shm_endpoint=None, ipc_endpoint=None, fs_endpoint=None, app_status=None):
|
||||
"""
|
||||
Update parameters of a RACK proxy.
|
||||
Update parameters of a rack-proxy process.
|
||||
|
||||
:param gid: string
|
||||
:param shm_endpoint: A endpoint of Shared memory. Arbitrary string value.
|
||||
:param ipc_endpoint: A endpoint of IPC. Arbitrary string value.
|
||||
:param fs_endpoint: A endpoint of File System. Arbitrary string value.
|
||||
:param app_status: An application layer status of a RACK proxy, assuming 'ACTIVE' or 'ERROR'.
|
||||
:param gid: ID of a group
|
||||
:param shm_endpoint: An endpoint of Shared memory. Arbitrary string value.
|
||||
:param ipc_endpoint: An endpoint of IPC. Arbitrary string value.
|
||||
:param fs_endpoint: An endpoint of File System. Arbitrary string value.
|
||||
:param app_status: Application layer status of a rack-proxy process.
|
||||
"""
|
||||
|
||||
body = {
|
||||
|
|
|
@ -27,12 +27,33 @@ class SecuritygroupManager(base.Manager):
|
|||
resource_class = Securitygroup
|
||||
|
||||
def list(self, gid):
|
||||
"""
|
||||
Get a list of all securitygroups in the specified group.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:rtype: list of Securitygroup.
|
||||
"""
|
||||
return self._list("/groups/%s/securitygroups" % gid, "securitygroups")
|
||||
|
||||
def get(self, gid, securitygroup_id):
|
||||
"""
|
||||
Get a securitygroup.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param securitygroup_id: ID of the securitygroup to get.
|
||||
:rtype: Securitygroup.
|
||||
"""
|
||||
return self._get("/groups/%s/securitygroups/%s" % (gid, securitygroup_id), "securitygroup")
|
||||
|
||||
def create(self, gid, name=None, is_default=False, securitygroup_rules=None):
|
||||
"""
|
||||
Create a securitygroup.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param name: Name of the securitygroup.
|
||||
:param is_default: Set to the default securitygroup of the group.
|
||||
:param list securitygroup_rules: List of rules of the securitygroup.
|
||||
"""
|
||||
try:
|
||||
is_default = strutils.bool_from_string(is_default, True)
|
||||
except Exception:
|
||||
|
@ -52,6 +73,13 @@ class SecuritygroupManager(base.Manager):
|
|||
return self._create("/groups/%s/securitygroups" % gid, body, "securitygroup")
|
||||
|
||||
def update(self, gid, securitygroup_id, is_default=False):
|
||||
"""
|
||||
Update status of securitygroup.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param securitygroup_id: ID of the securitygroup to update.
|
||||
:param is_default: Set to the default securitygroup of the group.
|
||||
"""
|
||||
try:
|
||||
is_default = strutils.bool_from_string(is_default, True)
|
||||
except Exception:
|
||||
|
@ -65,4 +93,10 @@ class SecuritygroupManager(base.Manager):
|
|||
return self._update("/groups/%s/securitygroups/%s" % (gid, securitygroup_id), body, "securitygroup")
|
||||
|
||||
def delete(self, gid, securitygroup_id):
|
||||
self._delete("/groups/%s/securitygroups/%s" % (gid, securitygroup_id))
|
||||
"""
|
||||
Delete a securitygroup.
|
||||
|
||||
:param gid: ID of the group.
|
||||
:param securitygroup_id: ID of the securitygroup to delete.
|
||||
"""
|
||||
self._delete("/groups/%s/securitygroups/%s" % (gid, securitygroup_id))
|
||||
|
|
|
@ -1,11 +1,27 @@
|
|||
from ConfigParser import ConfigParser, NoOptionError
|
||||
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from ConfigParser import ConfigParser
|
||||
from ConfigParser import NoOptionError
|
||||
|
||||
import argparse
|
||||
import os
|
||||
from oslo.utils import strutils
|
||||
import prettytable
|
||||
|
||||
from oslo.utils import strutils
|
||||
from rackclient import exceptions
|
||||
from rackclient.openstack.common import cliutils
|
||||
from rackclient.openstack.common.gettextutils import _
|
||||
from rackclient import exceptions
|
||||
from rackclient.v1.syscall.default import signal
|
||||
from rackclient.v1.syscall.default import file as rackfile
|
||||
|
||||
|
@ -24,6 +40,9 @@ def _keyvalue_to_dict(text):
|
|||
|
||||
|
||||
def do_group_list(cs, args):
|
||||
"""
|
||||
Print a list of all groups.
|
||||
"""
|
||||
groups = cs.groups.list()
|
||||
fields = ['gid', 'name', 'description', 'status']
|
||||
print_list(groups, fields, sortby='gid')
|
||||
|
@ -34,6 +53,9 @@ def do_group_list(cs, args):
|
|||
metavar='<gid>',
|
||||
help=_("Group id"))
|
||||
def do_group_show(cs, args):
|
||||
"""
|
||||
Show details about the given group.
|
||||
"""
|
||||
group = cs.groups.get(args.gid)
|
||||
keypairs = cs.keypairs.list(args.gid)
|
||||
securitygroups = cs.securitygroups.list(args.gid)
|
||||
|
@ -63,6 +85,9 @@ def do_group_show(cs, args):
|
|||
metavar='<description>',
|
||||
help=_("Details of the new group"))
|
||||
def do_group_create(cs, args):
|
||||
"""
|
||||
Create a new group.
|
||||
"""
|
||||
group = cs.groups.create(args.name, args.description)
|
||||
d = group._info
|
||||
print_dict(d)
|
||||
|
@ -81,6 +106,9 @@ def do_group_create(cs, args):
|
|||
metavar='<description>',
|
||||
help=_("Details of the group"))
|
||||
def do_group_update(cs, args):
|
||||
"""
|
||||
Update the specified group.
|
||||
"""
|
||||
group = cs.groups.update(args.gid, args.name, args.description)
|
||||
d = group._info
|
||||
print_dict(d)
|
||||
|
@ -91,10 +119,16 @@ def do_group_update(cs, args):
|
|||
metavar='<gid>',
|
||||
help=_("Group id"))
|
||||
def do_group_delete(cs, args):
|
||||
"""
|
||||
Delete the specified group.
|
||||
"""
|
||||
cs.groups.delete(args.gid)
|
||||
|
||||
|
||||
def do_keypair_list(cs, args):
|
||||
"""
|
||||
Print a list of all keypairs in the specified group.
|
||||
"""
|
||||
keypairs = cs.keypairs.list(args.gid)
|
||||
fields = ['keypair_id', 'name', 'is_default', 'status']
|
||||
print_list(keypairs, fields, sortby='keypair_id')
|
||||
|
@ -105,6 +139,9 @@ def do_keypair_list(cs, args):
|
|||
metavar='<keypair_id>',
|
||||
help=_("Keypair ID"))
|
||||
def do_keypair_show(cs, args):
|
||||
"""
|
||||
Show details about the given keypair.
|
||||
"""
|
||||
keypair = cs.keypairs.get(args.gid, args.keypair_id)
|
||||
d = keypair._info
|
||||
print_dict(d)
|
||||
|
@ -121,6 +158,9 @@ def do_keypair_show(cs, args):
|
|||
type=lambda v: strutils.bool_from_string(v, True),
|
||||
default=False)
|
||||
def do_keypair_create(cs, args):
|
||||
"""
|
||||
Create a new keypair.
|
||||
"""
|
||||
keypair = cs.keypairs.create(args.gid, args.name, args.is_default)
|
||||
d = keypair._info
|
||||
print_dict(d)
|
||||
|
@ -137,6 +177,9 @@ def do_keypair_create(cs, args):
|
|||
type=lambda v: strutils.bool_from_string(v, True),
|
||||
default=True)
|
||||
def do_keypair_update(cs, args):
|
||||
"""
|
||||
Update the specified keypair.
|
||||
"""
|
||||
keypair = cs.keypairs.update(args.gid, args.keypair_id, args.is_default)
|
||||
d = keypair._info
|
||||
print_dict(d)
|
||||
|
@ -147,10 +190,16 @@ def do_keypair_update(cs, args):
|
|||
metavar='<keypair_id>',
|
||||
help=_("Keypair id"))
|
||||
def do_keypair_delete(cs, args):
|
||||
"""
|
||||
Delete the specified keypair.
|
||||
"""
|
||||
cs.keypairs.delete(args.gid, args.keypair_id)
|
||||
|
||||
|
||||
def do_securitygroup_list(cs, args):
|
||||
"""
|
||||
Print a list of all security groups in the specified group.
|
||||
"""
|
||||
securitygroups = cs.securitygroups.list(args.gid)
|
||||
fields = [
|
||||
'securitygroup_id', 'name', 'is_default', 'status'
|
||||
|
@ -163,6 +212,9 @@ def do_securitygroup_list(cs, args):
|
|||
metavar='<securitygroup_id>',
|
||||
help=_("Securitygroup id"))
|
||||
def do_securitygroup_show(cs, args):
|
||||
"""
|
||||
Show details about the given security group.
|
||||
"""
|
||||
securitygroup = cs.securitygroups.get(args.gid, args.securitygroup_id)
|
||||
d = securitygroup._info
|
||||
print_dict(d)
|
||||
|
@ -180,8 +232,9 @@ def do_securitygroup_show(cs, args):
|
|||
default=False)
|
||||
@cliutils.arg(
|
||||
'--rule',
|
||||
metavar="<protocol=tcp|udp|icmp,port_range_max=integer,port_range_min=integer,"
|
||||
"remote_ip_prefix=cidr,remote_securitygroup_id=securitygroup_uuid>",
|
||||
metavar="<protocol=tcp|udp|icmp,port_range_max=integer,"
|
||||
"port_range_min=integer,remote_ip_prefix=cidr,"
|
||||
"remote_securitygroup_id=securitygroup_uuid>",
|
||||
action='append',
|
||||
type=_keyvalue_to_dict,
|
||||
dest='rules',
|
||||
|
@ -194,7 +247,13 @@ def do_securitygroup_show(cs, args):
|
|||
"remote_securitygroup_id: Remote securitygroup id to apply rule. "
|
||||
"(Can be repeated)"))
|
||||
def do_securitygroup_create(cs, args):
|
||||
securitygroup = cs.securitygroups.create(args.gid, args.name, args.is_default, args.rules)
|
||||
"""
|
||||
Create a new security group.
|
||||
"""
|
||||
securitygroup = cs.securitygroups.create(args.gid,
|
||||
args.name,
|
||||
args.is_default,
|
||||
args.rules)
|
||||
d = securitygroup._info
|
||||
print_dict(d)
|
||||
|
||||
|
@ -210,7 +269,12 @@ def do_securitygroup_create(cs, args):
|
|||
type=lambda v: strutils.bool_from_string(v, True),
|
||||
default=True)
|
||||
def do_securitygroup_update(cs, args):
|
||||
securitygroup = cs.securitygroups.update(args.gid, args.securitygroup_id, args.is_default)
|
||||
"""
|
||||
Update the specified security group.
|
||||
"""
|
||||
securitygroup = cs.securitygroups.update(args.gid,
|
||||
args.securitygroup_id,
|
||||
args.is_default)
|
||||
d = securitygroup._info
|
||||
print_dict(d)
|
||||
|
||||
|
@ -220,10 +284,16 @@ def do_securitygroup_update(cs, args):
|
|||
metavar='<securitygroup_id>',
|
||||
help=_("Securitygroup id"))
|
||||
def do_securitygroup_delete(cs, args):
|
||||
"""
|
||||
Delete the specified security group.
|
||||
"""
|
||||
cs.securitygroups.delete(args.gid, args.securitygroup_id)
|
||||
|
||||
|
||||
def do_network_list(cs, args):
|
||||
"""
|
||||
Print a list of all networks in the specified group.
|
||||
"""
|
||||
networks = cs.networks.list(args.gid)
|
||||
fields = [
|
||||
'network_id', 'name', 'is_admin', 'status'
|
||||
|
@ -236,6 +306,9 @@ def do_network_list(cs, args):
|
|||
metavar='<network_id>',
|
||||
help=_("network id"))
|
||||
def do_network_show(cs, args):
|
||||
"""
|
||||
Show details about the given network.
|
||||
"""
|
||||
network = cs.networks.get(args.gid, args.network_id)
|
||||
d = network._info
|
||||
print_dict(d)
|
||||
|
@ -270,8 +343,16 @@ def do_network_show(cs, args):
|
|||
metavar='<ext_router_id>',
|
||||
help=_("Router id the new network connects to"))
|
||||
def do_network_create(cs, args):
|
||||
network = cs.networks.create(args.gid, args.cidr, args.name, args.is_admin,
|
||||
args.gateway_ip, args.dns_nameservers, args.ext_router_id)
|
||||
"""
|
||||
Create a network.
|
||||
"""
|
||||
network = cs.networks.create(args.gid,
|
||||
args.cidr,
|
||||
args.name,
|
||||
args.is_admin,
|
||||
args.gateway_ip,
|
||||
args.dns_nameservers,
|
||||
args.ext_router_id)
|
||||
d = network._info
|
||||
print_dict(d)
|
||||
|
||||
|
@ -281,10 +362,16 @@ def do_network_create(cs, args):
|
|||
metavar='<network_id>',
|
||||
help=_("network id"))
|
||||
def do_network_delete(cs, args):
|
||||
"""
|
||||
Delete the specified network.
|
||||
"""
|
||||
cs.networks.delete(args.gid, args.network_id)
|
||||
|
||||
|
||||
def do_process_list(cs, args):
|
||||
"""
|
||||
Print a list of all processes in the specified group.
|
||||
"""
|
||||
processes = cs.processes.list(args.gid)
|
||||
fields = [
|
||||
'pid', 'ppid', 'name', 'status'
|
||||
|
@ -297,6 +384,9 @@ def do_process_list(cs, args):
|
|||
metavar='<pid>',
|
||||
help=_("process ID"))
|
||||
def do_process_show(cs, args):
|
||||
"""
|
||||
Show details about the given process.
|
||||
"""
|
||||
process = cs.processes.get(args.gid, args.pid)
|
||||
d = process._info
|
||||
print_process(d)
|
||||
|
@ -338,13 +428,15 @@ def do_process_show(cs, args):
|
|||
metavar='<key1=value1,key2=value2 or a file including key=value lines>',
|
||||
help=_("Key-value pairs to be passed to metadata server"))
|
||||
def do_process_create(cs, args):
|
||||
"""
|
||||
Create a new process.
|
||||
"""
|
||||
if args.userdata:
|
||||
try:
|
||||
userdata = open(args.userdata)
|
||||
except IOError as e:
|
||||
raise exceptions.CommandError(
|
||||
_("Can't open '%(userdata)s': %(exc)s") %
|
||||
{'userdata': args.userdata, 'exc': e})
|
||||
_("Can't open '%s'") % args.userdata)
|
||||
else:
|
||||
userdata = None
|
||||
|
||||
|
@ -352,19 +444,16 @@ def do_process_create(cs, args):
|
|||
if os.path.exists(args.args):
|
||||
try:
|
||||
f = open(args.args)
|
||||
except IOError as e:
|
||||
raise exceptions.CommandError(
|
||||
_("Can't open '%(args)s': %(exc)s") %
|
||||
{'args': args.args, 'exc': e})
|
||||
options = {}
|
||||
for line in f:
|
||||
try:
|
||||
options = {}
|
||||
for line in f:
|
||||
k, v = line.split('=', 1)
|
||||
options.update({k.strip(): v.strip()})
|
||||
except ValueError:
|
||||
raise exceptions.CommandError(
|
||||
_("%(args)s is not the format of key=value lines")
|
||||
)
|
||||
except IOError as e:
|
||||
raise exceptions.CommandError(
|
||||
_("Can't open '%s'") % args.args)
|
||||
except ValueError:
|
||||
raise exceptions.CommandError(
|
||||
_("%s is not the format of key=value lines") % args.args)
|
||||
else:
|
||||
try:
|
||||
options = _keyvalue_to_dict(args.args)
|
||||
|
@ -373,9 +462,14 @@ def do_process_create(cs, args):
|
|||
else:
|
||||
options = None
|
||||
|
||||
process = cs.processes.create(args.gid, ppid=args.ppid, name=args.name, nova_flavor_id=args.nova_flavor_id,
|
||||
glance_image_id=args.glance_image_id, keypair_id=args.keypair_id,
|
||||
securitygroup_ids=args.securitygroup_ids, userdata=userdata,
|
||||
process = cs.processes.create(args.gid,
|
||||
ppid=args.ppid,
|
||||
name=args.name,
|
||||
nova_flavor_id=args.nova_flavor_id,
|
||||
glance_image_id=args.glance_image_id,
|
||||
keypair_id=args.keypair_id,
|
||||
securitygroup_ids=args.securitygroup_ids,
|
||||
userdata=userdata,
|
||||
args=options)
|
||||
d = process._info
|
||||
print_process(d)
|
||||
|
@ -390,6 +484,9 @@ def do_process_create(cs, args):
|
|||
metavar='<app_status>',
|
||||
help=_("Application layer status of the process"))
|
||||
def do_process_update(cs, args):
|
||||
"""
|
||||
Update the specified process.
|
||||
"""
|
||||
process = cs.processes.update(args.gid, args.pid, args.app_status)
|
||||
d = process._info
|
||||
print_process(d)
|
||||
|
@ -400,10 +497,16 @@ def do_process_update(cs, args):
|
|||
metavar='<pid>',
|
||||
help=_("Process id"))
|
||||
def do_process_delete(cs, args):
|
||||
"""
|
||||
Delete the specified process.
|
||||
"""
|
||||
cs.processes.delete(args.gid, args.pid)
|
||||
|
||||
|
||||
def do_proxy_show(cs, args):
|
||||
"""
|
||||
Show details about the given rack-proxy process.
|
||||
"""
|
||||
proxy = cs.proxy.get(args.gid)
|
||||
d = proxy._info
|
||||
print_process(d)
|
||||
|
@ -441,13 +544,15 @@ def do_proxy_show(cs, args):
|
|||
metavar='<key1=value1,key2=value2 or a file including key=value lines>',
|
||||
help=_("Key-value pairs to be passed to metadata server"))
|
||||
def do_proxy_create(cs, args):
|
||||
"""
|
||||
Create a new rack-proxy process.
|
||||
"""
|
||||
if args.userdata:
|
||||
try:
|
||||
userdata = open(args.userdata)
|
||||
except IOError as e:
|
||||
raise exceptions.CommandError(
|
||||
_("Can't open '%(userdata)s': %(exc)s") %
|
||||
{'userdata': args.userdata, 'exc': e})
|
||||
_("Can't open '%s'") % args.userdata)
|
||||
else:
|
||||
userdata = None
|
||||
|
||||
|
@ -455,19 +560,16 @@ def do_proxy_create(cs, args):
|
|||
if os.path.exists(args.args):
|
||||
try:
|
||||
f = open(args.args)
|
||||
except IOError as e:
|
||||
raise exceptions.CommandError(
|
||||
_("Can't open '%(args)s': %(exc)s") %
|
||||
{'args': args.args, 'exc': e})
|
||||
options = {}
|
||||
for line in f:
|
||||
try:
|
||||
options = {}
|
||||
for line in f:
|
||||
k, v = line.split('=', 1)
|
||||
options.update({k.strip(): v.strip()})
|
||||
except ValueError:
|
||||
raise exceptions.CommandError(
|
||||
_("%(args)s is not the format of key=value lines")
|
||||
)
|
||||
except IOError as e:
|
||||
raise exceptions.CommandError(
|
||||
_("Can't open '%s'") % args.args)
|
||||
except ValueError:
|
||||
raise exceptions.CommandError(
|
||||
_("%s is not the format of key=value lines") % args.args)
|
||||
else:
|
||||
try:
|
||||
options = _keyvalue_to_dict(args.args)
|
||||
|
@ -476,9 +578,13 @@ def do_proxy_create(cs, args):
|
|||
else:
|
||||
options = None
|
||||
|
||||
proxy = cs.proxy.create(args.gid, name=args.name, nova_flavor_id=args.nova_flavor_id,
|
||||
glance_image_id=args.glance_image_id, keypair_id=args.keypair_id,
|
||||
securitygroup_ids=args.securitygroup_ids, userdata=userdata,
|
||||
proxy = cs.proxy.create(args.gid,
|
||||
name=args.name,
|
||||
nova_flavor_id=args.nova_flavor_id,
|
||||
glance_image_id=args.glance_image_id,
|
||||
keypair_id=args.keypair_id,
|
||||
securitygroup_ids=args.securitygroup_ids,
|
||||
userdata=userdata,
|
||||
args=options)
|
||||
d = proxy._info
|
||||
print_process(d)
|
||||
|
@ -501,7 +607,14 @@ def do_proxy_create(cs, args):
|
|||
metavar='<app_status>',
|
||||
help=_("Application layer status of the proxy"))
|
||||
def do_proxy_update(cs, args):
|
||||
proxy = cs.proxy.update(args.gid, args.shm_endpoint, args.ipc_endpoint, args.fs_endpoint, args.app_status)
|
||||
"""
|
||||
Update the specified rack-proxy process.
|
||||
"""
|
||||
proxy = cs.proxy.update(args.gid,
|
||||
args.shm_endpoint,
|
||||
args.ipc_endpoint,
|
||||
args.fs_endpoint,
|
||||
args.app_status)
|
||||
d = proxy._info
|
||||
print_process(d)
|
||||
|
||||
|
@ -549,6 +662,10 @@ def print_dict(d):
|
|||
metavar='<config-file>',
|
||||
help=_("Configuration file included parameters of the new group"))
|
||||
def do_group_init(cs, args):
|
||||
"""
|
||||
Create a group, a keypair, a security group, a network and
|
||||
a rack-proxy based on the specified configuration file.
|
||||
"""
|
||||
config = ConfigParser()
|
||||
config.read(args.config)
|
||||
|
||||
|
@ -561,6 +678,7 @@ def do_group_init(cs, args):
|
|||
description = config.get('group', 'description')
|
||||
except NoOptionError:
|
||||
description = None
|
||||
|
||||
group = cs.groups.create(name, description)
|
||||
d = group._info
|
||||
print_dict(d)
|
||||
|
@ -574,12 +692,9 @@ def do_group_init(cs, args):
|
|||
name = None
|
||||
try:
|
||||
is_default = config.get('keypair', 'is_default')
|
||||
if is_default:
|
||||
strutils.bool_from_string(is_default, True)
|
||||
else:
|
||||
is_default = False
|
||||
except NoOptionError:
|
||||
is_default = False
|
||||
|
||||
keypair = cs.keypairs.create(gid, name, is_default)
|
||||
d = keypair._info
|
||||
print_dict(d)
|
||||
|
@ -591,10 +706,6 @@ def do_group_init(cs, args):
|
|||
name = None
|
||||
try:
|
||||
is_default = config.get('securitygroup', 'is_default')
|
||||
if is_default:
|
||||
strutils.bool_from_string(is_default, True)
|
||||
else:
|
||||
is_default = False
|
||||
except NoOptionError:
|
||||
is_default = False
|
||||
try:
|
||||
|
@ -603,6 +714,11 @@ def do_group_init(cs, args):
|
|||
rules[i] = _keyvalue_to_dict(rules[i])
|
||||
except NoOptionError:
|
||||
rules = []
|
||||
except argparse.ArgumentTypeError as e:
|
||||
raise exceptions.CommandError(
|
||||
_("Could not create a securitygroup: "
|
||||
"securitygroup rules are not valid formart: %s") % e.message)
|
||||
|
||||
securitygroup = cs.securitygroups.create(gid, name, is_default, rules)
|
||||
d = securitygroup._info
|
||||
print_dict(d)
|
||||
|
@ -618,16 +734,10 @@ def do_group_init(cs, args):
|
|||
name = None
|
||||
try:
|
||||
is_admin = config.get('network', 'is_admin')
|
||||
if is_admin:
|
||||
strutils.bool_from_string(is_admin, True)
|
||||
else:
|
||||
is_admin = False
|
||||
except NoOptionError:
|
||||
is_admin = False
|
||||
try:
|
||||
gateway_ip = config.get('network', 'gateway_ip')
|
||||
if gateway_ip == '':
|
||||
gateway_ip = None
|
||||
except NoOptionError:
|
||||
gateway_ip = None
|
||||
try:
|
||||
|
@ -638,6 +748,7 @@ def do_group_init(cs, args):
|
|||
ext_router_id = config.get('network', 'ext_router_id')
|
||||
except NoOptionError:
|
||||
ext_router_id = None
|
||||
|
||||
network = cs.networks.create(gid, cidr, name, is_admin,
|
||||
gateway_ip, dns_nameservers, ext_router_id)
|
||||
d = network._info
|
||||
|
@ -660,16 +771,26 @@ def do_group_init(cs, args):
|
|||
securitygroup_ids = [securitygroup.securitygroup_id]
|
||||
try:
|
||||
userdata = config.get('proxy', 'userdata')
|
||||
userdata = open(userdata)
|
||||
except NoOptionError:
|
||||
userdata = None
|
||||
except IOError:
|
||||
raise exceptions.CommandError(
|
||||
_("Can't open %s.") % userdata)
|
||||
try:
|
||||
proxy_args = config.get('proxy', 'args').replace(' ', '')
|
||||
proxy_args = config.get('proxy', 'args')
|
||||
proxy_args = _keyvalue_to_dict(proxy_args)
|
||||
except NoOptionError:
|
||||
proxy_args = None
|
||||
proxy = cs.proxy.create(gid, name=name, nova_flavor_id=nova_flavor_id,
|
||||
glance_image_id=glance_image_id, keypair_id=keypair_id,
|
||||
securitygroup_ids=securitygroup_ids, userdata=userdata,
|
||||
except argparse.ArgumentTypeError as e:
|
||||
raise exceptions.CommandError(e)
|
||||
|
||||
proxy = cs.proxy.create(gid, name=name,
|
||||
nova_flavor_id=nova_flavor_id,
|
||||
glance_image_id=glance_image_id,
|
||||
keypair_id=keypair_id,
|
||||
securitygroup_ids=securitygroup_ids,
|
||||
userdata=userdata,
|
||||
args=proxy_args)
|
||||
d = proxy._info
|
||||
print_process(d)
|
||||
|
@ -682,94 +803,3 @@ def do_group_init(cs, args):
|
|||
"proxy pid": proxy.pid
|
||||
}
|
||||
print_dict(result_dict)
|
||||
|
||||
|
||||
@cliutils.arg(
|
||||
'--ipc_endpoint',
|
||||
metavar='<ipc_endpoint>',
|
||||
help=_("The IPC Endpoint"))
|
||||
@cliutils.arg(
|
||||
'target_pid',
|
||||
metavar='<target_pid>',
|
||||
help=_("Target process id to send the message to"))
|
||||
@cliutils.arg(
|
||||
'message',
|
||||
metavar='<message>',
|
||||
help=_("Message to send"))
|
||||
def do_signal_send(cs, args):
|
||||
sig = signal.SignalManager(args.ipc_endpoint)
|
||||
sig.send(args.target_pid, args.message)
|
||||
|
||||
|
||||
@cliutils.arg(
|
||||
'--ipc_endpoint',
|
||||
metavar='<ipc_endpoint>',
|
||||
help=_("The IPC Endpoint"))
|
||||
@cliutils.arg(
|
||||
'target_pid',
|
||||
metavar='<target_pid>',
|
||||
help=_("Target process id to send the message to"))
|
||||
def do_signal_receive(cs, args):
|
||||
sig = signal.SignalManager(args.ipc_endpoint)
|
||||
|
||||
def _print_message(message):
|
||||
print message
|
||||
|
||||
sig.receive(_print_message, args.target_pid)
|
||||
|
||||
|
||||
def _filesystem_path(text):
|
||||
try:
|
||||
container, file_name = text.strip('/').split('/', 1)
|
||||
return (container, file_name)
|
||||
except ValueError:
|
||||
msg = "%r " % text
|
||||
raise argparse.ArgumentTypeError(msg)
|
||||
|
||||
|
||||
@cliutils.arg(
|
||||
'--proxy_ip',
|
||||
metavar='<proxy_ip>',
|
||||
help=_("rack-proxy's IP address"))
|
||||
@cliutils.arg(
|
||||
'target_path',
|
||||
metavar='</foo/bar/hoge.txt>',
|
||||
type=_filesystem_path,
|
||||
help=_("File path in the RACK file system to upload file to"))
|
||||
@cliutils.arg(
|
||||
'file',
|
||||
metavar='<PATH>',
|
||||
type=argparse.FileType('r'),
|
||||
help=_("File to upload to the RACK file system"))
|
||||
def do_file_put(cs, args):
|
||||
if args.proxy_ip:
|
||||
url = "http://" + args.proxy_ip + ":8080/auth/v1.0"
|
||||
else:
|
||||
url = None
|
||||
f = rackfile.File(args.target_path[0], args.target_path[1], mode='w', url=url)
|
||||
for l in args.file:
|
||||
f.write(l)
|
||||
f.close()
|
||||
|
||||
|
||||
@cliutils.arg(
|
||||
'--proxy_ip',
|
||||
metavar='<proxy_ip>',
|
||||
help=_("rack-proxy's IP address"))
|
||||
@cliutils.arg(
|
||||
'target_path',
|
||||
metavar='</foo/bar/hoge.txt>',
|
||||
type=_filesystem_path,
|
||||
help=_("File path in the RACK file system to download from"))
|
||||
def do_file_get(cs, args):
|
||||
if args.proxy_ip:
|
||||
url = "http://" + args.proxy_ip + ":8080/auth/v1.0"
|
||||
else:
|
||||
url = None
|
||||
rackf = rackfile.File(args.target_path[0], args.target_path[1], mode='r', url=url)
|
||||
filename = args.target_path[1].split('/')[-1]
|
||||
f = open(filename, 'w')
|
||||
for l in rackf.readlines():
|
||||
f.write(l)
|
||||
f.close()
|
||||
rackf.close()
|
||||
|
|
|
@ -1,27 +1,35 @@
|
|||
import json
|
||||
import logging
|
||||
import tempfile
|
||||
from rackclient import process_context
|
||||
from swiftclient import client as swift_client
|
||||
from swiftclient import exceptions as swift_exc
|
||||
from rackclient import process_context
|
||||
from rackclient import exceptions
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
PCTXT = process_context.PCTXT
|
||||
SWIFT_PORT = 8080
|
||||
|
||||
|
||||
def _get_swift_client(v1_authurl=None):
|
||||
if v1_authurl:
|
||||
authurl = v1_authurl
|
||||
elif PCTXT.fs_endpoint:
|
||||
d = json.loads(PCTXT.fs_endpoint)
|
||||
credentials = {
|
||||
"user": d["os_username"],
|
||||
"key": d["os_password"],
|
||||
"tenant_name": d["os_tenant_name"],
|
||||
"authurl": d["os_auth_url"],
|
||||
"auth_version": "2"
|
||||
}
|
||||
return swift_client.Connection(**credentials)
|
||||
def _get_swift_client():
|
||||
if PCTXT.fs_endpoint:
|
||||
try:
|
||||
d = json.loads(PCTXT.fs_endpoint)
|
||||
credentials = {
|
||||
"user": d["os_username"],
|
||||
"key": d["os_password"],
|
||||
"tenant_name": d["os_tenant_name"],
|
||||
"authurl": d["os_auth_url"],
|
||||
"auth_version": "2"
|
||||
}
|
||||
return swift_client.Connection(**credentials)
|
||||
except (ValueError, KeyError):
|
||||
msg = "The format of fs_endpoint is invalid."
|
||||
raise exceptions.InvalidFSEndpointError(msg)
|
||||
else:
|
||||
authurl = "http://" + ':'.join([PCTXT.proxy_ip, str(SWIFT_PORT)]) + "/auth/v1.0"
|
||||
authurl = "http://%s:%d/auth/v1.0" % (PCTXT.proxy_ip, SWIFT_PORT)
|
||||
|
||||
credentials = {
|
||||
"user": "rack:admin",
|
||||
|
@ -33,59 +41,101 @@ def _get_swift_client(v1_authurl=None):
|
|||
return swift_client.Connection(preauthurl=authurl, preauthtoken=token)
|
||||
|
||||
|
||||
def get_objects(container, url=None):
|
||||
swift = _get_swift_client(url)
|
||||
objects = []
|
||||
for f in swift.get_container(container)[1]:
|
||||
objects.append(f["name"])
|
||||
return objects
|
||||
def listdir(directory):
|
||||
swift = _get_swift_client()
|
||||
directory = directory.strip('/')
|
||||
|
||||
|
||||
def load(container, name, chunk_size=None, url=None):
|
||||
swift = _get_swift_client(url)
|
||||
return swift.get_object(container, name, resp_chunk_size=chunk_size)[1]
|
||||
|
||||
|
||||
def save(container, name, data, url=None):
|
||||
swift = _get_swift_client(url)
|
||||
files = []
|
||||
try:
|
||||
swift.put_container(container)
|
||||
except:
|
||||
pass
|
||||
return swift.put_object(container, name, data)
|
||||
objects = swift.get_container(directory)[1]
|
||||
for o in objects:
|
||||
file_path = '/' + directory + '/' + o['name']
|
||||
files.append(File(file_path))
|
||||
except swift_exc.ClientException as e:
|
||||
if e.http_status == 404:
|
||||
msg = "Directory '%s' does not exist." % directory
|
||||
raise exceptions.InvalidDirectoryError(msg)
|
||||
else:
|
||||
raise exceptions.FileSystemAccessError()
|
||||
|
||||
return files
|
||||
|
||||
|
||||
class File(object):
|
||||
def __init__(self, container, name, mode="r", chunk_size=102400000, url=None):
|
||||
self.container = container
|
||||
self.name = name
|
||||
self.mode = mode
|
||||
self.url = url
|
||||
self.file = tempfile.TemporaryFile()
|
||||
if self.mode == 'r':
|
||||
self._rsync(chunk_size=chunk_size)
|
||||
elif self.mode == 'w':
|
||||
pass
|
||||
|
||||
def __init__(self, file_path, mode="r"):
|
||||
self.path = file_path
|
||||
self.file = None
|
||||
if mode not in ('r', 'w'):
|
||||
raise ValueError(
|
||||
"mode must be 'r' or 'w', not %s" % mode)
|
||||
else:
|
||||
raise ValueError("mode string must begin with 'r' or 'w', not %s" % mode)
|
||||
self.mode = mode
|
||||
|
||||
def _load(self, chunk_size=None):
|
||||
return load(self.container, self.name, chunk_size=chunk_size, url=self.url)
|
||||
def get_name(self):
|
||||
return self.path.strip('/').split('/', 1)[1]
|
||||
|
||||
def _rsync(self, chunk_size=None):
|
||||
for c in self._load(chunk_size=chunk_size):
|
||||
self.file.write(c)
|
||||
self.file.flush()
|
||||
self.file.seek(0)
|
||||
def get_directory(self):
|
||||
return self.path.strip('/').split('/', 1)[0]
|
||||
|
||||
def _save(self):
|
||||
return save(self.container, self.name, self.file, url=self.url)
|
||||
def load(self, chunk_size=None):
|
||||
if self.file:
|
||||
return
|
||||
|
||||
if self.mode == 'r':
|
||||
self.file = tempfile.TemporaryFile()
|
||||
swift = _get_swift_client()
|
||||
|
||||
try:
|
||||
_, contents = swift.get_object(self.get_directory(),
|
||||
self.get_name(), chunk_size)
|
||||
if chunk_size:
|
||||
for c in contents:
|
||||
self.file.write(c)
|
||||
else:
|
||||
self.file.write(contents)
|
||||
self.file.flush()
|
||||
self.file.seek(0)
|
||||
except swift_exc.ClientException as e:
|
||||
if e.http_status == 404:
|
||||
msg = "File '%s' does not exist." % self.path
|
||||
raise exceptions.InvalidFilePathError(msg)
|
||||
else:
|
||||
raise exceptions.FileSystemAccessError()
|
||||
|
||||
def write(self, *args, **kwargs):
|
||||
if not self.file:
|
||||
self.file = tempfile.TemporaryFile()
|
||||
|
||||
self.file.write(*args, **kwargs)
|
||||
|
||||
def close(self):
|
||||
if self.mode == "w":
|
||||
self.file.seek(0)
|
||||
self._save()
|
||||
if self.mode == 'w':
|
||||
swift = _get_swift_client()
|
||||
|
||||
try:
|
||||
swift.put_container(self.get_directory())
|
||||
self.file.seek(0)
|
||||
swift.put_object(self.get_directory(), self.get_name(),
|
||||
self.file)
|
||||
except swift_exc.ClientException as e:
|
||||
if e.http_status == 404:
|
||||
msg = ("Directory '%s' does not exist. "
|
||||
"The file object will be closed."
|
||||
% self.get_directory())
|
||||
raise exceptions.InvalidDirectoryError(msg)
|
||||
else:
|
||||
msg = ("Could not save the file to the file system. "
|
||||
"The file object will be closed.")
|
||||
raise exceptions.FileSystemAccessError(msg)
|
||||
finally:
|
||||
self.file.close()
|
||||
|
||||
self.file.close()
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self.file, name)
|
||||
if self.file:
|
||||
return getattr(self.file, name)
|
||||
else:
|
||||
raise AttributeError("%s instance has no attribute '%s'",
|
||||
self.__class__.__name__, name)
|
||||
|
|
|
@ -0,0 +1,114 @@
|
|||
import cPickle
|
||||
import logging
|
||||
import pika
|
||||
from rackclient import exceptions
|
||||
from rackclient import process_context
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
PCTXT = process_context.PCTXT
|
||||
|
||||
|
||||
class Messaging(object):
|
||||
def __init__(self):
|
||||
self.connection = _create_connection()
|
||||
self.channel = self.connection.channel()
|
||||
self.declare_queue(PCTXT.pid)
|
||||
|
||||
def declare_queue(self, queue_name):
|
||||
queue_name = str(queue_name)
|
||||
self.channel.exchange_declare(exchange=PCTXT.gid, type='topic')
|
||||
self.channel.queue_declare(queue=queue_name)
|
||||
self.channel.queue_bind(exchange=PCTXT.gid,
|
||||
queue=queue_name,
|
||||
routing_key=PCTXT.gid + '.' + queue_name)
|
||||
|
||||
def receive_all_msg(self, queue_name=None,
|
||||
timeout_limit=180, msg_limit_count=None):
|
||||
if not queue_name:
|
||||
queue_name = PCTXT.pid
|
||||
|
||||
self.channel = self.connection.channel()
|
||||
receive = self.Receive()
|
||||
timeout_limit = int(timeout_limit)
|
||||
self.connection.add_timeout(deadline=timeout_limit,
|
||||
callback_method=receive.time_out)
|
||||
self.channel.basic_consume(receive.get_all_msg,
|
||||
queue=queue_name,
|
||||
no_ack=False)
|
||||
receive.channel = self.channel
|
||||
receive.msg_count_limit = msg_limit_count
|
||||
self.channel.start_consuming()
|
||||
return receive.message_list
|
||||
|
||||
def receive_msg(self, queue_name=None, timeout_limit=180):
|
||||
if not queue_name:
|
||||
queue_name = PCTXT.pid
|
||||
self.channel = self.connection.channel()
|
||||
receive = self.Receive()
|
||||
timeout_limit = int(timeout_limit)
|
||||
self.connection.add_timeout(deadline=timeout_limit,
|
||||
callback_method=receive.time_out)
|
||||
self.channel.basic_consume(receive.get_msg,
|
||||
queue=queue_name,
|
||||
no_ack=False)
|
||||
receive.channel = self.channel
|
||||
self.channel.start_consuming()
|
||||
return receive.message
|
||||
|
||||
class Receive(object):
|
||||
def __init__(self):
|
||||
self.channel = None
|
||||
self.message = None
|
||||
self.message_list = []
|
||||
self.msg_count_limit = None
|
||||
|
||||
def get_all_msg(self, ch, method, properties, body):
|
||||
ch.basic_ack(delivery_tag=method.delivery_tag)
|
||||
self.message_list.append(cPickle.loads(body))
|
||||
msg_count = len(self.message_list)
|
||||
LOG.debug("Received message count. %s", msg_count)
|
||||
if self.msg_count_limit and self.msg_count_limit <= msg_count:
|
||||
ch.stop_consuming()
|
||||
|
||||
def get_msg(self, ch, method, properties, body):
|
||||
self.message = cPickle.loads(body)
|
||||
ch.basic_ack(delivery_tag=method.delivery_tag)
|
||||
ch.stop_consuming()
|
||||
|
||||
def time_out(self):
|
||||
self.channel.stop_consuming()
|
||||
|
||||
def send_msg(self, target, message=None):
|
||||
routing_key = PCTXT.gid + '.' + target
|
||||
send_dict = {'pid': PCTXT.pid}
|
||||
if message:
|
||||
send_dict['message'] = message
|
||||
send_msg = cPickle.dumps(send_dict)
|
||||
self.channel.basic_publish(exchange=PCTXT.gid,
|
||||
routing_key=routing_key,
|
||||
body=send_msg)
|
||||
|
||||
|
||||
def _create_connection():
|
||||
if PCTXT.ipc_endpoint:
|
||||
connection_param = pika.ConnectionParameters(PCTXT.ipc_endpoint)
|
||||
else:
|
||||
connection_param = pika.ConnectionParameters(PCTXT.proxy_ip)
|
||||
try:
|
||||
connection = pika.BlockingConnection(connection_param)
|
||||
except pika.exceptions.AMQPConnectionError as e:
|
||||
raise exceptions.AMQPConnectionError(e)
|
||||
return connection
|
||||
|
||||
|
||||
def init():
|
||||
msg = Messaging()
|
||||
if PCTXT.ppid:
|
||||
LOG.debug("Messaging: send message to %s", PCTXT.ppid)
|
||||
msg.send_msg(PCTXT.ppid)
|
||||
while True:
|
||||
receive_msg = msg.receive_msg()
|
||||
if receive_msg and PCTXT.ppid == receive_msg.get("pid"):
|
||||
LOG.debug("Messaging: receive message from %s",
|
||||
receive_msg.get("pid"))
|
||||
break
|
|
@ -1,14 +1,13 @@
|
|||
import logging
|
||||
import time
|
||||
from datetime import datetime
|
||||
|
||||
import redis
|
||||
|
||||
from rackclient import process_context
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
PCTXT = process_context.PCTXT
|
||||
|
||||
|
||||
class EndOfFile(Exception):
|
||||
message = 'EOF'
|
||||
|
@ -57,14 +56,6 @@ def reference_key_pattern(name="*", pid="*"):
|
|||
return name + ":" + pid
|
||||
|
||||
|
||||
def get_host():
|
||||
return process_context.PCTXT.proxy_ip
|
||||
|
||||
|
||||
def get_pid():
|
||||
return process_context.PCTXT.pid
|
||||
|
||||
|
||||
PIPE = 1
|
||||
FIFO = 2
|
||||
PORT = 6379
|
||||
|
@ -73,7 +64,7 @@ PORT = 6379
|
|||
class Pipe:
|
||||
def __init__(self, name=None, read=None, write=None):
|
||||
now = datetime.now()
|
||||
self.host = get_host()
|
||||
self.host = PCTXT.proxy_ip
|
||||
self.port = PORT
|
||||
if name:
|
||||
self.is_named = True
|
||||
|
@ -84,13 +75,13 @@ class Pipe:
|
|||
else:
|
||||
self.is_named = False
|
||||
self.r = redis.StrictRedis(host=self.host, port=self.port, db=PIPE)
|
||||
parent_pipe = self.r.keys(reference_key_pattern(pid=get_pid()))
|
||||
parent_pipe = self.r.keys(reference_key_pattern(pid=PCTXT.pid))
|
||||
if parent_pipe:
|
||||
self.name = self.r.get(parent_pipe[0])
|
||||
else:
|
||||
self.name = get_pid()
|
||||
read_state = self.r.hget(read_state_key(self.name), get_pid()) or now
|
||||
write_state = self.r.hget(write_state_key(self.name), get_pid()) or now
|
||||
self.name = PCTXT.pid
|
||||
read_state = self.r.hget(read_state_key(self.name), PCTXT.pid) or now
|
||||
write_state = self.r.hget(write_state_key(self.name), PCTXT.pid) or now
|
||||
if read is not None:
|
||||
if read:
|
||||
read_state = now
|
||||
|
@ -103,8 +94,8 @@ class Pipe:
|
|||
write_state = "close"
|
||||
self.read_state = read_state
|
||||
self.write_state = write_state
|
||||
self.r.hset(read_state_key(self.name), get_pid(), self.read_state)
|
||||
self.r.hset(write_state_key(self.name), get_pid(), self.write_state)
|
||||
self.r.hset(read_state_key(self.name), PCTXT.pid, self.read_state)
|
||||
self.r.hset(write_state_key(self.name), PCTXT.pid, self.write_state)
|
||||
|
||||
def read(self):
|
||||
if self.read_state == "close":
|
||||
|
@ -134,11 +125,11 @@ class Pipe:
|
|||
|
||||
def close_reader(self):
|
||||
self.read_state = "close"
|
||||
self.r.hset(read_state_key(self.name), get_pid(), self.read_state)
|
||||
self.r.hset(read_state_key(self.name), PCTXT.pid, self.read_state)
|
||||
|
||||
def close_writer(self):
|
||||
self.write_state = "close"
|
||||
self.r.hset(write_state_key(self.name), get_pid(), self.write_state)
|
||||
self.r.hset(write_state_key(self.name), PCTXT.pid, self.write_state)
|
||||
|
||||
def has_reader(self):
|
||||
read_states = self.r.hvals(read_state_key(self.name))
|
||||
|
@ -167,7 +158,10 @@ class Pipe:
|
|||
self.r.delete(*tuple(keys))
|
||||
|
||||
@classmethod
|
||||
def flush_by_pid(cls, pid, host=get_host()):
|
||||
def flush_by_pid(cls, pid, host=None):
|
||||
if not host:
|
||||
host = PCTXT.proxy_ip
|
||||
|
||||
r = redis.StrictRedis(host=host, port=PORT, db=PIPE)
|
||||
keys = [pid,
|
||||
read_state_key(pid),
|
||||
|
@ -176,7 +170,10 @@ class Pipe:
|
|||
r.delete(*tuple(keys))
|
||||
|
||||
@classmethod
|
||||
def flush_by_name(cls, name, host=get_host()):
|
||||
def flush_by_name(cls, name, host=None):
|
||||
if not host:
|
||||
host = PCTXT.proxy_ip
|
||||
|
||||
r = redis.StrictRedis(host=host, port=PORT, db=FIFO)
|
||||
keys = [name,
|
||||
read_state_key(name),
|
||||
|
@ -184,7 +181,10 @@ class Pipe:
|
|||
r.delete(*tuple(keys))
|
||||
|
||||
@classmethod
|
||||
def share(cls, ppid, pid, host=get_host()):
|
||||
def share(cls, ppid, pid, host=None):
|
||||
if not host:
|
||||
host = PCTXT.proxy_ip
|
||||
|
||||
now = datetime.now()
|
||||
r = redis.StrictRedis(host=host, port=PORT, db=PIPE)
|
||||
keys = r.keys(reference_key_pattern(pid=ppid))
|
||||
|
|
|
@ -1,23 +1,46 @@
|
|||
import logging
|
||||
import Queue
|
||||
import threading
|
||||
from rackclient import process_context
|
||||
from rackclient.v1.syscall.default import pipe as rackpipe, file as rackfile
|
||||
|
||||
from rackclient import exceptions
|
||||
from rackclient import process_context
|
||||
from rackclient.v1 import processes
|
||||
from rackclient.v1.syscall.default import messaging
|
||||
from rackclient.v1.syscall.default import pipe as rackpipe
|
||||
from rackclient.v1.syscall.default import file as rackfile
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
PCTXT = process_context.PCTXT
|
||||
|
||||
|
||||
def fork(pid=PCTXT.pid, is_async=False, **kwargs):
|
||||
if is_async:
|
||||
th = threading.Thread(target=_fork, args=[pid], kwargs=kwargs)
|
||||
th.start()
|
||||
return th
|
||||
else:
|
||||
return _fork(pid, **kwargs)
|
||||
def fork(opt_list, timeout_limit=180):
|
||||
LOG.debug("start fork")
|
||||
LOG.debug("fork create processes count: %s", len(opt_list))
|
||||
|
||||
return_process_list = []
|
||||
while True:
|
||||
try:
|
||||
child_list = _bulk_fork(PCTXT.pid, opt_list)
|
||||
success_list, error_list = _check_connection(PCTXT.pid,
|
||||
child_list,
|
||||
timeout_limit)
|
||||
except Exception as e:
|
||||
raise exceptions.ForkError(e)
|
||||
|
||||
def _fork(pid, **kwargs):
|
||||
child = PCTXT.client.processes.create(gid=PCTXT.gid, ppid=pid, **kwargs)
|
||||
rackpipe.Pipe.share(pid, child.pid)
|
||||
return child
|
||||
return_process_list += success_list
|
||||
if error_list:
|
||||
opt_list = []
|
||||
for error_process in error_list:
|
||||
args = error_process.args
|
||||
args.pop('gid')
|
||||
args.pop('pid')
|
||||
args.pop('ppid')
|
||||
args.pop('proxy_ip')
|
||||
opt_list.append(dict(args=args))
|
||||
else:
|
||||
break
|
||||
|
||||
return return_process_list
|
||||
|
||||
|
||||
def pipe(name=None):
|
||||
|
@ -25,5 +48,82 @@ def pipe(name=None):
|
|||
return p
|
||||
|
||||
|
||||
def fopen(container, file_name, mode="r"):
|
||||
return rackfile.File(container, file_name, mode)
|
||||
def fopen(file_path, mode="r"):
|
||||
return rackfile.File(file_path, mode)
|
||||
|
||||
|
||||
def _bulk_fork(pid, args_list):
|
||||
LOG.debug("start bulk_fork")
|
||||
q = Queue.Queue()
|
||||
|
||||
def _fork(pid, **kwargs):
|
||||
try:
|
||||
child = PCTXT.client.processes.create(gid=PCTXT.gid,
|
||||
ppid=pid,
|
||||
**kwargs)
|
||||
q.put(child)
|
||||
except Exception as e:
|
||||
attr = dict(args=kwargs, error=e)
|
||||
q.put(processes.Process(PCTXT.client, attr))
|
||||
|
||||
tg = []
|
||||
process_list = []
|
||||
while True:
|
||||
for args in args_list:
|
||||
t = threading.Thread(target=_fork, args=(pid,), kwargs=args)
|
||||
t.start()
|
||||
tg.append(t)
|
||||
|
||||
for t in tg:
|
||||
t.join()
|
||||
|
||||
args_list = []
|
||||
success_processes = []
|
||||
for i in range(q.qsize()):
|
||||
process = q.get()
|
||||
if hasattr(process, "error"):
|
||||
args_list.append(process.args)
|
||||
else:
|
||||
success_processes.append(process)
|
||||
|
||||
process_list += success_processes
|
||||
LOG.debug("bulk_fork success processes count: %s", len(process_list))
|
||||
if not success_processes:
|
||||
msg = "No child process is created."
|
||||
raise Exception(msg)
|
||||
elif not args_list:
|
||||
break
|
||||
return process_list
|
||||
|
||||
|
||||
def _check_connection(pid, process_list, timeout):
|
||||
LOG.debug("start check_connection")
|
||||
msg = messaging.Messaging()
|
||||
msg_list = msg.receive_all_msg(timeout_limit=timeout,
|
||||
msg_limit_count=len(process_list))
|
||||
|
||||
pid_list = []
|
||||
for message in msg_list:
|
||||
if message.get('pid'):
|
||||
pid_list.append(message.get('pid'))
|
||||
|
||||
actives = []
|
||||
inactives = []
|
||||
for process in process_list:
|
||||
if pid_list and process.pid in pid_list:
|
||||
rackpipe.Pipe.share(pid, process.pid)
|
||||
msg.send_msg(target=process.pid, message="start")
|
||||
actives.append(process)
|
||||
pid_list.remove(process.pid)
|
||||
else:
|
||||
PCTXT.client.processes.delete(PCTXT.gid, process.pid)
|
||||
inactives.append(process)
|
||||
|
||||
LOG.debug("_check_connection active processes count: %s", len(actives))
|
||||
LOG.debug("_check_connection inactive processes count: %s", len(inactives))
|
||||
|
||||
if not actives:
|
||||
msg = "No child process is active."
|
||||
raise Exception(msg)
|
||||
|
||||
return actives, inactives
|
|
@ -8,3 +8,4 @@ oslo.utils>=0.2.0
|
|||
PrettyTable>=0.7,<0.8
|
||||
websocket-client>=0.16.0
|
||||
python-keystoneclient>=0.11.2
|
||||
pika>=0.9.14
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
coverage
|
||||
discover
|
||||
fixtures
|
||||
testrepository
|
||||
testtools
|
||||
mock
|
|
@ -0,0 +1,32 @@
|
|||
[tox]
|
||||
envlist = py26,py27,py33,pep8
|
||||
minversion = 1.6
|
||||
skipsdist = True
|
||||
|
||||
[testenv]
|
||||
usedevelop = True
|
||||
install_command = pip install -U {opts} {packages}
|
||||
setenv = VIRTUAL_ENV={envdir}
|
||||
|
||||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
commands =
|
||||
find . -type f -name "*.pyc" -delete
|
||||
python setup.py testr --testr-args='{posargs}'
|
||||
|
||||
[testenv:pep8]
|
||||
commands = flake8 {posargs}
|
||||
|
||||
[testenv:venv]
|
||||
commands = {posargs}
|
||||
|
||||
[testenv:cover]
|
||||
commands = python setup.py testr --coverage --testr-args='{posargs}'
|
||||
|
||||
[tox:jenkins]
|
||||
downloadcache = ~/cache/pip
|
||||
|
||||
[flake8]
|
||||
ignore =
|
||||
show-source = True
|
||||
exclude=.venv,.git,.tox,dist,*openstack/common*,*lib/python*,*egg,build,doc/source/conf.py
|
Loading…
Reference in New Issue