add first functional test with infrastructure
Change-Id: If8400f4802a29352c752e8443c7b1b2f063d4dfa
This commit is contained in:
commit
c61d6fdf66
|
@ -0,0 +1,150 @@
|
|||
# Copyright 2014 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import testtools
|
||||
|
||||
from ec2api.tests.functional import base
|
||||
from ec2api.tests.functional import config
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class KeyPairTest(base.EC2TestCase):
    """Functional tests for the EC2 key pair API.

    Covers the CreateKeyPair / DeleteKeyPair / DescribeKeyPairs /
    ImportKeyPair happy paths plus the documented error codes for
    duplicate names, missing keys and malformed public-key input.
    """

    def test_create_delete_key_pair(self):
        """CreateKeyPair returns name, fingerprint and private key material."""
        keyName = 'Test key'
        resp, data = self.client.CreateKeyPair(KeyName=keyName)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        # Register cleanup immediately so the key is removed even if an
        # assertion below fails mid-test.
        res_clean = self.addResourceCleanUp(self.client.DeleteKeyPair,
                                            KeyName=keyName)

        self.assertEqual(keyName, data['KeyName'])
        self.assertIsNotNone(data.get('KeyFingerprint'))
        self.assertGreater(len(data['KeyFingerprint']), 0)
        # CreateKeyPair is the only call that returns the private key.
        self.assertGreater(len(data.get('KeyMaterial')), 0)

        resp, data = self.client.DeleteKeyPair(KeyName=keyName)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        self.cancelResourceCleanUp(res_clean)

    def test_create_duplicate_key_pair(self):
        """Creating a key pair with an existing name fails with 400."""
        keyName = 'Test key'
        resp, data = self.client.CreateKeyPair(KeyName=keyName)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        res_clean = self.addResourceCleanUp(self.client.DeleteKeyPair,
                                            KeyName=keyName)

        # Second create with the same name must be rejected.
        resp, data = self.client.CreateKeyPair(KeyName=keyName)
        self.assertEqual(400, resp.status_code, base.EC2ErrorConverter(data))
        self.assertEqual('InvalidKeyPair.Duplicate', data['Error']['Code'])

        resp, data = self.client.DeleteKeyPair(KeyName=keyName)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        self.cancelResourceCleanUp(res_clean)

    def test_describe_key_pairs(self):
        """DescribeKeyPairs works by name and by filter; 400 for absent keys."""
        keyName = 'Test key'
        resp, data = self.client.CreateKeyPair(KeyName=keyName)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        res_clean = self.addResourceCleanUp(self.client.DeleteKeyPair,
                                            KeyName=keyName)
        self.assertIsNotNone(data.get('KeyFingerprint'))
        self.assertGreater(len(data['KeyFingerprint']), 0)
        fingerprint = data.get('KeyFingerprint')

        # Lookup by explicit KeyNames list.
        resp, data = self.client.DescribeKeyPairs(KeyNames=[keyName])
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        self.assertEqual(1, len(data.get('KeyPairs')))
        data = data['KeyPairs'][0]
        self.assertEqual(keyName, data['KeyName'])
        self.assertIsNotNone(data.get('KeyFingerprint'))
        self.assertGreater(len(data['KeyFingerprint']), 0)
        # Describe must never leak the private key.
        self.assertIsNone(data.get('KeyMaterial'))

        # Lookup via the key-name filter.
        resp, data = self.client.DescribeKeyPairs(
            Filters=[{'Name': 'key-name', 'Values': [keyName]}])
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        self.assertEqual(1, len(data.get('KeyPairs')))
        self.assertEqual(keyName, data['KeyPairs'][0]['KeyName'])

        # Lookup via the fingerprint filter.
        resp, data = self.client.DescribeKeyPairs(
            Filters=[{'Name': 'fingerprint', 'Values': [fingerprint]}])
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        self.assertEqual(1, len(data.get('KeyPairs')))
        self.assertEqual(keyName, data['KeyPairs'][0]['KeyName'])

        # Unknown key name is an error, not an empty result.
        resp, data = self.client.DescribeKeyPairs(KeyNames=['fake key'])
        self.assertEqual(400, resp.status_code, base.EC2ErrorConverter(data))
        self.assertEqual('InvalidKeyPair.NotFound', data['Error']['Code'])

        resp, data = self.client.DeleteKeyPair(KeyName=keyName)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        self.cancelResourceCleanUp(res_clean)

        # After deletion the same describe-by-name call must fail.
        resp, data = self.client.DescribeKeyPairs(KeyNames=[keyName])
        self.assertEqual(400, resp.status_code, base.EC2ErrorConverter(data))
        self.assertEqual('InvalidKeyPair.NotFound', data['Error']['Code'])

        # NOTE(andrey-mp): Amazon allows to delete absent key and returns 200
        resp, data = self.client.DeleteKeyPair(KeyName=keyName)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))

    def test_import_empty_key_pair(self):
        """Importing an empty public key is rejected with MissingParameter."""
        keyName = 'Test key'
        publicKey = ''
        resp, data = self.client.ImportKeyPair(KeyName=keyName,
                                               PublicKeyMaterial=publicKey)
        # If the server wrongly accepted the key, still clean it up
        # before the assertion below fails the test.
        if resp.status_code == 200:
            self.addResourceCleanUp(self.client.DeleteKeyPair, KeyName=keyName)
        self.assertEqual(400, resp.status_code, base.EC2ErrorConverter(data))
        self.assertEqual('MissingParameter', data['Error']['Code'])

    @testtools.skipUnless(CONF.aws.run_incompatible_tests,
                          "Different error code")
    def test_import_invalid_key_pair(self):
        """Importing garbage key material fails with InvalidKey.Format.

        Skipped by default because the ec2api implementation returns a
        different error code than Amazon here.
        """
        keyName = 'Test key'
        publicKey = 'ssh-rsa JUNK test@ubuntu'
        resp, data = self.client.ImportKeyPair(KeyName=keyName,
                                               PublicKeyMaterial=publicKey)
        if resp.status_code == 200:
            self.addResourceCleanUp(self.client.DeleteKeyPair, KeyName=keyName)
        self.assertEqual(400, resp.status_code, base.EC2ErrorConverter(data))
        self.assertEqual('InvalidKey.Format', data['Error']['Code'])

    def test_import_key_pair(self):
        """ImportKeyPair accepts a valid RSA public key; no material returned."""
        keyName = 'Test key'
        # A well-formed ssh-rsa public key (the user supplies the key,
        # so no private KeyMaterial should ever come back).
        publicKey = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCs"
                     "Ne3/1ILNCqFyfYWDeTKLD6jEXC2OQHLmietMWW+/vd"
                     "aZq7KZEwO0jhglaFjU1mpqq4Gz5RX156sCTNM9vRbw"
                     "KAxfsdF9laBYVsex3m3Wmui3uYrKyumsoJn2g9GNnG1P"
                     "I1mrVjZ61i0GY3khna+wzlTpCCmy5HNlrmbj3XLqBUpip"
                     "TOXmsnr4sChzC53KCd8LXuwc1i/CZPvF+3XipvAgFSE53pCt"
                     "LOeB1kYMOBaiUPLQTWXR3JpckqFIQwhIH0zoHlJvZE8hh90"
                     "XcPojYN56tI0OlrGqojbediJYD0rUsJu4weZpbn8vilb3JuDY+jws"
                     "snSA8wzBx3A/8y9Pp1B test@ubuntu")
        resp, data = self.client.ImportKeyPair(KeyName=keyName,
                                               PublicKeyMaterial=publicKey)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        res_clean = self.addResourceCleanUp(self.client.DeleteKeyPair,
                                            KeyName=keyName)

        self.assertEqual(keyName, data['KeyName'])
        self.assertIsNotNone(data.get('KeyFingerprint'))
        self.assertGreater(len(data['KeyFingerprint']), 0)
        self.assertIsNone(data.get('KeyMaterial'))

        resp, data = self.client.DeleteKeyPair(KeyName=keyName)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        self.cancelResourceCleanUp(res_clean)
|
|
@ -0,0 +1,458 @@
|
|||
# Copyright 2014 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
|
||||
import six
|
||||
from tempest_lib import base
|
||||
from tempest_lib import exceptions
|
||||
from tempest_lib.openstack.common import log
|
||||
import testtools
|
||||
|
||||
from ec2api.tests.functional import botocoreclient
|
||||
from ec2api.tests.functional import config as cfg
|
||||
|
||||
# Global test configuration and module-level logger.
CONF = cfg.CONF
LOG = log.getLogger(__name__)

# Quiet botocore's very chatty loggers so test logs stay readable:
# general botocore traffic at INFO, urllib3 connection-pool spam at
# WARNING only.
logging.getLogger('botocore').setLevel(logging.INFO)
logging.getLogger(
    'botocore.vendored.requests.packages.urllib3.connectionpool'
).setLevel(logging.WARNING)
|
||||
|
||||
|
||||
def safe_setup(f):
    """A decorator used to wrap the setUpClass for safe setup.

    If setup fails, tearDownClass is invoked (so any resources already
    created are released) and the original setup exception is re-raised
    with its traceback preserved.
    """

    def decorator(cls):
        try:
            f(cls)
        except Exception as se:
            etype, value, trace = sys.exc_info()
            LOG.exception("setUpClass failed: %s" % se)
            try:
                cls.tearDownClass()
            except Exception as te:
                LOG.exception("tearDownClass failed: %s" % te)
            try:
                # Re-raise the original failure.  The previous
                # ``raise etype(value), None, trace`` was malformed
                # Python-2-only syntax (a SyntaxError on Python 3);
                # six.reraise is the portable equivalent.
                six.reraise(etype, value, trace)
            finally:
                del trace  # for avoiding circular refs

    return decorator
|
||||
|
||||
|
||||
class EC2ErrorConverter(object):
    """Lazily renders an EC2 response body as a readable error string.

    Instances are passed as the ``msg`` argument of assertions so the
    server's error message and code only get formatted when the
    assertion actually fails.
    """

    _data = ''

    def __init__(self, data, *args, **kwargs):
        self._data = data

    def __str__(self):
        data = self._data
        if isinstance(data, six.string_types):
            return data
        if isinstance(data, dict) and 'Error' in data:
            error = data['Error']
            text = error.get('Message', '')
            if 'Code' in error:
                text += ' (' + error['Code'] + ')'
            return text
        # Anything else: fall back to the default string form.
        return str(data)
|
||||
|
||||
|
||||
class EC2ResponceException(Exception):
    """Raised when an EC2 API call returns an unexpected response.

    Carries the raw HTTP response object (``resp``) and the parsed
    response body (``data``) for the caller to inspect.
    """

    def __init__(self, resp, data):
        # Initialize Exception with a message so str()/logging of the
        # exception shows the server response instead of an empty string.
        super(EC2ResponceException, self).__init__(
            'Unexpected EC2 response: %s' % (data,))
        self.resp = resp
        self.data = data
|
||||
|
||||
|
||||
class EC2Waiter(object):
    """Polls a state-query function until a desired state is reached.

    ``wait_func(obj_id)`` must return the object's current state string,
    or raise ``exceptions.NotFound`` once the object no longer exists.
    Poll timeout and base interval come from CONF.aws.build_timeout and
    CONF.aws.build_interval.
    """

    def __init__(self, wait_func):
        self.wait_func = wait_func
        self.default_timeout = CONF.aws.build_timeout
        self.default_check_interval = CONF.aws.build_interval

    def _state_wait(self, f, f_args=None, f_kwargs=None,
                    final_set=set(), valid_set=None):
        """Poll ``f`` until its state is in ``final_set``.

        Returns the last observed state.  Also returns early (without
        raising) if the state leaves ``valid_set`` — callers are expected
        to assert on the returned value.  Raises the testtools failure
        exception on timeout.
        """
        # NOTE: the mutable default ``set()`` is safe here — the set is
        # only replaced, never mutated in place.
        # Normalize scalar arguments to one-element sets.
        if not isinstance(final_set, set):
            final_set = set((final_set,))
        if not isinstance(valid_set, set) and valid_set is not None:
            valid_set = set((valid_set,))
        interval = self.default_check_interval
        start_time = time.time()
        args = f_args if f_args is not None else []
        kwargs = f_kwargs if f_kwargs is not None else {}
        try:
            old_status = status = f(*args, **kwargs)
        except exceptions.NotFound:
            # A vanished object is represented as the synthetic state
            # "NotFound" so it can appear in final_set/valid_set.
            old_status = status = "NotFound"
        while True:
            if status != old_status:
                LOG.info('State transition "%s" ==> "%s" %d second',
                         old_status, status, time.time() - start_time)
            if status in final_set:
                return status
            # Leaving the valid set means an unexpected state; stop
            # polling and let the caller report it.
            if valid_set is not None and status not in valid_set:
                return status
            dtime = time.time() - start_time
            if dtime > self.default_timeout:
                raise testtools.TestCase.failureException(
                    "State change timeout exceeded!"
                    '(%ds) While waiting'
                    'for %s at "%s"' %
                    (dtime, final_set, status))
            time.sleep(interval)
            # Back off linearly between polls.
            interval += self.default_check_interval
            old_status = status
            try:
                status = f(*args, **kwargs)
            except exceptions.NotFound:
                status = "NotFound"

    def _state_wait_gone(self, f, f_args=None, f_kwargs=None):
        """Poll ``f`` until it raises NotFound (object fully deleted)."""
        interval = self.default_check_interval
        start_time = time.time()
        args = f_args if f_args is not None else []
        kwargs = f_kwargs if f_kwargs is not None else {}
        try:
            old_status = status = f(*args, **kwargs)
            while True:
                if status != old_status:
                    LOG.info('State transition "%s" ==> "%s" %d second',
                             old_status, status, time.time() - start_time)
                dtime = time.time() - start_time
                if dtime > self.default_timeout:
                    raise testtools.TestCase.failureException(
                        "State change timeout exceeded while waiting"
                        " for deleting")
                time.sleep(interval)
                interval += self.default_check_interval
                old_status = status
                status = f(*args, **kwargs)
        except exceptions.NotFound:
            # NotFound is the success condition here.
            pass

    def wait_available(self, obj_id, final_set=('available')):
        # NOTE: ``('available')`` is a plain string, not a tuple (no
        # trailing comma) — _state_wait normalizes it to a set anyway.
        self._state_wait(self.wait_func, f_args=[obj_id],
                         final_set=final_set)

    def wait_delete(self, obj_id):
        """Block until the object identified by obj_id is gone."""
        self._state_wait_gone(self.wait_func, f_args=[obj_id])

    def wait_no_exception(self, *args, **kwargs):
        """Block until wait_func succeeds without raising anything."""
        interval = self.default_check_interval
        start_time = time.time()
        while True:
            try:
                self.wait_func(*args, **kwargs)
                return
            except Exception:
                # Any failure means "not ready yet" — keep polling.
                pass

            dtime = time.time() - start_time
            if dtime > self.default_timeout:
                raise testtools.TestCase.failureException(
                    "Timeout exceeded while waiting")
            time.sleep(interval)
            interval += self.default_check_interval
|
||||
|
||||
|
||||
class EC2TestCase(base.BaseTestCase):
    """Recommended to use as base class for boto related test."""

    # The trash contains cleanup functions and paramaters in tuples
    # (function, *args, **kwargs).  The class-level bin is drained by
    # tearDownClass; a per-instance bin (created in setUp) is drained
    # by tearDown.
    _global_resource_trash_bin = {}
    _global_sequence = -1

    @classmethod
    @safe_setup
    def setUpClass(cls):
        super(EC2TestCase, cls).setUpClass()
        # Shared botocore-based EC2 client used by all tests.
        cls.client = botocoreclient.APIClientEC2()

    @classmethod
    def addResourceCleanUpStatic(cls, function, *args, **kwargs):
        """Adds CleanUp callable, used by tearDownClass.

        Recommended to a use (deep)copy on the mutable args.
        Returns a key usable with cancelResourceCleanUp.
        """
        # tb[0] is the caller's frame summary, kept for debug logging.
        # NOTE(review): tuple-style traceback entries are the Python 2
        # extract_stack API — confirm if ever ported to Python 3.
        tb = traceback.extract_stack(limit=2)
        cls._global_sequence = cls._global_sequence + 1
        cls._global_resource_trash_bin[cls._global_sequence] = (function,
                                                                args, kwargs,
                                                                tb[0])
        return cls._global_sequence

    def setUp(self):
        super(EC2TestCase, self).setUp()
        # Fresh per-test cleanup bin and sequence counter.
        self._resource_trash_bin = {}
        self._sequence = -1

    def tearDown(self):
        # Drain per-test cleanups before the base teardown; fail the
        # test run if any cleanup itself failed.
        fail_count = self.cleanUp(self._resource_trash_bin)
        super(EC2TestCase, self).tearDown()
        if fail_count:
            raise exceptions.TempestException("%d cleanUp operation failed"
                                              % fail_count)

    def addResourceCleanUp(self, function, *args, **kwargs):
        """Adds CleanUp callable, used by tearDown.

        Recommended to a use (deep)copy on the mutable args.
        Returns a key usable with cancelResourceCleanUp.
        """
        tb = traceback.extract_stack(limit=2)[0]
        self._sequence = self._sequence + 1
        self._resource_trash_bin[self._sequence] = (function, args, kwargs, tb)

        # LOG.debug("For cleaning up: %s\n From: %s" %
        #           (self.friendly_function_call_str(function, *args, **kwargs),
        #            str((tb[0], tb[1], tb[2]))))

        return self._sequence

    def cancelResourceCleanUp(self, key):
        """Cancel Clean up request."""
        del self._resource_trash_bin[key]

    # NOTE(andrey-mp): if ERROR in responce_code then skip logging
    # (these errors mean the resource is already gone / detached, which
    # is exactly what cleanup wanted).
    _VALID_CLEANUP_ERRORS = [
        'NotFound',
        'Gateway.NotAttached'
    ]

    # Maps a delete-operation name to (waiter factory method name,
    # extractor pulling the object id out of the call's kwargs) so that
    # cleanUp can block until the resource is really gone.
    _CLEANUP_WAITERS = {
        'DeleteVpc': (
            'get_vpc_waiter',
            lambda kwargs: kwargs['VpcId']),
        'DeleteSubnet': (
            'get_subnet_waiter',
            lambda kwargs: kwargs['SubnetId']),
        'DeleteNetworkInterface': (
            'get_network_interface_waiter',
            lambda kwargs: kwargs['NetworkInterfaceId']),
        'TerminateInstances': (
            'get_instance_waiter',
            lambda kwargs: kwargs['InstanceIds'][0]),
        'DeleteVolume': (
            'get_volume_waiter',
            lambda kwargs: kwargs['VolumeId']),
        'DetachVolume': (
            'get_volume_attachment_waiter',
            lambda kwargs: kwargs['VolumeId']),
        'DeleteSnapshot': (
            'get_snapshot_waiter',
            lambda kwargs: kwargs['SnapshotId']),
    }

    @classmethod
    def tearDownClass(cls):
        fail_count = cls.cleanUp(cls._global_resource_trash_bin)
        super(EC2TestCase, cls).tearDownClass()
        if fail_count:
            raise exceptions.TempestException("%d cleanUp operation failed"
                                              % fail_count)

    @classmethod
    def cleanUp(cls, trash_bin):
        """Calls the callables added by addResourceCleanUp,

        when you overwrite this function don't forget to call this too.
        Returns the number of cleanup operations that raised.
        """
        fail_count = 0
        # Newest-first (LIFO) so dependent resources are deleted before
        # the resources they depend on.
        trash_keys = sorted(trash_bin, reverse=True)
        for key in trash_keys:
            (function, pos_args, kw_args, tb) = trash_bin[key]
            try:
                LOG.debug("Cleaning up: %s\n From: %s" %
                          (cls.friendly_function_call_str(function, *pos_args,
                                                          **kw_args),
                           str((tb[0], tb[1], tb[2]))))
                resp, data = function(*pos_args, **kw_args)
                if resp.status_code != 200:
                    error = data.get('Error', {})
                    error_code = error.get('Code')
                    # Benign "already gone" errors are ignored silently.
                    # NOTE(review): if the response has no Code at all,
                    # ``err in error_code`` raises TypeError on None and
                    # is counted as a cleanup failure — confirm intended.
                    for err in cls._VALID_CLEANUP_ERRORS:
                        if err in error_code:
                            break
                    else:
                        # ``basestring`` is Python 2 only.
                        err_msg = (error if isinstance(error, basestring)
                                   else error.get('Message'))
                        msg = ("Cleanup failed with status %d and message"
                               " '%s'(Code = %s)"
                               % (resp.status_code, err_msg, error_code))
                        LOG.error(msg)
                elif function.__name__ in cls._CLEANUP_WAITERS:
                    # Deletion succeeded — block until the resource has
                    # actually disappeared before deleting its parents.
                    (waiter, obj_id) = cls._CLEANUP_WAITERS[function.__name__]
                    waiter = getattr(cls, waiter)
                    obj_id = obj_id(kw_args)
                    waiter().wait_delete(obj_id)
            except BaseException as exc:
                fail_count += 1
                LOG.exception(exc)
            finally:
                # Always drop the entry so a retry never re-runs it.
                del trash_bin[key]
        return fail_count

    @classmethod
    def friendly_function_name_simple(cls, call_able):
        """Return 'Class.method' or 'function' for logging purposes."""
        name = ""
        # im_class exists only on Python 2 bound/unbound methods.
        if hasattr(call_able, "im_class"):
            name += call_able.im_class.__name__ + "."
        name += call_able.__name__
        return name

    @classmethod
    def friendly_function_call_str(cls, call_able, *args, **kwargs):
        """Render a call as 'name(arg1, arg2, kw=val)' for debug logs."""
        string = cls.friendly_function_name_simple(call_able)
        string += "(" + ", ".join(map(str, args))
        if len(kwargs):
            if len(args):
                string += ", "
            string += ", ".join("=".join(map(str, (key, value)))
                                for (key, value) in kwargs.items())
        return string + ")"

    # --- Per-resource state probes.  Each returns the current state
    # --- string, raises exceptions.NotFound when the resource is gone,
    # --- and raises EC2ResponceException on any other response.

    @classmethod
    def _vpc_get_state(cls, vpc_id):
        resp, data = cls.client.DescribeVpcs(VpcIds=[vpc_id])
        if resp.status_code == 200:
            return data['Vpcs'][0]['State']

        if resp.status_code == 400:
            error = data['Error']
            if error['Code'] == 'InvalidVpcID.NotFound':
                raise exceptions.NotFound()

        raise EC2ResponceException(resp, data)

    @classmethod
    def get_vpc_waiter(cls):
        return EC2Waiter(cls._vpc_get_state)

    @classmethod
    def _subnet_get_state(cls, subnet_id):
        resp, data = cls.client.DescribeSubnets(SubnetIds=[subnet_id])
        if resp.status_code == 200:
            return data['Subnets'][0]['State']

        if resp.status_code == 400:
            error = data['Error']
            if error['Code'] == 'InvalidSubnetID.NotFound':
                raise exceptions.NotFound()

        raise EC2ResponceException(resp, data)

    @classmethod
    def get_subnet_waiter(cls):
        return EC2Waiter(cls._subnet_get_state)

    @classmethod
    def _instance_get_state(cls, instance_id):
        resp, data = cls.client.DescribeInstances(InstanceIds=[instance_id])
        if resp.status_code == 200:
            state = data['Reservations'][0]['Instances'][0]['State']['Name']
            # A terminated instance is treated the same as a deleted one.
            if state != 'terminated':
                return state
            raise exceptions.NotFound()

        if resp.status_code == 400:
            error = data['Error']
            if error['Code'] == 'InvalidInstanceID.NotFound':
                raise exceptions.NotFound()

        raise EC2ResponceException(resp, data)

    @classmethod
    def get_instance_waiter(cls):
        return EC2Waiter(cls._instance_get_state)

    @classmethod
    def _network_interface_get_state(cls, ni_id):
        resp, data = cls.client.DescribeNetworkInterfaces(
            NetworkInterfaceIds=[ni_id])
        if resp.status_code == 200:
            return data['NetworkInterfaces'][0]['Status']

        if resp.status_code == 400:
            error = data['Error']
            if error['Code'] == 'InvalidNetworkInterfaceID.NotFound':
                raise exceptions.NotFound()

        raise EC2ResponceException(resp, data)

    @classmethod
    def get_network_interface_waiter(cls):
        return EC2Waiter(cls._network_interface_get_state)

    @classmethod
    def _volume_get_state(cls, volume_id):
        resp, data = cls.client.DescribeVolumes(VolumeIds=[volume_id])
        if resp.status_code == 200:
            return data['Volumes'][0]['State']

        if resp.status_code == 400:
            error = data['Error']
            if error['Code'] == 'InvalidVolume.NotFound':
                raise exceptions.NotFound()

        raise EC2ResponceException(resp, data)

    @classmethod
    def get_volume_waiter(cls):
        return EC2Waiter(cls._volume_get_state)

    @classmethod
    def _volume_attachment_get_state(cls, volume_id):
        resp, data = cls.client.DescribeVolumes(VolumeIds=[volume_id])
        if resp.status_code == 200:
            volume = data['Volumes'][0]
            if 'Attachments' in volume and len(volume['Attachments']) > 0:
                return volume['Attachments'][0]['State']
            # No attachments left == the attachment is gone.
            raise exceptions.NotFound()

        if resp.status_code == 400:
            error = data['Error']
            if error['Code'] == 'InvalidVolume.NotFound':
                raise exceptions.NotFound()

        raise EC2ResponceException(resp, data)

    @classmethod
    def get_volume_attachment_waiter(cls):
        return EC2Waiter(cls._volume_attachment_get_state)

    @classmethod
    def _snapshot_get_state(cls, volume_id):
        resp, data = cls.client.DescribeSnapshots(SnapshotIds=[volume_id])
        if resp.status_code == 200:
            return data['Snapshots'][0]['State']

        if resp.status_code == 400:
            error = data['Error']
            if error['Code'] == 'InvalidSnapshot.NotFound':
                raise exceptions.NotFound()

        raise EC2ResponceException(resp, data)

    @classmethod
    def get_snapshot_waiter(cls):
        return EC2Waiter(cls._snapshot_get_state)
|
|
@ -0,0 +1,67 @@
|
|||
# Copyright 2014 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import types
|
||||
|
||||
from botocore import session
|
||||
from tempest_lib.openstack.common import log as logging
|
||||
|
||||
from ec2api.tests.functional import config as cfg
|
||||
|
||||
# Functional-test configuration and module-level logger.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BotocoreClientBase(object):
    """Base wrapper around a botocore session.

    Subclasses must set ``self.service`` and ``self.endpoint``; API
    operations are then exposed as methods that are generated on first
    access by __getattr__ (e.g. ``client.CreateKeyPair(KeyName=...)``).
    """

    def __init__(self, *args, **kwargs):
        self.region = CONF.aws.aws_region
        # botocore session variable map:
        # name -> (config-file key, environment variable, default).
        self.connection_data = {
            'config_file': (None, 'AWS_CONFIG_FILE', None),
            'region': ('region', 'BOTO_DEFAULT_REGION', self.region),
        }

        access = CONF.aws.aws_access
        secret = CONF.aws.aws_secret
        # Fail fast: every EC2 call needs credentials.
        if not access or not secret:
            raise Exception('Auth params did not provided')

        self.session = session.get_session(self.connection_data)
        self.session.set_credentials(access, secret)

    def __getattr__(self, name):
        """Automatically creates methods for the allowed methods set."""
        # Only reached when ``name`` is not found via normal lookup;
        # resolve it as a botocore operation of self.service.
        op = self.service.get_operation(name)
        if not op:
            raise AttributeError(name)

        def func(self, *args, **kwargs):
            # Returns botocore's (http_response, parsed_data) pair.
            return op.call(self.endpoint, *args, **kwargs)

        func.__name__ = name
        # Cache the generated method on both the instance and the class
        # so subsequent lookups bypass __getattr__.  The three-argument
        # types.MethodType form is Python 2 only.
        # NOTE(review): the class-level method closes over THIS
        # instance's ``op`` — presumably fine while a single client
        # instance is in use; confirm before sharing several instances.
        setattr(self, name, types.MethodType(func, self, self.__class__))
        setattr(self.__class__, name,
                types.MethodType(func, None, self.__class__))
        return getattr(self, name)
|
||||
|
||||
|
||||
class APIClientEC2(BotocoreClientBase):
    """botocore-backed client bound to the configured EC2 endpoint."""

    def __init__(self, *args, **kwargs):
        super(APIClientEC2, self).__init__(*args, **kwargs)
        # Resolve the 'ec2' service model from the session, then bind
        # an endpoint using the region and URL from the test config.
        ec2_service = self.session.get_service('ec2')
        ec2_endpoint = ec2_service.get_endpoint(
            region_name=self.region,
            endpoint_url=CONF.aws.ec2_url)
        self.service = ec2_service
        self.endpoint = ec2_endpoint
|
|
@ -0,0 +1,121 @@
|
|||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import logging as std_logging
|
||||
import os
|
||||
|
||||
from oslo.config import cfg
|
||||
from tempest_lib.openstack.common import log as logging
|
||||
|
||||
|
||||
def register_opt_group(conf, opt_group, options):
    """Register *opt_group* on *conf*, then every option in *options*
    under that group's name.
    """
    group_name = opt_group.name
    conf.register_group(opt_group)
    for option in options:
        conf.register_opt(option, group=group_name)
|
||||
|
||||
|
||||
# Option group holding all AWS/EC2 endpoint and test-tuning settings;
# tests read them as CONF.aws.<option>.
aws_group = cfg.OptGroup(name='aws',
                         title='AWS options')
AWSGroup = [
    cfg.StrOpt('ec2_url',
               default="http://localhost:8788/",
               help="EC2 URL"),
    cfg.StrOpt('aws_secret',
               default=None,
               help="AWS Secret Key",
               secret=True),
    cfg.StrOpt('aws_access',
               default=None,
               help="AWS Access Key"),
    cfg.StrOpt('aws_region',
               default="RegionOne",
               help="AWS region for EC2 tests"),
    cfg.StrOpt('aws_zone',
               default='nova',
               help="AWS zone inside region for EC2 tests"),
    cfg.IntOpt('build_timeout',
               default=120,
               help="Status Change Timeout"),
    cfg.IntOpt('build_interval',
               default=1,
               help="Status Change Test Interval"),
    cfg.StrOpt('instance_type',
               default="m1.tiny",
               help="Instance type"),
    cfg.StrOpt('image_id',
               default=None,
               help="Image ID for instance running"),
    cfg.BoolOpt('run_incompatible_tests',
                default=False,
                help='Will run all tests plus incompatible with Amazon.'),
]
|
||||
|
||||
|
||||
def register_opts():
    """Register the functional-test option group on the global config."""
    register_opt_group(cfg.CONF, aws_group, AWSGroup)
|
||||
|
||||
|
||||
# this should never be called outside of this class
class ConfigPrivate(object):
    """Provides OpenStack configuration information."""

    # NOTE(review): this default directory is defined but __init__ below
    # actually resolves the file from $TEST_CONFIG_DIR (falling back to
    # the current directory), not from this path — confirm intended.
    DEFAULT_CONFIG_DIR = os.path.join(
        os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
        "etc")

    # Default config file name, overridable via $TEST_CONFIG.
    DEFAULT_CONFIG_FILE = "functional_tests.conf"

    def _set_attrs(self):
        # Shortcut attributes so tests can write CONF.aws.<opt>.
        self.aws = cfg.CONF.aws
        self.debug = cfg.CONF.debug

    def __init__(self, parse_conf=True):
        """Initialize a configuration from a conf directory and conf file."""
        super(ConfigPrivate, self).__init__()
        config_files = []

        # Environment variables override defaults...
        conf_dir = os.environ.get('TEST_CONFIG_DIR', '.')
        conf_file = os.environ.get('TEST_CONFIG', self.DEFAULT_CONFIG_FILE)
        path = os.path.join(conf_dir, conf_file)

        # only parse the config file if we expect one to exist. This is needed
        # to remove an issue with the config file up to date checker.
        if parse_conf:
            config_files.append(path)

        # Parse config and initialize logging for the whole test run.
        cfg.CONF([], project='ec2api', default_config_files=config_files)
        logging.setup('ec2api')
        LOG = logging.getLogger('ec2api')
        LOG.info("Using ec2api config file %s" % path)
        register_opts()
        self._set_attrs()
        if parse_conf:
            cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)
|
||||
|
||||
|
||||
class ConfigProxy(object):
    """Lazy proxy for ConfigPrivate.

    The real configuration is parsed only on first attribute access, so
    merely importing this module has no side effects.
    """

    _config = None

    def __getattr__(self, attr):
        config = self._config
        if not config:
            # First access: build and memoize the real config object.
            config = ConfigPrivate()
            self._config = config
        return getattr(config, attr)
|
||||
|
||||
|
||||
# Module-level lazy configuration singleton imported by all tests.
CONF = ConfigProxy()
|
Loading…
Reference in New Issue