fixed gitignore conflict

This commit is contained in:
cameron-r 2014-10-22 14:41:56 -05:00
commit 1e9b016c4a
11 changed files with 319 additions and 155 deletions

1
.gitignore vendored
View File

@ -2,3 +2,4 @@
.vagrant
.idea
*.swp
tests/logs/

View File

@ -37,7 +37,15 @@ Using the native OpenStack Dashboard or APIs you would be able to manage the EC2
The driver should now be loaded. The contents of the repository is mapped to `/opt/stack/nova/nova/virt/ec2/`, and you can edit it directly from your host computer with an IDE of your choice.
###Important notes
###Running Tests
Moto can be used to mock the EC2 server. To install Moto, run `pip install moto`.
1. To optionally use Moto, run `source /opt/stack/nova/nova/virt/ec2/tests/setup_moto.sh`.
2. Run `~/devstack/rejoin-stack.sh`.
3. `cd /opt/stack/nova/nova/virt/ec2/tests`
4. Run `nosetests -s test_ec2driver.py`.
5. To stop Moto, run `source /opt/stack/nova/nova/virt/ec2/tests/shutdown_moto.sh`.
###Important Notes
In Amazon's EC2 there is no concept of suspend and resume for instances. Therefore, we simply stop EC2 instances when they are suspended and start them when they are resumed; we do the same for pause and un-pause.
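Since the driver already tracks each OpenStack instance's EC2 counterpart via `instance['metadata']['ec2_id']`, the suspend/resume (and pause/un-pause) paths reduce to boto stop/start calls. A minimal sketch of that mapping, assuming a driver object that holds a boto connection as `self.ec2_conn` (signatures simplified, not the exact Nova driver interface):

```python
# Illustrative sketch only; not the exact EC2Driver code.
def suspend(self, instance):
    # "Suspend" has no EC2 equivalent, so stop the backing EC2 instance instead.
    ec2_id = instance['metadata']['ec2_id']
    self.ec2_conn.stop_instances(instance_ids=[ec2_id])

def resume(self, instance):
    # "Resume" simply starts the stopped EC2 instance again.
    ec2_id = instance['metadata']['ec2_id']
    self.ec2_conn.start_instances(instance_ids=[ec2_id])
```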
##To Be Continued

View File

@ -1,7 +1,6 @@
#!/usr/bin/env python
import os
def get_nova_creds():
d = {}
d['username'] = os.environ['OS_USERNAME']

View File

@ -16,11 +16,13 @@
"""Connection to the Amazon Web Services - EC2 service"""
from threading import Lock
import base64
import time
from boto import ec2
import boto.ec2.cloudwatch
from boto import exception as boto_exc
from boto.exception import EC2ResponseError
from boto.regioninfo import RegionInfo
from oslo.config import cfg
from novaclient.v1_1 import client
@ -36,12 +38,9 @@ from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.virt import driver
from nova.virt import virtapi
from nova.compute import flavors
import base64
import time
from novaclient.v1_1 import client
from credentials import get_nova_creds
LOG = logging.getLogger(__name__)
ec2driver_opts = [
@ -118,9 +117,9 @@ class EC2Driver(driver.ComputeDriver):
def __init__(self, virtapi, read_only=False):
super(EC2Driver, self).__init__(virtapi)
self.host_status_base = {
'vcpus': 100000,
'memory_mb': 8000000000,
'local_gb': 600000000000,
'vcpus': VCPUS,
'memory_mb': MEMORY_IN_MBS,
'local_gb': DISK_IN_GB,
'vcpus_used': 0,
'memory_mb_used': 0,
'local_gb_used': 100000000000,
@ -136,23 +135,32 @@ class EC2Driver(driver.ComputeDriver):
self.creds = get_nova_creds()
self.nova = client.Client(**self.creds)
# To connect to EC2
self.ec2_conn = ec2.connect_to_region(
aws_region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
region = RegionInfo(name=aws_region, endpoint=aws_endpoint)
self.ec2_conn = ec2.EC2Connection(aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
host=host,
port=port,
region=region,
is_secure=secure)
self.cloudwatch_conn = ec2.cloudwatch.connect_to_region(
aws_region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
self.reservation = self.ec2_conn.get_all_reservations()
self.security_group_lock = Lock()
if '_EC2_NODES' not in globals():
set_nodes([CONF.host])
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function,
including catching up with currently running VM's on the given host.
"""
return
def list_instances(self):
"""Return the names of all the instances known to the virtualization
layer, as a list.
"""
all_instances = self.ec2_conn.get_all_instances()
instance_ids = []
for instance in all_instances:
@ -169,6 +177,27 @@ class EC2Driver(driver.ComputeDriver):
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create a new instance/VM/domain on the virtualization platform.
Once this successfully completes, the instance should be
running (power_state.RUNNING).
If this fails, any partial instance should be completely
cleaned up, and the virtualization platform should be in the state
that it was before this call began.
:param context: security context <Not Yet Implemented>
:param instance: nova.objects.instance.Instance
This function should use the data there to guide
the creation of the new instance.
:param image_meta: image object returned by nova.image.glance that
defines the image from which to boot this instance
:param injected_files: User files to inject into instance.
:param admin_password: set in instance. <Not Yet Implemented>
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices to be
attached to the instance.
"""
LOG.info("***** Calling SPAWN *******************")
LOG.info("****** %s" % instance._user_data)
LOG.info("****** Allocating an elastic IP *********")
@ -194,24 +223,21 @@ class EC2Driver(driver.ComputeDriver):
self.ec2_conn.associate_address(instance_id=ec2_id, allocation_id=elastic_ip_address.allocation_id)
def snapshot(self, context, instance, image_id, update_task_state):
"""Snapshot an image of the specified instance
on EC2 and create an Image which gets stored as an AMI (backed internally by an EBS snapshot)
:param context: security context
:param instance: nova.objects.instance.Instance
:param image_id: Reference to a pre-created image that will hold the snapshot.
"""
Snapshot an image on EC2 and create an Image which gets stored in AMI (internally in EBS Snapshot)
"""
LOG.info("***** Calling SNAPSHOT *******************")
if(instance['metadata']['ec2_id'] is None):
if instance['metadata']['ec2_id'] is None:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
# Adding the below line only alters the state of the instance and not
# its image in OpenStack.
update_task_state(
task_state=task_states.IMAGE_UPLOADING, expected_state=task_states.IMAGE_SNAPSHOT)
# TODO change the image status to Active instead of in saving or
# queuing
ec2_id = instance['metadata']['ec2_id']
ec_instance_info = self.ec2_conn.get_only_instances(
instance_ids=[ec2_id], filters=None, dry_run=False, max_results=None)
@ -220,15 +246,14 @@ class EC2Driver(driver.ComputeDriver):
ec2_image_id = ec2_instance.create_image(name=str(
image_id), description="Image from OpenStack", no_reboot=False, dry_run=False)
LOG.info("Image has been created state to %s." % ec2_image_id)
# The instance will be in pending state when it comes up, waiting for
# it to be in available
# The image will be in a pending state when it is created; wait for it to become available
self._wait_for_image_state(ec2_image_id, "available")
image_api = glance.get_default_image_service()
image_ref = glance.generate_image_url(image_id)
metadata = {'is_public': False,
# 'checksum': '4eada48c2843d2a262c814ddc92ecf2c', #Hard-coded value for now
'location': image_ref,
'properties': {
'kernel_id': instance['kernel_id'],
@ -236,7 +261,7 @@ class EC2Driver(driver.ComputeDriver):
'owner_id': instance['project_id'],
'ramdisk_id': instance['ramdisk_id'],
'ec2_image_id': ec2_image_id
}
}
}
image_api.update(context, image_id, metadata)
@ -349,15 +374,17 @@ class EC2Driver(driver.ComputeDriver):
else:
# Deleting the instance from EC2
ec2_id = instance['metadata']['ec2_id']
ec2_instances = self.ec2_conn.get_only_instances(instance_ids=[ec2_id])
try:
ec2_instances = self.ec2_conn.get_only_instances(instance_ids=[ec2_id])
except Exception:
return
if len(ec2_instances) == 0:
LOG.warning(_("EC2 instance with ID %s not found") % ec2_id, instance=instance)
return
else:
# get the elastic ip associated with the instance & disassociate
# it, and release it
ec2_instance = ec2_instances[0]
elastic_ip_address = self.ec2_conn.get_all_addresses(addresses=[ec2_instance.ip_address])[0]
elastic_ip_address = self.ec2_conn.get_all_addresses(addresses=instance['metadata']['public_ip_address'])[0]
LOG.info("****** Disassociating the elastic IP *********")
self.ec2_conn.disassociate_address(elastic_ip_address.public_ip)
@ -507,10 +534,10 @@ class EC2Driver(driver.ComputeDriver):
'username': 'EC2user',
'password': 'EC2password'}
def _get_ec2_instance_ids_for_security_group(self, ec2_security_group):
def _get_ec2_instance_ids_with_security_group(self, ec2_security_group):
return [instance.id for instance in ec2_security_group.instances()]
def _get_openstack_instances_for_security_group(self, openstack_security_group):
def _get_openstack_instances_with_security_group(self, openstack_security_group):
return [instance for instance in (self.nova.servers.list())
if openstack_security_group.name in [group['name'] for group in instance.security_groups]]
@ -548,21 +575,28 @@ class EC2Driver(driver.ComputeDriver):
openstack_security_group = self.nova.security_groups.get(security_group_id)
ec2_security_group = self._get_or_create_ec2_security_group(openstack_security_group)
ec2_instance_ids_for_security_group = self._get_ec2_instance_ids_for_security_group(ec2_security_group)
ec2_ids_for_openstack_instances_for_security_group = [
ec2_ids_for_ec2_instances_with_security_group = self._get_ec2_instance_ids_with_security_group(ec2_security_group)
ec2_ids_for_openstack_instances_with_security_group = [
instance.metadata['ec2_id'] for instance
in self._get_openstack_instances_for_security_group(openstack_security_group)
in self._get_openstack_instances_with_security_group(openstack_security_group)
]
self.security_group_lock.acquire()
try:
if self._should_add_security_group_to_instance(ec2_instance_ids_for_security_group, ec2_ids_for_openstack_instances_for_security_group):
ec2_instance_id_to_add_security_group = self._get_id_of_ec2_instance_to_update_security_group(ec2_instance_ids_for_security_group, ec2_ids_for_openstack_instances_for_security_group)
self._add_security_group_to_instance(ec2_instance_id_to_add_security_group, ec2_security_group)
ec2_instance_to_update = self._get_id_of_ec2_instance_to_update_security_group(
ec2_ids_for_ec2_instances_with_security_group,
ec2_ids_for_openstack_instances_with_security_group
)
should_add_security_group = self._should_add_security_group_to_instance(
ec2_ids_for_ec2_instances_with_security_group,
ec2_ids_for_openstack_instances_with_security_group)
if should_add_security_group:
self._add_security_group_to_instance(ec2_instance_to_update, ec2_security_group)
else:
ec2_instance_id_to_remove_security_group = self._get_id_of_ec2_instance_to_update_security_group(ec2_instance_ids_for_security_group, ec2_ids_for_openstack_instances_for_security_group)
self._remove_security_group_from_instance(ec2_instance_id_to_remove_security_group, ec2_security_group)
self._remove_security_group_from_instance(ec2_instance_to_update, ec2_security_group)
finally:
self.security_group_lock.release()
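The refactored hunk above routes both the add and the remove path through a single `_get_id_of_ec2_instance_to_update_security_group` call before deciding which way to sync. The helper bodies are not part of this hunk; a plausible sketch, assuming the decision reduces to set operations on the two id lists (function names and bodies here are hypothetical, the real methods in ec2driver.py may differ):

```python
# Hypothetical logic for the helpers used above.
def should_add_security_group(ec2_ids_with_group, openstack_ec2_ids_with_group):
    # OpenStack lists an instance for this group that EC2 does not yet have,
    # so the group was attached on the OpenStack side and must be added on EC2.
    return bool(set(openstack_ec2_ids_with_group) - set(ec2_ids_with_group))

def ec2_instance_id_to_update(ec2_ids_with_group, openstack_ec2_ids_with_group):
    # The instance to update is the one present on exactly one side.
    difference = set(ec2_ids_with_group) ^ set(openstack_ec2_ids_with_group)
    return difference.pop() if difference else None
```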

View File

@ -1,40 +1,8 @@
# Copyright (c) 2014 Thoughtworks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either expressed or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
# This is the config file which is going to hold the values for being able
# to connect to the AWS Public cloud.
aws_region = "us-east-1"
aws_access_key_id = "AKIAIZJDDRNNJUWZ3LXA"
aws_secret_access_key = "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"
#Adding a Red Hat Linux image below
aws_ami = "ami-785bae10"
#aws_ami = "ami-864d84ee"
instance_type = "t2.micro"
# Mapping OpenStack's flavor IDs(which seems to be randomly assigned) to EC2's flavor names
flavor_map = {2: 't2.micro', 5: 't2.small', 1: 't2.medium', 3: 'c3.xlarge', 4: 'c3.2xlarge'}
#Add image maps key: image in openstack, Value: EC2_AMI_ID
image_map = {}
volume_map = {'3df37a34-662e-4aa8-b71d-b8313d2e945b': 'vol-83db57cb',
'7d63c661-7e93-445b-b3cb-765f1c8ae4c0': 'vol-1eea8a56'}
keypair_map = {}
# The limit on maximum resources you could have in the AWS EC2.
VCPUS = 100
MEMORY_IN_MBS = 88192
DISK_IN_GB = 1028
if os.environ.get('MOCK_EC2'):
print "test environment"
from ec2driver_test_config import *
else:
print "prod env"
from ec2driver_standard_config import *
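After this change `ec2driver_config.py` is just a dispatcher: every name it exposes comes from either `ec2driver_test_config.py` or `ec2driver_standard_config.py`, selected by the `MOCK_EC2` environment variable that `tests/setup_moto.sh` exports and `tests/shutdown_moto.sh` unsets. A small illustrative check of that behaviour, assuming the module is on the import path:

```python
# Illustrative only; the driver itself does "from ec2driver_config import *".
import os
os.environ['MOCK_EC2'] = 'True'         # what setup_moto.sh does for the test shell
import ec2driver_config as cfg

assert cfg.aws_region == 'moto_region'  # value re-exported from ec2driver_test_config.py
assert cfg.port == 1234                 # the port moto_server is started on by setup_moto.sh
```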

View File

@ -0,0 +1,46 @@
# Copyright (c) 2014 Thoughtworks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either expressed or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This config file holds the values needed to connect to the AWS public cloud.
from collections import defaultdict
aws_region = 'us-east-1'
aws_endpoint = 'ec2.us-east-1.amazonaws.com'
aws_access_key_id = 'AKIAIZJDDRNNJUWZ3LXA'
aws_secret_access_key = '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'
port = 443
host = str(port) + ":" + aws_endpoint
secure = True
#Adding a Red Hat Linux image below
aws_ami = "ami-785bae10"
#aws_ami = "ami-864d84ee"
instance_type = "t2.micro"
# Mapping of OpenStack's flavor IDs (which seem to be randomly assigned) to EC2's flavor names
flavor_map = {2: 't2.micro', 5: 't2.small', 1: 't2.medium', 3: 'c3.xlarge', 4: 'c3.2xlarge'}
# Image map. Key: image in OpenStack, value: EC2 AMI ID
image_map = {}
# Using defaultdict as we need to get a default EBS volume to be returned if we access this map with an unknown key
volume_map_no_default = {'ed6fcf64-8c74-49a0-a30c-76128c7bda47': 'vol-83db57cb',
'ac28d216-6dda-4a7b-86c4-d95209ae8181': 'vol-1eea8a56'}
volume_map = defaultdict(lambda: 'vol-83db57cb', volume_map_no_default)
keypair_map = {}
# The limit on maximum resources you could have in the AWS EC2.
VCPUS = 100
MEMORY_IN_MBS = 88192
DISK_IN_GB = 1028
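Because `volume_map` is a `defaultdict`, looking up an OpenStack volume id that is missing from `volume_map_no_default` falls back to the first EBS volume instead of raising `KeyError`. For example (the unknown key below is made up):

```python
from collections import defaultdict

# Same pattern as volume_map above.
volume_map_no_default = {'ed6fcf64-8c74-49a0-a30c-76128c7bda47': 'vol-83db57cb',
                         'ac28d216-6dda-4a7b-86c4-d95209ae8181': 'vol-1eea8a56'}
volume_map = defaultdict(lambda: 'vol-83db57cb', volume_map_no_default)

volume_map['ac28d216-6dda-4a7b-86c4-d95209ae8181']  # 'vol-1eea8a56' (explicitly mapped)
volume_map['some-unknown-openstack-volume-id']      # 'vol-83db57cb' (default, no KeyError)
```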

48
ec2driver_test_config.py Normal file
View File

@ -0,0 +1,48 @@
# Copyright (c) 2014 Thoughtworks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either expressed or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This config file holds the values needed to connect to the local Moto (mock EC2) server used by the tests.
from collections import defaultdict
aws_region = "moto_region"
aws_endpoint = "localhost"
aws_access_key_id = 'the_key'
aws_secret_access_key = 'the_secret'
port = 1234
host = str(port) + ":" + aws_endpoint
secure = False
#Adding a Red Hat Linux image below
aws_ami = "ami-785bae10"
#aws_ami = "ami-864d84ee"
instance_type = "t2.micro"
# Mapping of OpenStack's flavor IDs (which seem to be randomly assigned) to EC2's flavor names
flavor_map = {2: 't2.micro', 5: 't2.small', 1: 't2.medium', 3: 'c3.xlarge', 4: 'c3.2xlarge'}
# Image map. Key: image in OpenStack, value: EC2 AMI ID
image_map = {}
# Using defaultdict as we need to get a default EBS volume to be returned if we access this map with an unknown key
volume_map_no_default = {'ed6fcf64-8c74-49a0-a30c-76128c7bda47': 'vol-83db57cb',
'ac28d216-6dda-4a7b-86c4-d95209ae8181': 'vol-1eea8a56'}
volume_map = defaultdict(lambda: 'vol-83db57cb', volume_map_no_default)
keypair_map = {}
# The limit on maximum resources you could have in the AWS EC2.
VCPUS = 100
MEMORY_IN_MBS = 88192
DISK_IN_GB = 1028

66
tests/ec2_test_base.py Normal file
View File

@ -0,0 +1,66 @@
import unittest
import time
from boto.regioninfo import RegionInfo
from novaclient.v1_1 import client
from boto import ec2
from ..credentials import get_nova_creds
from ..ec2driver_config import *
class EC2TestBase(unittest.TestCase):
@staticmethod
def sleep_if_ec2_not_mocked(seconds):
if not os.environ.get('MOCK_EC2'):
time.sleep(seconds)
@classmethod
def setUp(self):
print "Establishing connection with AWS"
region = RegionInfo(name=aws_region, endpoint=aws_endpoint)
self.ec2_conn = ec2.EC2Connection(aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
host=host,
port=port,
region = region,
is_secure=secure)
self.creds = get_nova_creds()
self.nova = client.Client(**self.creds)
# nova client for cinder
self.creds['service_type'] = 'volume'
self.nova_volume = client.Client(**self.creds)
self.servers = []
self.volumes = []
@classmethod
def tearDown(self):
print "Cleanup: Destroying the instance used for testing"
for instance in self.servers:
instance.delete()
# wait for all instances to completely shut down and detach volumes if any
self.sleep_if_ec2_not_mocked(120)
for volume in self.volumes:
volume.delete()
def spawn_ec2_instance(self):
print "aws_region: " + aws_region
print "Spawning an instance"
image = self.nova.images.find(name="cirros-0.3.1-x86_64-uec")
flavor = self.nova.flavors.find(name="m1.tiny")
server = self.nova.servers.create(
name="cirros-test", image=image.id, flavor=flavor.id)
instance = self.nova.servers.get(server.id)
while instance.status != 'ACTIVE':
EC2TestBase.sleep_if_ec2_not_mocked(10)
instance = self.nova.servers.get(server.id)
self.servers.append(instance)
return instance, server.id

13
tests/setup_moto.sh Executable file
View File

@ -0,0 +1,13 @@
#! /usr/bin/env bash
if [ ! -d "logs" ]; then
echo "Making logs directory"
mkdir logs
fi
echo "Setting environment variable MOCK_EC2=True"
export MOCK_EC2=True
echo "Restarting moto"
ps aux | grep moto_server | grep -v grep | awk '{print $2}' | xargs kill -9
moto_server ec2 -p1234 > logs/moto_log.txt 2>&1 &

7
tests/shutdown_moto.sh Executable file
View File

@ -0,0 +1,7 @@
#! /usr/bin/env bash
echo "Unsetting environment variable MOCK_EC2"
unset MOCK_EC2
echo "Killing moto"
ps aux | grep moto_server | grep -v grep | awk '{print $2}' | xargs kill -9

View File

@ -1,47 +1,23 @@
import unittest
import time
from novaclient.v1_1 import client
from ..credentials import get_nova_creds
from boto import ec2
from ..ec2driver_config import *
import urllib2
class EC2DriverTest(unittest.TestCase):
_multiprocess_shared_ = True
from novaclient.v1_1 import client
from ..ec2driver_config import *
from ec2_test_base import EC2TestBase
@classmethod
def setUp(self):
print "Establishing connection with AWS"
self.ec2_conn = ec2.connect_to_region(aws_region, aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
self.creds = get_nova_creds()
self.nova = client.Client(**self.creds)
self.servers = []
def spawn_ec2_instance(self):
print "Spawning an instance"
image = self.nova.images.find(name="cirros-0.3.1-x86_64-uec")
flavor = self.nova.flavors.find(name="m1.tiny")
server = self.nova.servers.create(
name="cirros-test", image=image.id, flavor=flavor.id)
instance = self.nova.servers.get(server.id)
while instance.status != 'ACTIVE':
time.sleep(10)
instance = self.nova.servers.get(server.id)
self.servers.append(instance)
return instance, server.id
class TestEC2Driver(EC2TestBase):
def test_spawn(self):
print "******* Spawn Test ***********"
instance, instance_ref = self.spawn_ec2_instance()
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[instance.metadata['ec2_id']], filters=None,
dry_run=False, max_results=None)
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[instance.metadata['ec2_id']])
ec2_eip = self.ec2_conn.get_all_addresses(addresses=instance.metadata['public_ip_address'])[0]
self.assertEqual(ec2_instance[0].id, instance.metadata['ec2_id'])
self.assertEqual(ec2_instance[0].ip_address, instance.metadata['public_ip_address'])
self.assertEqual(ec2_eip.instance_id, instance.metadata['ec2_id'])
def test_destroy(self):
print "******* Destroy Test ***********"
@ -49,27 +25,21 @@ class EC2DriverTest(unittest.TestCase):
ec2_id = instance.metadata['ec2_id']
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[ec2_id], filters=None, dry_run=False,
max_results=None)
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[ec2_id])[0]
# EC2 statecode: 16->Running, 32->Shutting Down
while ec2_instance[0].state != "running":
time.sleep(10)
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[ec2_id], filters=None, dry_run=False,
max_results=None)
while ec2_instance.state != "running":
self.sleep_if_ec2_not_mocked(10)
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[ec2_id])[0]
instance.delete()
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[ec2_id], filters=None, dry_run=False,
max_results=None)
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[ec2_id])[0]
# EC2 statecode: 16->Running, 32->Shutting Down
while ec2_instance[0].state != "shutting-down":
time.sleep(10)
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[ec2_id], filters=None, dry_run=False,
max_results=None)
while ec2_instance.state not in ("shutting-down", "terminated"):
self.sleep_if_ec2_not_mocked(10)
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[ec2_id])[0]
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[ec2_id], filters=None, dry_run=False,
max_results=None)
self.assertTrue(ec2_instance.state in ("shutting-down", "terminated"))
self.assertEquals(ec2_instance[0].state, "shutting-down")
def test_power_off(self):
print "******* Power Off Test ***********"
@ -78,12 +48,11 @@ class EC2DriverTest(unittest.TestCase):
self.nova.servers.stop(instance)
while instance.status != 'SHUTOFF':
time.sleep(5)
self.sleep_if_ec2_not_mocked(5)
instance = self.nova.servers.get(instance.id)
# assert power off
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[instance.metadata['ec2_id']], filters=None,
dry_run=False, max_results=None)[0]
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[instance.metadata['ec2_id']])[0]
self.assertEqual(ec2_instance.state, "stopped")
def test_soft_reboot(self):
@ -100,12 +69,11 @@ class EC2DriverTest(unittest.TestCase):
instance = self.nova.servers.get(instance.id)
while instance.status != 'ACTIVE':
time.sleep(5)
self.sleep_if_ec2_not_mocked(5)
instance = self.nova.servers.get(instance.id)
#assert restarted
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[instance.metadata['ec2_id']], filters=None,
dry_run=False, max_results=None)[0]
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[instance.metadata['ec2_id']])[0]
self.assertEqual(ec2_instance.state, "running")
def test_hard_reboot(self):
@ -117,26 +85,25 @@ class EC2DriverTest(unittest.TestCase):
# we are waiting for the status to actually get to 'Hard Reboot' before
# beginning to wait for it to go to 'Active' status
while instance.status != 'HARD_REBOOT':
time.sleep(5)
self.sleep_if_ec2_not_mocked(5)
instance = self.nova.servers.get(instance.id)
while instance.status != 'ACTIVE':
time.sleep(5)
self.sleep_if_ec2_not_mocked(5)
instance = self.nova.servers.get(instance.id)
#assert restarted
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[instance.metadata['ec2_id']], filters=None,
dry_run=False, max_results=None)[0]
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[instance.metadata['ec2_id']])[0]
self.assertEqual(ec2_instance.state, "running")
def test_resize(self):
print "******* Resize Test ***********"
instance, instance_ref = self.spawn_ec2_instance()
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[instance.metadata['ec2_id']], filters=None,
dry_run=False, max_results=None)[0]
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[instance.metadata['ec2_id']])[0]
ip_before_resize = self.ec2_conn.get_all_addresses(addresses=instance.metadata['public_ip_address'])[0]
ip_before_resize = ec2_instance.ip_address
self.assertEqual(ec2_instance.instance_type, "t2.micro")
new_flavor = self.nova.flavors.find(name="m1.small")
@ -147,23 +114,25 @@ class EC2DriverTest(unittest.TestCase):
# wait for the status to actually go to Verify_Resize, before
# confirming the resize.
while instance.status != 'VERIFY_RESIZE':
time.sleep(5)
self.sleep_if_ec2_not_mocked(5)
instance = self.nova.servers.get(instance.id)
# Confirm the resize
self.nova.servers.confirm_resize(instance)
while instance.status != 'ACTIVE':
time.sleep(5)
self.sleep_if_ec2_not_mocked(5)
instance = self.nova.servers.get(instance.id)
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[instance.metadata['ec2_id']], filters=None,
dry_run=False, max_results=None)[0]
ip_after_resize = ec2_instance.ip_address
self.assertEqual(ec2_instance.instance_type, "t2.small")
self.assertEqual(ip_before_resize, ip_after_resize,
"Public IP Address should be same before and after the resize")
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[instance.metadata['ec2_id']])[0]
# ip_after_resize = ec2_instance.ip_address
ip_after_resize = self.ec2_conn.get_all_addresses(addresses=instance.metadata['public_ip_address'])[0]
self.assertEqual(ec2_instance.instance_type, "t2.small")
self.assertEqual(ip_before_resize.public_ip, ip_after_resize.public_ip)
@unittest.skipIf(os.environ.get('MOCK_EC2'), 'Not supported by moto')
def test_user_data(self):
"""To test the spawn method by providing a file user_data for config drive.
Will bring up a LAMP server.
@ -176,7 +145,7 @@ class EC2DriverTest(unittest.TestCase):
userdata=user_data_content)
instance = self.nova.servers.get(server.id)
while instance.status != 'ACTIVE' and 'ec2_id' not in instance.metadata:
time.sleep(10)
self.sleep_if_ec2_not_mocked(10)
instance = self.nova.servers.get(server.id)
self.servers.append(instance)
@ -188,27 +157,32 @@ class EC2DriverTest(unittest.TestCase):
url = "http://"+ec2_instance[0].ip_address+"/phpinfo.php"
# wait for the instance to download all the dependencies for a LAMP server
time.sleep(300)
self.sleep_if_ec2_not_mocked(300)
print url
raw_response = urllib2.urlopen(url)
print raw_response
self.assertEqual(raw_response.code, 200)
@unittest.skipIf(os.environ.get('MOCK_EC2'), 'Not supported by moto')
def test_diagnostics(self):
print "******* Diagnostics Test ***********"
instance, instance_ref = self.spawn_ec2_instance()
print "instance_ref: ", instance_ref
diagnostics = instance.diagnostics()[1]
self.assertEqual(diagnostics['instance.instance_type'], 't2.micro')
self.assertEqual(diagnostics['instance._state'], 'running(16)')
@classmethod
def tearDown(self):
print "Cleanup: Destroying the instance used for testing"
for instance in self.servers:
instance.delete()
@unittest.skipIf(os.environ.get('MOCK_EC2'), 'Not supported by moto')
def test_attach_volume(self):
volume = self.nova_volume.volumes.create(1, snapshot_id=None, display_name='test', display_description=None,
volume_type=None, availability_zone=None, imageRef=None)
self.volumes.append(volume)
instance, instance_ref = self.spawn_ec2_instance()
self.nova.volumes.create_server_volume(instance_ref, volume.id, "/dev/sdb")
self.sleep_if_ec2_not_mocked(30)
volumes = self.nova.volumes.get_server_volumes(instance.id)
self.assertIn(volume, volumes)
if __name__ == '__main__':
unittest.main()