Venu | get host stats partial work

This commit is contained in:
new 2014-10-14 13:55:37 +05:30
commit 39657905b2
3 changed files with 157 additions and 76 deletions

View File

@ -22,7 +22,10 @@ Using the native OpenStack Dashboard or APIs you would be able to manage the EC2
1. Clone this repository: `git clone https://github.com/ThoughtWorksInc/OpenStack-EC2-Driver.git`
2. Run `vagrant up` from within the repository to create an Ubuntu VirtualBox VM that will install devstack. This will take a couple of minutes.
3. `vagrant ssh` to ssh into the new machine
4. Use `vim /etc/nova/nova.conf` to edit the nova configuration so that the compute_driver is set to ec2.EC2Driver
4. Use `vim /etc/nova/nova.conf` to edit the nova configuration so that:
- the compute_driver is set to ec2.EC2Driver
- under the [conductor] section, add the following line (see the sample nova.conf snippet after this list):
use_local = True
5. Restart nova
- `~/devstack/rejoin-stack.sh`
- go to the nova-cpu screen (`ctrl+a`, `6`)
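
For reference, a minimal sketch of what the relevant nova.conf entries from step 4 might look like once edited (placing compute_driver under [DEFAULT] is an assumption based on the standard nova layout; everything else devstack generated stays as-is):

```ini
# /etc/nova/nova.conf (only the options touched in step 4 are shown)
[DEFAULT]
# Use the EC2 driver shipped with this repository
compute_driver = ec2.EC2Driver

[conductor]
# Let nova-compute talk to the database directly instead of going
# through the nova-conductor service
use_local = True
```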

View File

@ -14,8 +14,9 @@
# under the License.
"""Connection to the Amazon Web Services - EC2 service"""
from boto import ec2
import boto.ec2.cloudwatch
from boto import exception as boto_exc
from ec2driver_config import *
from oslo.config import cfg
@ -25,17 +26,27 @@ from nova.compute import power_state
from nova.compute import task_states
from nova import db
from nova import exception
from nova.image import glance
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.virt import driver
from nova.virt import virtapi
from nova.compute import flavors
from nova.compute import utils as compute_utils
import base64
import datetime
from novaclient.v1_1 import client
from credentials import get_nova_creds
LOG = logging.getLogger(__name__)
ec2driver_opts = [
cfg.StrOpt('snapshot_image_format',
help='Snapshot image format (valid options are : '
'raw, qcow2, vmdk, vdi). '
'Defaults to same as source image'),
cfg.StrOpt('datastore_regex',
help='Regex to match the name of a datastore.'),
cfg.FloatOpt('task_poll_interval',
@ -61,7 +72,12 @@ CONF.register_opts(ec2driver_opts, 'ec2driver')
TIME_BETWEEN_API_CALL_RETRIES = 1.0
EC2_STATE_MAP = {
"pending" : power_state.BUILDING,
"running" : power_state.RUNNING,
"shutting-down" : power_state.NOSTATE,
"terminated" : power_state.SHUTDOWN,
@ -69,6 +85,8 @@ EC2_STATE_MAP = {"pending": power_state.NOSTATE,
"stopped" : power_state.SHUTDOWN
}
DIAGNOSTIC_KEYS_TO_FILTER = ['group', 'block_device_mapping']
def set_nodes(nodes):
"""Sets EC2Driver's node.list.
@ -117,9 +135,14 @@ class EC2Driver(driver.ComputeDriver):
self._mounts = {}
self._interfaces = {}
# To connect to the local Nova API (used to look up instance metadata)
self.creds = get_nova_creds()
self.nova = client.Client(**self.creds)
# To connect to EC2
self.ec2_conn = ec2.connect_to_region(
aws_region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
self.cloudwatch_conn = ec2.cloudwatch.connect_to_region(
aws_region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
self.reservation = self.ec2_conn.get_all_reservations()
@ -144,45 +167,6 @@ class EC2Driver(driver.ComputeDriver):
"""Unplug VIFs from networks."""
pass
def _wait_for_state(self, instance, ec2_id, desired_state, desired_power_state):
def _wait_for_power_state():
"""Called at an interval until the VM is running again."""
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[ec2_id])
state = ec2_instance[0].state
if state == desired_state:
LOG.info("Instance has changed state to %s." % desired_state)
raise loopingcall.LoopingCallDone()
def _wait_for_status_check():
ec2_instance = self.ec2_conn.get_all_instance_status(instance_ids=[ec2_id])[0]
if ec2_instance.system_status.status == 'ok':
LOG.info("Instance status check is %s / %s" %
(ec2_instance.system_status.status, ec2_instance.instance_status.status))
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_state)
timer.start(interval=1).wait()
if desired_state == 'running':
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_status_check)
timer.start(interval=0.5).wait()
def _wait_for_image_state(self, ami_id, desired_state):
# Timer to wait for the image to reach a state
def _wait_for_state():
"""Called at an interval until the AMI image is available."""
images = self.ec2_conn.get_all_images(image_ids=[ami_id], owners=None,
executable_by=None, filters=None, dry_run=None)
state = images[0].state
# LOG.info("\n\n\nImage id = %s" % ami_id + ", state = %s\n\n\n" % state)
if state == desired_state:
LOG.info("Image has changed state to %s." % desired_state)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_state)
timer.start(interval=0.5).wait()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
LOG.info("***** Calling SPAWN *******************")
@ -204,18 +188,19 @@ class EC2Driver(driver.ComputeDriver):
ec2_id = ec2_instance[0].id
self._wait_for_state(instance, ec2_id, "running", power_state.RUNNING)
instance['metadata'].update({'ec2_id':ec2_instance[0].id, 'public_ip_address':elastic_ip_address.public_ip})
instance['metadata'].update({'ec2_id':ec2_id, 'public_ip_address':elastic_ip_address.public_ip})
LOG.info("****** Associating the elastic IP to the instance *********")
self.ec2_conn.associate_address(instance_id=ec2_id, allocation_id=elastic_ip_address.allocation_id)
def snapshot(self, context, instance, name, update_task_state):
def snapshot(self, context, instance, image_id, update_task_state):
"""
Create an AMI from the EC2 instance (backed internally by an EBS snapshot) and register it with the OpenStack image service
"""
LOG.info("***** Calling SNAPSHOT *******************")
if instance['metadata'].get('ec2_id') is None:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
@ -232,13 +217,29 @@ class EC2Driver(driver.ComputeDriver):
instance_ids=[ec2_id], filters=None, dry_run=False, max_results=None)
ec2_instance = ec_instance_info[0]
if ec2_instance.state == 'running':
image_id = ec2_instance.create_image(name=str(
name), description="Image from OpenStack", no_reboot=False, dry_run=False)
LOG.info("Image has been created state to %s." % image_id)
ec2_image_id = ec2_instance.create_image(name=str(
image_id), description="Image from OpenStack", no_reboot=False, dry_run=False)
LOG.info("Created EC2 image %s." % ec2_image_id)
# The image will be in 'pending' state when it is first created; wait
# for it to become 'available'
self._wait_for_image_state(image_id, "available")
# TODO we need to fix the queueing issue in the images
self._wait_for_image_state(ec2_image_id, "available")
image_api = glance.get_default_image_service()
image_ref = glance.generate_image_url(image_id)
metadata = {'is_public': False,
# 'checksum': '4eada48c2843d2a262c814ddc92ecf2c', #Hard-coded value for now
'location': image_ref,
'properties': {
'kernel_id': instance['kernel_id'],
'image_state': 'available',
'owner_id': instance['project_id'],
'ramdisk_id': instance['ramdisk_id'],
'ec2_image_id': ec2_image_id
}
}
image_api.update(context, image_id, metadata)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
@ -338,10 +339,13 @@ class EC2Driver(driver.ComputeDriver):
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
LOG.info("***** Calling DESTROY *******************")
if 'ec2_id' not in instance['metadata']:
LOG.warning(_("Instance %s has no ec2_id in its metadata; nothing to delete on EC2") % instance['name'], instance=instance)
return
elif 'public_ip' not in instance['metadata'] and 'public_ip_address' not in instance['metadata']:
LOG.debug("Instance metadata: %s" % instance['metadata'], instance=instance)
LOG.warning(_("Public IP is null"), instance=instance)
return
else:
# Deleting the instance from EC2
ec2_id = instance['metadata']['ec2_id']
@ -406,34 +410,55 @@ class EC2Driver(driver.ComputeDriver):
raise exception.InterfaceDetachFailed('not attached')
def get_info(self, instance):
if(instance['metadata'] is None or instance['metadata']['ec2_id'] is None):
LOG.info("*************** GET INFO ********************")
if 'metadata' not in instance or 'ec2_id' not in instance['metadata']:
raise exception.InstanceNotFound(instance_id=instance['name'])
ec2_id = instance['metadata']['ec2_id']
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[ec2_id], filters=None, dry_run=False, max_results=None)[0]
ec2_instances = self.ec2_conn.get_only_instances(instance_ids=[ec2_id], filters=None, dry_run=False, max_results=None)
if len(ec2_instances) == 0:
LOG.warning(_("EC2 instance with ID %s not found") % ec2_id, instance=instance)
raise exception.InstanceNotFound(instance_id=instance['name'])
ec2_instance = ec2_instances[0]
return {'state': EC2_STATE_MAP.get(ec2_instance.state),
'max_mem': 0,
'mem': 0,
'num_cpu': 2,
'cpu_time': 0}
def allow_key(self, key):
for key_to_filter in DIAGNOSTIC_KEYS_TO_FILTER:
if key == key_to_filter:
return False
return True
def get_diagnostics(self, instance_name):
return {'cpu0_time': 17300000000,
'memory': 524288,
'vda_errors': -1,
'vda_read': 262144,
'vda_read_req': 112,
'vda_write': 5778432,
'vda_write_req': 488,
'vnet1_rx': 2070139,
'vnet1_rx_drop': 0,
'vnet1_rx_errors': 0,
'vnet1_rx_packets': 26701,
'vnet1_tx': 140208,
'vnet1_tx_drop': 0,
'vnet1_tx_errors': 0,
'vnet1_tx_packets': 662,
}
LOG.info("******* GET DIAGNOSTICS *********************************************")
instance = self.nova.servers.get(instance_name)
ec2_id = instance.metadata['ec2_id']
ec2_instances = self.ec2_conn.get_only_instances(instance_ids=[ec2_id], filters=None, dry_run=False, max_results=None)
if len(ec2_instances) == 0:
LOG.warning(_("EC2 instance with ID %s not found") % ec2_id)
raise exception.InstanceNotFound(instance_id=instance_name)
ec2_instance = ec2_instances[0]
diagnostics = {}
for key, value in ec2_instance.__dict__.items():
if self.allow_key(key):
diagnostics['instance.' + key] = str(value)
metrics = self.cloudwatch_conn.list_metrics(dimensions={'InstanceId': ec2_id})
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(hours=1)
for metric in metrics:
details = metric.query(start, end, 'Average', None, 3600)
if (len(details) > 0):
diagnostics['metrics.' + str(metric)] = details[0]
return diagnostics
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
@ -628,6 +653,49 @@ class EC2Driver(driver.ComputeDriver):
def list_instance_uuids(self):
return []
def _wait_for_state(self, instance, ec2_id, desired_state, desired_power_state):
def _wait_for_power_state():
"""Called at an interval until the VM is running again."""
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[ec2_id])
state = ec2_instance[0].state
if state == desired_state:
LOG.info("Instance has changed state to %s." % desired_state)
raise loopingcall.LoopingCallDone()
def _wait_for_status_check():
ec2_instance = self.ec2_conn.get_all_instance_status(instance_ids=[ec2_id])[0]
if ec2_instance.system_status.status == 'ok':
LOG.info("Instance status check is %s / %s" %
(ec2_instance.system_status.status, ec2_instance.instance_status.status))
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_state)
timer.start(interval=1).wait()
if desired_state == 'running':
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_status_check)
timer.start(interval=0.5).wait()
def _wait_for_image_state(self, ami_id, desired_state):
# Timer to wait for the image to reach a state
def _wait_for_state():
"""Called at an interval until the AMI image is available."""
try:
images = self.ec2_conn.get_all_images(image_ids=[ami_id], owners=None,
executable_by=None, filters=None, dry_run=None)
state = images[0].state
# LOG.info("\n\n\nImage id = %s" % ami_id + ", state = %s\n\n\n" % state)
if state == desired_state:
LOG.info("Image has changed state to %s." % desired_state)
raise loopingcall.LoopingCallDone()
except boto_exc.EC2ResponseError:
pass
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_state)
timer.start(interval=0.5).wait()
class EC2VirtAPI(virtapi.VirtAPI):
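
As an aside to the diff above, the block below is a minimal standalone sketch of the CloudWatch query pattern that the new get_diagnostics() relies on: connect to the region, list the metrics tagged with the instance id, and pull the last hour's average for each. The function name and its parameters are illustrative placeholders; in the driver itself the connection and credentials come from ec2driver_config and the instance metadata.

```python
# Standalone sketch (not part of the driver file): same boto calls as
# get_diagnostics() above, with placeholder region/credentials/instance id.
import datetime

import boto.ec2.cloudwatch


def fetch_instance_metrics(region, access_key, secret_key, ec2_id):
    """Return {metric: latest hourly average datapoint} for one EC2 instance."""
    conn = boto.ec2.cloudwatch.connect_to_region(
        region,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key)

    end = datetime.datetime.utcnow()
    start = end - datetime.timedelta(hours=1)

    stats = {}
    for metric in conn.list_metrics(dimensions={'InstanceId': ec2_id}):
        # metric.query(start, end, statistic, unit, period) returns a list of
        # datapoint dicts; an empty list means no data for that metric yet.
        datapoints = metric.query(start, end, 'Average', None, 3600)
        if datapoints:
            stats[str(metric)] = datapoints[0]
    return stats
```

get_diagnostics() stores these datapoints under 'metrics.<metric>' keys alongside the filtered instance attributes.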

View File

@ -31,11 +31,11 @@ class EC2DriverTest(unittest.TestCase):
time.sleep(10)
instance = self.nova.servers.get(server.id)
self.servers.append(instance)
return instance
return instance, server.id
def test_spawn(self):
print "******* Spawn Test ***********"
instance = self.spawn_ec2_instance()
instance, instance_ref = self.spawn_ec2_instance()
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[instance.metadata['ec2_id']], filters=None,
dry_run=False, max_results=None)
@ -45,7 +45,7 @@ class EC2DriverTest(unittest.TestCase):
def test_destroy(self):
print "******* Destroy Test ***********"
instance = self.spawn_ec2_instance()
instance, instance_ref = self.spawn_ec2_instance()
ec2_id = instance.metadata['ec2_id']
@ -73,7 +73,7 @@ class EC2DriverTest(unittest.TestCase):
def test_power_off(self):
print "******* Power Off Test ***********"
instance = self.spawn_ec2_instance()
instance, instance_ref = self.spawn_ec2_instance()
# Send poweroff to the instance
self.nova.servers.stop(instance)
@ -88,7 +88,7 @@ class EC2DriverTest(unittest.TestCase):
def test_soft_reboot(self):
print "******* Soft Reboot Test ***********"
instance = self.spawn_ec2_instance()
instance, instance_ref = self.spawn_ec2_instance()
# Send reboot to the instance with reboot_type = 'soft'
self.nova.servers.reboot(instance, client.servers.REBOOT_SOFT)
@ -110,7 +110,7 @@ class EC2DriverTest(unittest.TestCase):
def test_hard_reboot(self):
print "******* Hard Reboot Test ***********"
instance = self.spawn_ec2_instance()
instance, instance_ref = self.spawn_ec2_instance()
# Send reboot to the instance with reboot_type = 'hard'
self.nova.servers.reboot(instance, client.servers.REBOOT_HARD)
@ -131,7 +131,7 @@ class EC2DriverTest(unittest.TestCase):
def test_resize(self):
print "******* Resize Test ***********"
instance = self.spawn_ec2_instance()
instance, instance_ref = self.spawn_ec2_instance()
ec2_instance = self.ec2_conn.get_only_instances(instance_ids=[instance.metadata['ec2_id']], filters=None,
dry_run=False, max_results=None)[0]
@ -175,7 +175,7 @@ class EC2DriverTest(unittest.TestCase):
server = self.nova.servers.create(name="cirros-test", image=image.id, flavor=flavor.id,
userdata=user_data_content)
instance = self.nova.servers.get(server.id)
while instance.status != 'ACTIVE':
while instance.status != 'ACTIVE' or 'ec2_id' not in instance.metadata:
time.sleep(10)
instance = self.nova.servers.get(server.id)
self.servers.append(instance)
@ -194,6 +194,16 @@ class EC2DriverTest(unittest.TestCase):
print raw_response
self.assertEqual(raw_response.code, 200)
def test_diagnostics(self):
print "******* Diagnostics Test ***********"
instance, instance_ref = self.spawn_ec2_instance()
print "instance_ref: ", instance_ref
diagnostics = instance.diagnostics()[1]
self.assertEqual(diagnostics['instance.instance_type'], 't2.micro')
self.assertEqual(diagnostics['instance._state'], 'running(16)')
@classmethod
def tearDown(self):
print "Cleanup: Destroying the instance used for testing"