# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
A fake (in-memory) hypervisor+api.

Allows nova testing without a hypervisor.  This module also documents the
semantics of real hypervisor connections.
"""

from oslo.config import cfg

from nova.compute import power_state
from nova.compute import task_states
from nova import db
from nova import exception
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt import virtapi

CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')

LOG = logging.getLogger(__name__)

_FAKE_NODES = None


def set_nodes(nodes):
    """Sets FakeDriver's node list.

    It has effect on the following methods:
        get_available_nodes()
        get_available_resource()
        get_host_stats()

    To restore the change, call restore_nodes().
    """
    global _FAKE_NODES
    _FAKE_NODES = nodes


def restore_nodes():
    """Resets FakeDriver's node list modified by set_nodes().

    Usually called from tearDown().
    """
    global _FAKE_NODES
    _FAKE_NODES = [CONF.host]
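
# A minimal usage sketch (illustrative only; the test class name below is
# hypothetical).  Tests that need a multi-node FakeDriver call set_nodes()
# up front and register restore_nodes() as cleanup, so the module-level
# _FAKE_NODES list does not leak into other test cases:
#
#     class MultiNodeTestCase(test.TestCase):
#         def setUp(self):
#             super(MultiNodeTestCase, self).setUp()
#             set_nodes(['node-a', 'node-b'])
#             self.addCleanup(restore_nodes)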

class FakeInstance(object):

    def __init__(self, name, state):
        self.name = name
        self.state = state

    def __getitem__(self, key):
        return getattr(self, key)


class FakeDriver(driver.ComputeDriver):
    """Fake hypervisor driver."""

    capabilities = {
        "has_imagecache": True,
        "supports_recreate": True,
        }

    def __init__(self, virtapi, read_only=False):
        super(FakeDriver, self).__init__(virtapi)
        self.instances = {}
        self.host_status_base = {
            'host_name-description': 'Fake Host',
            'host_hostname': CONF.host,
            'host_memory_total': 8000000000,
            'host_memory_overhead': 10000000,
            'host_memory_free': 7900000000,
            'host_memory_free_computed': 7900000000,
            'host_other_config': {},
            'host_ip_address': '192.168.1.109',
            'host_cpu_info': {},
            'disk_available': 500000000000,
            'disk_total': 600000000000,
            'disk_used': 100000000000,
            'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
            'host_name_label': 'fake-host',
            'hypervisor_hostname': CONF.host,
            }
        self._mounts = {}
        self._interfaces = {}
        if not _FAKE_NODES:
            set_nodes([CONF.host])

    def init_host(self, host):
        return

    def list_instances(self):
        return self.instances.keys()

    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        pass

    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        pass

    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        name = instance['name']
        state = power_state.RUNNING
        fake_instance = FakeInstance(name, state)
        self.instances[name] = fake_instance

    def snapshot(self, context, instance, name, update_task_state):
        if instance['name'] not in self.instances:
            raise exception.InstanceNotRunning(instance_id=instance['uuid'])
        update_task_state(task_state=task_states.IMAGE_UPLOADING)

    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        pass

    @staticmethod
    def get_host_ip_addr():
        return '192.168.0.1'

    def set_admin_password(self, instance, new_pass):
        pass

    def inject_file(self, instance, b64_path, b64_contents):
        pass

    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        pass

    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        pass

    def unrescue(self, instance, network_info):
        pass

    def poll_rebooting_instances(self, timeout, instances):
        pass

    def migrate_disk_and_power_off(self, context, instance, dest,
                                   instance_type, network_info,
                                   block_device_info=None):
        pass

    def finish_revert_migration(self, instance, network_info,
                                block_device_info=None, power_on=True):
        pass

    def post_live_migration_at_destination(self, context, instance,
                                           network_info,
                                           block_migration=False,
                                           block_device_info=None):
        pass

    def power_off(self, instance):
        pass

    def power_on(self, instance):
        pass

    def soft_delete(self, instance):
        pass

    def restore(self, instance):
        pass

    def pause(self, instance):
        pass

    def unpause(self, instance):
        pass

    def suspend(self, instance):
        pass

    def resume(self, instance, network_info, block_device_info=None):
        pass

    def destroy(self, instance, network_info, block_device_info=None,
                destroy_disks=True):
        key = instance['name']
        if key in self.instances:
            del self.instances[key]
        else:
            LOG.warning(_("Key '%(key)s' not in instances '%(inst)s'") %
                        {'key': key,
                         'inst': self.instances}, instance=instance)

    def attach_volume(self, connection_info, instance, mountpoint):
        """Attach the disk to the instance at mountpoint using info."""
        instance_name = instance['name']
        if instance_name not in self._mounts:
            self._mounts[instance_name] = {}
        self._mounts[instance_name][mountpoint] = connection_info
        return True

    def detach_volume(self, connection_info, instance, mountpoint):
        """Detach the disk attached to the instance."""
        try:
            del self._mounts[instance['name']][mountpoint]
        except KeyError:
            pass
        return True
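    # Illustrative round-trip of the in-memory volume bookkeeping above
    # (not part of the driver API; assumes `drv` is a FakeDriver instance).
    # attach_volume() records connection_info keyed by instance name and
    # mountpoint, and detach_volume() silently drops it, so detaching a
    # never-attached volume still returns True:
    #
    #     drv.attach_volume({'driver_volume_type': 'fake'},
    #                       {'name': 'inst-1'}, '/dev/vdb')
    #     assert drv._mounts['inst-1']['/dev/vdb']
    #     drv.detach_volume(None, {'name': 'inst-1'}, '/dev/vdb')
    #     assert '/dev/vdb' not in drv._mounts['inst-1']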
    def attach_interface(self, instance, image_meta, network_info):
        for (network, mapping) in network_info:
            if mapping['vif_uuid'] in self._interfaces:
                raise exception.InterfaceAttachFailed('duplicate')
            self._interfaces[mapping['vif_uuid']] = mapping

    def detach_interface(self, instance, network_info):
        for (network, mapping) in network_info:
            try:
                del self._interfaces[mapping['vif_uuid']]
            except KeyError:
                raise exception.InterfaceDetachFailed('not attached')

    def get_info(self, instance):
        if instance['name'] not in self.instances:
            raise exception.InstanceNotFound(instance_id=instance['name'])
        i = self.instances[instance['name']]
        return {'state': i.state,
                'max_mem': 0,
                'mem': 0,
                'num_cpu': 2,
                'cpu_time': 0}

    def get_diagnostics(self, instance_name):
        return {'cpu0_time': 17300000000,
                'memory': 524288,
                'vda_errors': -1,
                'vda_read': 262144,
                'vda_read_req': 112,
                'vda_write': 5778432,
                'vda_write_req': 488,
                'vnet1_rx': 2070139,
                'vnet1_rx_drop': 0,
                'vnet1_rx_errors': 0,
                'vnet1_rx_packets': 26701,
                'vnet1_tx': 140208,
                'vnet1_tx_drop': 0,
                'vnet1_tx_errors': 0,
                'vnet1_tx_packets': 662,
                }

    def get_all_bw_counters(self, instances):
        """Return bandwidth usage counters for each interface on each
           running VM.
        """
        bw = []
        return bw

    def get_all_volume_usage(self, context, compute_host_bdms):
        """Return usage info for volumes attached to vms on a given host."""
        volusage = []
        return volusage

    def block_stats(self, instance_name, disk_id):
        return [0L, 0L, 0L, 0L, None]

    def interface_stats(self, instance_name, iface_id):
        return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]

    def get_console_output(self, instance):
        return 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE'

    def get_vnc_console(self, instance):
        return {'internal_access_path': 'FAKE',
                'host': 'fakevncconsole.com',
                'port': 6969}

    def get_spice_console(self, instance):
        return {'internal_access_path': 'FAKE',
                'host': 'fakespiceconsole.com',
                'port': 6969,
                'tlsPort': 6970}

    def get_console_pool_info(self, console_type):
        return {'address': '127.0.0.1',
                'username': 'fakeuser',
                'password': 'fakepassword'}

    def refresh_security_group_rules(self, security_group_id):
        return True

    def refresh_security_group_members(self, security_group_id):
        return True

    def refresh_instance_security_rules(self, instance):
        return True

    def refresh_provider_fw_rules(self):
        pass

    def get_available_resource(self, nodename):
        """Updates compute manager resource info on ComputeNode table.

        Since we don't have a real hypervisor, pretend we have lots of
        disk and ram.
        """
        if nodename not in _FAKE_NODES:
            return {}

        dic = {'vcpus': 1,
               'memory_mb': 8192,
               'local_gb': 1028,
               'vcpus_used': 0,
               'memory_mb_used': 0,
               'local_gb_used': 0,
               'hypervisor_type': 'fake',
               'hypervisor_version': '1.0',
               'hypervisor_hostname': nodename,
               'cpu_info': '?'}
        return dic

    def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
        """This method is supported only by libvirt."""
        raise NotImplementedError('This method is supported only by libvirt.')

    def get_instance_disk_info(self, instance_name):
        return

    def live_migration(self, context, instance_ref, dest,
                       post_method, recover_method, block_migration=False,
                       migrate_data=None):
        return

    def check_can_live_migrate_destination_cleanup(self, ctxt,
                                                   dest_check_data):
        return

    def check_can_live_migrate_destination(self, ctxt, instance_ref,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        return {}

    def check_can_live_migrate_source(self, ctxt, instance_ref,
                                      dest_check_data):
        return

    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        return

    def confirm_migration(self, migration, instance, network_info):
        return

    def pre_live_migration(self, context, instance_ref, block_device_info,
                           network_info, migrate_data=None):
        return

    def unfilter_instance(self, instance_ref, network_info):
        """This method is supported only by libvirt."""
        raise NotImplementedError('This method is supported only by libvirt.')

    def test_remove_vm(self, instance_name):
        """Removes the named VM, as if it crashed. For testing."""
        self.instances.pop(instance_name)

    def get_host_stats(self, refresh=False):
        """Return fake Host Status of ram, disk, network."""
        stats = []
        for nodename in _FAKE_NODES:
            host_status = self.host_status_base.copy()
            host_status['hypervisor_hostname'] = nodename
            host_status['host_hostname'] = nodename
            host_status['host_name_label'] = nodename
            stats.append(host_status)
        if len(stats) == 0:
            raise exception.NovaException("FakeDriver has no node")
        elif len(stats) == 1:
            return stats[0]
        else:
            return stats
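    # Note on get_host_stats() above (illustrative, assuming `drv` is a
    # FakeDriver instance): the return type depends on how many fake nodes
    # are configured.  With the default single node it returns one status
    # dict; after set_nodes() with several names it returns a list of
    # dicts, one per node, each relabeled with that node's hostname:
    #
    #     set_nodes(['node-a', 'node-b'])
    #     stats = drv.get_host_stats()
    #     assert [s['hypervisor_hostname'] for s in stats] == \
    #         ['node-a', 'node-b']
    #     restore_nodes()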
    def host_power_action(self, host, action):
        """Reboots, shuts down or powers up the host."""
        return action

    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window.

        On start, it triggers guest VMs evacuation.
        """
        if not mode:
            return 'off_maintenance'
        return 'on_maintenance'

    def set_host_enabled(self, host, enabled):
        """Sets the specified host's ability to accept new instances."""
        if enabled:
            return 'enabled'
        return 'disabled'

    def get_disk_available_least(self):
        pass

    def get_volume_connector(self, instance):
        return {'ip': '127.0.0.1', 'initiator': 'fake', 'host': 'fakehost'}

    def get_available_nodes(self):
        return _FAKE_NODES

    def instance_on_disk(self, instance):
        return False

    def list_instance_uuids(self):
        return []

    def legacy_nwinfo(self):
        return True


class FakeVirtAPI(virtapi.VirtAPI):
    def instance_update(self, context, instance_uuid, updates):
        return db.instance_update_and_get_original(context,
                                                   instance_uuid,
                                                   updates)

    def aggregate_get_by_host(self, context, host, key=None):
        return db.aggregate_get_by_host(context, host, key=key)

    def aggregate_metadata_add(self, context, aggregate, metadata,
                               set_delete=False):
        return db.aggregate_metadata_add(context, aggregate['id'], metadata,
                                         set_delete=set_delete)

    def aggregate_metadata_delete(self, context, aggregate, key):
        return db.aggregate_metadata_delete(context, aggregate['id'], key)

    def security_group_get_by_instance(self, context, instance):
        return db.security_group_get_by_instance(context, instance['id'])

    def security_group_rule_get_by_security_group(self, context,
                                                  security_group):
        return db.security_group_rule_get_by_security_group(
            context, security_group['id'])

    def provider_fw_rule_get_all(self, context):
        return db.provider_fw_rule_get_all(context)

    def agent_build_get_by_triple(self, context, hypervisor, os,
                                  architecture):
        return db.agent_build_get_by_triple(context,
                                            hypervisor, os, architecture)

    def instance_type_get(self, context, instance_type_id):
        return db.instance_type_get(context, instance_type_id)
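
# End-to-end sketch of exercising the fake driver (illustrative only; a
# real caller would get a proper context and instance dict from the test
# framework rather than the bare values used here):
#
#     drv = FakeDriver(FakeVirtAPI())
#     drv.spawn(None, {'name': 'inst-1'}, None, [], 'password')
#     info = drv.get_info({'name': 'inst-1'})
#     assert info['state'] == power_state.RUNNING
#     drv.destroy({'name': 'inst-1'}, None)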