From 4aaef726dc1598334d5cd9cc7853e301ff77d284 Mon Sep 17 00:00:00 2001 From: joey5678 Date: Mon, 29 Dec 2014 11:38:57 +0800 Subject: [PATCH] Remove instance_mapping_uuid_patch Because of the change in mapping relationship between cascading and cascaded, the instance_mapping_uuid_patch is no need exists. Change-Id: I72606bb8f1d7238a53e0e5caae0671fa4ed105fa --- .../instance_mapping_uuid_patch/README.md | 63 - .../installation/install.sh | 91 - .../installation/uninstall.sh | 19 - .../nova/conductor/manager.py | 759 --------- ...255_add_mapping_uuid_column_to_instance.py | 20 - .../nova/db/sqlalchemy/models.py | 1417 ---------------- .../nova/network/neutronv2/api.py | 1496 ----------------- .../nova/objects/instance.py | 802 --------- 8 files changed, 4667 deletions(-) delete mode 100644 juno-patches/nova/instance_mapping_uuid_patch/README.md delete mode 100644 juno-patches/nova/instance_mapping_uuid_patch/installation/install.sh delete mode 100644 juno-patches/nova/instance_mapping_uuid_patch/installation/uninstall.sh delete mode 100644 juno-patches/nova/instance_mapping_uuid_patch/nova/conductor/manager.py delete mode 100644 juno-patches/nova/instance_mapping_uuid_patch/nova/db/sqlalchemy/migrate_repo/versions/255_add_mapping_uuid_column_to_instance.py delete mode 100644 juno-patches/nova/instance_mapping_uuid_patch/nova/db/sqlalchemy/models.py delete mode 100644 juno-patches/nova/instance_mapping_uuid_patch/nova/network/neutronv2/api.py delete mode 100644 juno-patches/nova/instance_mapping_uuid_patch/nova/objects/instance.py diff --git a/juno-patches/nova/instance_mapping_uuid_patch/README.md b/juno-patches/nova/instance_mapping_uuid_patch/README.md deleted file mode 100644 index adbcfb0..0000000 --- a/juno-patches/nova/instance_mapping_uuid_patch/README.md +++ /dev/null @@ -1,63 +0,0 @@ -Nova instance mapping_uuid patch -=============================== -add instance mapping_uuid attribute patch,it will be patched in cascading level's control node - -How can we 
manage the servers in cascading level? To solve this problem,nova proxy must can get relation of cascading and cascaded server.So we can do this through adding instance attribute mapping_uuid - -Key modules ------------ - -* adding mapping_uuid column in nova instance table,when nova synchronizes db: - - nova\db\sqlalchemy\migrate_repo\versions\234_add_mapping_uuid_column_to_instance.py - nova\db\sqlalchemy\models.py - nova-2014.1\nova\objects\instance.py - nova\network\neutronv2\api.py - -* allowing nova proxy update instance mapping_uuid through conductor - nova\conductor\manager.py - -Requirements ------------- -* openstack of juno-version has been installed - -Installation ------------- - -We provide two ways to install the instance_mapping_uuid patch code. In this section, we will guide you through installing the instance_mapping_uuid patch. - -* **Note:** - - - Make sure you have an existing installation of **Openstack Juno**. - - We recommend that you Do backup at least the following files before installation, because they are to be overwritten or modified: - -* **Manual Installation** - - - Make sure you have performed backups properly. - - - Navigate to the local repository and copy the contents in 'nova' sub-directory to the corresponding places in existing nova, e.g. - ```cp -r $LOCAL_REPOSITORY_DIR/nova $NOVA_PARENT_DIR``` - (replace the $... with actual directory name.) - - - synchronize the nova db. - ``` - mysql -u root -p$MYSQL_PASS -e "DROP DATABASE if exists nova; - CREATE DATABASE nova; - GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '$NOVA_PASSWORD'; - GRANT ALL PRIVILEGES ON *.* TO 'nova'@'%'IDENTIFIED BY '$NOVA_PASSWORD'; - nova-manage db sync - ``` - - - Done. The nova proxy should be working with a demo configuration. - -* **Automatic Installation** - - - Make sure you have performed backups properly. - - - Navigate to the installation directory and run installation script. 
- ``` - cd $LOCAL_REPOSITORY_DIR/installation - sudo bash ./install.sh - ``` - (replace the $... with actual directory name.) - diff --git a/juno-patches/nova/instance_mapping_uuid_patch/installation/install.sh b/juno-patches/nova/instance_mapping_uuid_patch/installation/install.sh deleted file mode 100644 index b8c58b3..0000000 --- a/juno-patches/nova/instance_mapping_uuid_patch/installation/install.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/bash - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# Copyright (c) 2014 Huawei Technologies. - -_MYSQL_PASS="1234" -_NOVA_INSTALL="/usr/lib64/python2.6/site-packages" -_NOVA_DIR="${_NOVA_INSTALL}/nova" -# if you did not make changes to the installation files, -# please do not edit the following directories. -_CODE_DIR="../nova" -_BACKUP_DIR="${_NOVA_INSTALL}/.instance_mapping_uuid_patch-installation-backup" - -_SCRIPT_LOGFILE="/var/log/instance_mapping_uuid_patch/installation/install.log" - -function log() -{ - log_path=`dirname ${_SCRIPT_LOGFILE}` - if [ ! -d $log_path ] ; then - mkdir -p $log_path - fi - echo "$@" - echo "`date -u +'%Y-%m-%d %T.%N'`: $@" >> $_SCRIPT_LOGFILE -} - -if [[ ${EUID} -ne 0 ]]; then - log "Please run as root." - exit 1 -fi - - -cd `dirname $0` - -log "checking installation directories..." -if [ ! -d "${_NOVA_DIR}" ] ; then - log "Could not find the nova installation. Please check the variables in the beginning of the script." - log "aborted." - exit 1 -fi - -log "checking previous installation..." 
-if [ -d "${_BACKUP_DIR}/nova" ] ; then - log "It seems nova-proxy has already been installed!" - log "Please check README for solution if this is not true." - exit 1 -fi - -log "backing up current files that might be overwritten..." -cp -r "${_NOVA_DIR}/" "${_BACKUP_DIR}/" -if [ $? -ne 0 ] ; then - rm -r "${_BACKUP_DIR}/nova" - echo "Error in code backup, aborted." - exit 1 -fi - -log "copying in new files..." -cp -r "${_CODE_DIR}" `dirname ${_NOVA_DIR}` -if [ $? -ne 0 ] ; then - log "Error in copying, aborted." - log "Recovering original files..." - cp -r "${_BACKUP_DIR}/nova" `dirname ${_NOVA_DIR}` && rm -r "${_BACKUP_DIR}/nova" - if [ $? -ne 0 ] ; then - log "Recovering failed! Please install manually." - fi - exit 1 -fi - -log "syc nova db..." -mysql -u root -p$_MYSQL_PASS -e "DROP DATABASE if exists nova;CREATE DATABASE nova;GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'Galax8800';GRANT ALL PRIVILEGES ON *.* TO 'nova'@'%'IDENTIFIED BY 'Galax8800';" - -nova-manage db sync - -if [ $? -ne 0 ] ; then - log "There was an error in restarting the service, please restart nova scheduler manually." - exit 1 -fi - -log "Completed." -log "See README to get started." -exit 0 - diff --git a/juno-patches/nova/instance_mapping_uuid_patch/installation/uninstall.sh b/juno-patches/nova/instance_mapping_uuid_patch/installation/uninstall.sh deleted file mode 100644 index f770fd9..0000000 --- a/juno-patches/nova/instance_mapping_uuid_patch/installation/uninstall.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# Copyright (c) 2014 Huawei Technologies. - - -# The uninstallation script don't had been realization, -# it will be supplied if needed. -exit 1 \ No newline at end of file diff --git a/juno-patches/nova/instance_mapping_uuid_patch/nova/conductor/manager.py b/juno-patches/nova/instance_mapping_uuid_patch/nova/conductor/manager.py deleted file mode 100644 index 480414b..0000000 --- a/juno-patches/nova/instance_mapping_uuid_patch/nova/conductor/manager.py +++ /dev/null @@ -1,759 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Handles database requests from other nova services.""" - -import copy -import itertools - -from oslo import messaging -import six - -from nova.api.ec2 import ec2utils -from nova import block_device -from nova.cells import rpcapi as cells_rpcapi -from nova.compute import api as compute_api -from nova.compute import rpcapi as compute_rpcapi -from nova.compute import task_states -from nova.compute import utils as compute_utils -from nova.compute import vm_states -from nova.conductor.tasks import live_migrate -from nova.db import base -from nova import exception -from nova.i18n import _ -from nova import image -from nova import manager -from nova import network -from nova.network.security_group import openstack_driver -from nova import notifications -from nova import objects -from nova.objects import base as nova_object -from nova.openstack.common import excutils -from nova.openstack.common import jsonutils -from nova.openstack.common import log as logging -from nova.openstack.common import timeutils -from nova import quota -from nova.scheduler import client as scheduler_client -from nova.scheduler import driver as scheduler_driver -from nova.scheduler import utils as scheduler_utils - -LOG = logging.getLogger(__name__) - -# Instead of having a huge list of arguments to instance_update(), we just -# accept a dict of fields to update and use this whitelist to validate it. -allowed_updates = ['task_state', 'vm_state', 'expected_task_state', - 'power_state', 'access_ip_v4', 'access_ip_v6', - 'launched_at', 'terminated_at', 'host', 'node', - 'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb', - 'instance_type_id', 'root_device_name', 'launched_on', - 'progress', 'vm_mode', 'default_ephemeral_device', - 'default_swap_device', 'root_device_name', - 'system_metadata', 'updated_at', 'mapping_uuid' - ] - -# Fields that we want to convert back into a datetime object. 
-datetime_fields = ['launched_at', 'terminated_at', 'updated_at'] - - -class ConductorManager(manager.Manager): - """Mission: Conduct things. - - The methods in the base API for nova-conductor are various proxy operations - performed on behalf of the nova-compute service running on compute nodes. - Compute nodes are not allowed to directly access the database, so this set - of methods allows them to get specific work done without locally accessing - the database. - - The nova-conductor service also exposes an API in the 'compute_task' - namespace. See the ComputeTaskManager class for details. - """ - - target = messaging.Target(version='2.0') - - def __init__(self, *args, **kwargs): - super(ConductorManager, self).__init__(service_name='conductor', - *args, **kwargs) - self.security_group_api = ( - openstack_driver.get_openstack_security_group_driver()) - self._network_api = None - self._compute_api = None - self.compute_task_mgr = ComputeTaskManager() - self.cells_rpcapi = cells_rpcapi.CellsAPI() - self.additional_endpoints.append(self.compute_task_mgr) - - @property - def network_api(self): - # NOTE(danms): We need to instantiate our network_api on first use - # to avoid the circular dependency that exists between our init - # and network_api's - if self._network_api is None: - self._network_api = network.API() - return self._network_api - - @property - def compute_api(self): - if self._compute_api is None: - self._compute_api = compute_api.API() - return self._compute_api - - def ping(self, context, arg): - # NOTE(russellb) This method can be removed in 2.0 of this API. It is - # now a part of the base rpc API. 
- return jsonutils.to_primitive({'service': 'conductor', 'arg': arg}) - - @messaging.expected_exceptions(KeyError, ValueError, - exception.InvalidUUID, - exception.InstanceNotFound, - exception.UnexpectedTaskStateError) - def instance_update(self, context, instance_uuid, - updates, service): - for key, value in updates.iteritems(): - if key not in allowed_updates: - LOG.error(_("Instance update attempted for " - "'%(key)s' on %(instance_uuid)s"), - {'key': key, 'instance_uuid': instance_uuid}) - raise KeyError("unexpected update keyword '%s'" % key) - if key in datetime_fields and isinstance(value, six.string_types): - updates[key] = timeutils.parse_strtime(value) - - old_ref, instance_ref = self.db.instance_update_and_get_original( - context, instance_uuid, updates) - notifications.send_update(context, old_ref, instance_ref, service) - return jsonutils.to_primitive(instance_ref) - - @messaging.expected_exceptions(exception.InstanceNotFound) - def instance_get_by_uuid(self, context, instance_uuid, - columns_to_join): - return jsonutils.to_primitive( - self.db.instance_get_by_uuid(context, instance_uuid, - columns_to_join)) - - def instance_get_all_by_host(self, context, host, node, - columns_to_join): - if node is not None: - result = self.db.instance_get_all_by_host_and_node( - context.elevated(), host, node) - else: - result = self.db.instance_get_all_by_host(context.elevated(), host, - columns_to_join) - return jsonutils.to_primitive(result) - - def migration_get_in_progress_by_host_and_node(self, context, - host, node): - migrations = self.db.migration_get_in_progress_by_host_and_node( - context, host, node) - return jsonutils.to_primitive(migrations) - - @messaging.expected_exceptions(exception.AggregateHostExists) - def aggregate_host_add(self, context, aggregate, host): - host_ref = self.db.aggregate_host_add(context.elevated(), - aggregate['id'], host) - - return jsonutils.to_primitive(host_ref) - - 
@messaging.expected_exceptions(exception.AggregateHostNotFound) - def aggregate_host_delete(self, context, aggregate, host): - self.db.aggregate_host_delete(context.elevated(), - aggregate['id'], host) - - def aggregate_metadata_get_by_host(self, context, host, - key='availability_zone'): - result = self.db.aggregate_metadata_get_by_host(context, host, key) - return jsonutils.to_primitive(result) - - def bw_usage_update(self, context, uuid, mac, start_period, - bw_in, bw_out, last_ctr_in, last_ctr_out, - last_refreshed, update_cells): - if [bw_in, bw_out, last_ctr_in, last_ctr_out].count(None) != 4: - self.db.bw_usage_update(context, uuid, mac, start_period, - bw_in, bw_out, last_ctr_in, last_ctr_out, - last_refreshed, - update_cells=update_cells) - usage = self.db.bw_usage_get(context, uuid, start_period, mac) - return jsonutils.to_primitive(usage) - - def provider_fw_rule_get_all(self, context): - rules = self.db.provider_fw_rule_get_all(context) - return jsonutils.to_primitive(rules) - - # NOTE(danms): This can be removed in version 3.0 of the RPC API - def agent_build_get_by_triple(self, context, hypervisor, os, architecture): - info = self.db.agent_build_get_by_triple(context, hypervisor, os, - architecture) - return jsonutils.to_primitive(info) - - def block_device_mapping_update_or_create(self, context, values, create): - if create is None: - bdm = self.db.block_device_mapping_update_or_create(context, - values) - elif create is True: - bdm = self.db.block_device_mapping_create(context, values) - else: - bdm = self.db.block_device_mapping_update(context, - values['id'], - values) - bdm_obj = objects.BlockDeviceMapping._from_db_object( - context, objects.BlockDeviceMapping(), bdm) - self.cells_rpcapi.bdm_update_or_create_at_top(context, bdm_obj, - create=create) - - def block_device_mapping_get_all_by_instance(self, context, instance, - legacy): - bdms = self.db.block_device_mapping_get_all_by_instance( - context, instance['uuid']) - if legacy: - bdms = 
block_device.legacy_mapping(bdms) - return jsonutils.to_primitive(bdms) - - def instance_get_all_by_filters(self, context, filters, sort_key, - sort_dir, columns_to_join, - use_slave): - result = self.db.instance_get_all_by_filters( - context, filters, sort_key, sort_dir, - columns_to_join=columns_to_join, use_slave=use_slave) - return jsonutils.to_primitive(result) - - def instance_get_active_by_window(self, context, begin, end, - project_id, host): - # Unused, but cannot remove until major RPC version bump - result = self.db.instance_get_active_by_window(context, begin, end, - project_id, host) - return jsonutils.to_primitive(result) - - def instance_get_active_by_window_joined(self, context, begin, end, - project_id, host): - result = self.db.instance_get_active_by_window_joined( - context, begin, end, project_id, host) - return jsonutils.to_primitive(result) - - def instance_destroy(self, context, instance): - result = self.db.instance_destroy(context, instance['uuid']) - return jsonutils.to_primitive(result) - - def instance_fault_create(self, context, values): - result = self.db.instance_fault_create(context, values) - return jsonutils.to_primitive(result) - - # NOTE(kerrin): The last_refreshed argument is unused by this method - # and can be removed in v3.0 of the RPC API. 
- def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req, - wr_bytes, instance, last_refreshed, update_totals): - vol_usage = self.db.vol_usage_update(context, vol_id, - rd_req, rd_bytes, - wr_req, wr_bytes, - instance['uuid'], - instance['project_id'], - instance['user_id'], - instance['availability_zone'], - update_totals) - - # We have just updated the database, so send the notification now - self.notifier.info(context, 'volume.usage', - compute_utils.usage_volume_info(vol_usage)) - - @messaging.expected_exceptions(exception.ComputeHostNotFound, - exception.HostBinaryNotFound) - def service_get_all_by(self, context, topic, host, binary): - if not any((topic, host, binary)): - result = self.db.service_get_all(context) - elif all((topic, host)): - if topic == 'compute': - result = self.db.service_get_by_compute_host(context, host) - # FIXME(comstud) Potentially remove this on bump to v3.0 - result = [result] - else: - result = self.db.service_get_by_host_and_topic(context, - host, topic) - elif all((host, binary)): - result = self.db.service_get_by_args(context, host, binary) - elif topic: - result = self.db.service_get_all_by_topic(context, topic) - elif host: - result = self.db.service_get_all_by_host(context, host) - - return jsonutils.to_primitive(result) - - @messaging.expected_exceptions(exception.InstanceActionNotFound) - def action_event_start(self, context, values): - evt = self.db.action_event_start(context, values) - return jsonutils.to_primitive(evt) - - @messaging.expected_exceptions(exception.InstanceActionNotFound, - exception.InstanceActionEventNotFound) - def action_event_finish(self, context, values): - evt = self.db.action_event_finish(context, values) - return jsonutils.to_primitive(evt) - - def service_create(self, context, values): - svc = self.db.service_create(context, values) - return jsonutils.to_primitive(svc) - - @messaging.expected_exceptions(exception.ServiceNotFound) - def service_destroy(self, context, service_id): - 
self.db.service_destroy(context, service_id) - - def compute_node_create(self, context, values): - result = self.db.compute_node_create(context, values) - return jsonutils.to_primitive(result) - - def compute_node_update(self, context, node, values): - result = self.db.compute_node_update(context, node['id'], values) - return jsonutils.to_primitive(result) - - def compute_node_delete(self, context, node): - result = self.db.compute_node_delete(context, node['id']) - return jsonutils.to_primitive(result) - - @messaging.expected_exceptions(exception.ServiceNotFound) - def service_update(self, context, service, values): - svc = self.db.service_update(context, service['id'], values) - return jsonutils.to_primitive(svc) - - def task_log_get(self, context, task_name, begin, end, host, state): - result = self.db.task_log_get(context, task_name, begin, end, host, - state) - return jsonutils.to_primitive(result) - - def task_log_begin_task(self, context, task_name, begin, end, host, - task_items, message): - result = self.db.task_log_begin_task(context.elevated(), task_name, - begin, end, host, task_items, - message) - return jsonutils.to_primitive(result) - - def task_log_end_task(self, context, task_name, begin, end, host, - errors, message): - result = self.db.task_log_end_task(context.elevated(), task_name, - begin, end, host, errors, message) - return jsonutils.to_primitive(result) - - def notify_usage_exists(self, context, instance, current_period, - ignore_missing_network_data, - system_metadata, extra_usage_info): - compute_utils.notify_usage_exists(self.notifier, context, instance, - current_period, - ignore_missing_network_data, - system_metadata, extra_usage_info) - - def security_groups_trigger_handler(self, context, event, args): - self.security_group_api.trigger_handler(event, context, *args) - - def security_groups_trigger_members_refresh(self, context, group_ids): - self.security_group_api.trigger_members_refresh(context, group_ids) - - def 
network_migrate_instance_start(self, context, instance, migration): - self.network_api.migrate_instance_start(context, instance, migration) - - def network_migrate_instance_finish(self, context, instance, migration): - self.network_api.migrate_instance_finish(context, instance, migration) - - def quota_commit(self, context, reservations, project_id=None, - user_id=None): - quota.QUOTAS.commit(context, reservations, project_id=project_id, - user_id=user_id) - - def quota_rollback(self, context, reservations, project_id=None, - user_id=None): - quota.QUOTAS.rollback(context, reservations, project_id=project_id, - user_id=user_id) - - def get_ec2_ids(self, context, instance): - ec2_ids = {} - - ec2_ids['instance-id'] = ec2utils.id_to_ec2_inst_id(instance['uuid']) - ec2_ids['ami-id'] = ec2utils.glance_id_to_ec2_id(context, - instance['image_ref']) - for image_type in ['kernel', 'ramdisk']: - image_id = instance.get('%s_id' % image_type) - if image_id is not None: - ec2_image_type = ec2utils.image_type(image_type) - ec2_id = ec2utils.glance_id_to_ec2_id(context, image_id, - ec2_image_type) - ec2_ids['%s-id' % image_type] = ec2_id - - return ec2_ids - - def compute_unrescue(self, context, instance): - self.compute_api.unrescue(context, instance) - - def _object_dispatch(self, target, method, context, args, kwargs): - """Dispatch a call to an object method. - - This ensures that object methods get called and any exception - that is raised gets wrapped in an ExpectedException for forwarding - back to the caller (without spamming the conductor logs). 
- """ - try: - # NOTE(danms): Keep the getattr inside the try block since - # a missing method is really a client problem - return getattr(target, method)(context, *args, **kwargs) - except Exception: - raise messaging.ExpectedException() - - def object_class_action(self, context, objname, objmethod, - objver, args, kwargs): - """Perform a classmethod action on an object.""" - objclass = nova_object.NovaObject.obj_class_from_name(objname, - objver) - result = self._object_dispatch(objclass, objmethod, context, - args, kwargs) - # NOTE(danms): The RPC layer will convert to primitives for us, - # but in this case, we need to honor the version the client is - # asking for, so we do it before returning here. - return (result.obj_to_primitive(target_version=objver) - if isinstance(result, nova_object.NovaObject) else result) - - def object_action(self, context, objinst, objmethod, args, kwargs): - """Perform an action on an object.""" - oldobj = objinst.obj_clone() - result = self._object_dispatch(objinst, objmethod, context, - args, kwargs) - updates = dict() - # NOTE(danms): Diff the object with the one passed to us and - # generate a list of changes to forward back - for name, field in objinst.fields.items(): - if not objinst.obj_attr_is_set(name): - # Avoid demand-loading anything - continue - if (not oldobj.obj_attr_is_set(name) or - oldobj[name] != objinst[name]): - updates[name] = field.to_primitive(objinst, name, - objinst[name]) - # This is safe since a field named this would conflict with the - # method anyway - updates['obj_what_changed'] = objinst.obj_what_changed() - return updates, result - - def object_backport(self, context, objinst, target_version): - return objinst.obj_to_primitive(target_version=target_version) - - -class ComputeTaskManager(base.Base): - """Namespace for compute methods. - - This class presents an rpc API for nova-conductor under the 'compute_task' - namespace. 
The methods here are compute operations that are invoked - by the API service. These methods see the operation to completion, which - may involve coordinating activities on multiple compute nodes. - """ - - target = messaging.Target(namespace='compute_task', version='1.9') - - def __init__(self): - super(ComputeTaskManager, self).__init__() - self.compute_rpcapi = compute_rpcapi.ComputeAPI() - self.image_api = image.API() - self.scheduler_client = scheduler_client.SchedulerClient() - - @messaging.expected_exceptions(exception.NoValidHost, - exception.ComputeServiceUnavailable, - exception.InvalidHypervisorType, - exception.InvalidCPUInfo, - exception.UnableToMigrateToSelf, - exception.DestinationHypervisorTooOld, - exception.InvalidLocalStorage, - exception.InvalidSharedStorage, - exception.HypervisorUnavailable, - exception.InstanceNotRunning, - exception.MigrationPreCheckError) - def migrate_server(self, context, instance, scheduler_hint, live, rebuild, - flavor, block_migration, disk_over_commit, reservations=None): - if instance and not isinstance(instance, nova_object.NovaObject): - # NOTE(danms): Until v2 of the RPC API, we need to tolerate - # old-world instance objects here - attrs = ['metadata', 'system_metadata', 'info_cache', - 'security_groups'] - instance = objects.Instance._from_db_object( - context, objects.Instance(), instance, - expected_attrs=attrs) - if live and not rebuild and not flavor: - self._live_migrate(context, instance, scheduler_hint, - block_migration, disk_over_commit) - elif not live and not rebuild and flavor: - instance_uuid = instance['uuid'] - with compute_utils.EventReporter(context, 'cold_migrate', - instance_uuid): - self._cold_migrate(context, instance, flavor, - scheduler_hint['filter_properties'], - reservations) - else: - raise NotImplementedError() - - def _cold_migrate(self, context, instance, flavor, filter_properties, - reservations): - image_ref = instance.image_ref - image = compute_utils.get_image_metadata( - 
context, self.image_api, image_ref, instance) - - request_spec = scheduler_utils.build_request_spec( - context, image, [instance], instance_type=flavor) - - quotas = objects.Quotas.from_reservations(context, - reservations, - instance=instance) - try: - scheduler_utils.populate_retry(filter_properties, instance['uuid']) - hosts = self.scheduler_client.select_destinations( - context, request_spec, filter_properties) - host_state = hosts[0] - except exception.NoValidHost as ex: - vm_state = instance['vm_state'] - if not vm_state: - vm_state = vm_states.ACTIVE - updates = {'vm_state': vm_state, 'task_state': None} - self._set_vm_state_and_notify(context, 'migrate_server', - updates, ex, request_spec) - quotas.rollback() - - # if the flavor IDs match, it's migrate; otherwise resize - if flavor['id'] == instance['instance_type_id']: - msg = _("No valid host found for cold migrate") - else: - msg = _("No valid host found for resize") - raise exception.NoValidHost(reason=msg) - - try: - scheduler_utils.populate_filter_properties(filter_properties, - host_state) - # context is not serializable - filter_properties.pop('context', None) - - # TODO(timello): originally, instance_type in request_spec - # on compute.api.resize does not have 'extra_specs', so we - # remove it for now to keep tests backward compatibility. 
- request_spec['instance_type'].pop('extra_specs') - - (host, node) = (host_state['host'], host_state['nodename']) - self.compute_rpcapi.prep_resize( - context, image, instance, - flavor, host, - reservations, request_spec=request_spec, - filter_properties=filter_properties, node=node) - except Exception as ex: - with excutils.save_and_reraise_exception(): - updates = {'vm_state': instance['vm_state'], - 'task_state': None} - self._set_vm_state_and_notify(context, 'migrate_server', - updates, ex, request_spec) - quotas.rollback() - - def _set_vm_state_and_notify(self, context, method, updates, ex, - request_spec): - scheduler_utils.set_vm_state_and_notify( - context, 'compute_task', method, updates, - ex, request_spec, self.db) - - def _live_migrate(self, context, instance, scheduler_hint, - block_migration, disk_over_commit): - destination = scheduler_hint.get("host") - try: - live_migrate.execute(context, instance, destination, - block_migration, disk_over_commit) - except (exception.NoValidHost, - exception.ComputeServiceUnavailable, - exception.InvalidHypervisorType, - exception.InvalidCPUInfo, - exception.UnableToMigrateToSelf, - exception.DestinationHypervisorTooOld, - exception.InvalidLocalStorage, - exception.InvalidSharedStorage, - exception.HypervisorUnavailable, - exception.InstanceNotRunning, - exception.MigrationPreCheckError) as ex: - with excutils.save_and_reraise_exception(): - # TODO(johngarbutt) - eventually need instance actions here - request_spec = {'instance_properties': { - 'uuid': instance['uuid'], }, - } - scheduler_utils.set_vm_state_and_notify(context, - 'compute_task', 'migrate_server', - dict(vm_state=instance['vm_state'], - task_state=None, - expected_task_state=task_states.MIGRATING,), - ex, request_spec, self.db) - except Exception as ex: - LOG.error(_('Migration of instance %(instance_id)s to host' - ' %(dest)s unexpectedly failed.'), - {'instance_id': instance['uuid'], 'dest': destination}, - exc_info=True) - raise 
exception.MigrationError(reason=ex) - - def build_instances(self, context, instances, image, filter_properties, - admin_password, injected_files, requested_networks, - security_groups, block_device_mapping=None, legacy_bdm=True): - # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version - # 2.0 of the RPC API. - request_spec = scheduler_utils.build_request_spec(context, image, - instances) - # TODO(danms): Remove this in version 2.0 of the RPC API - if (requested_networks and - not isinstance(requested_networks, - objects.NetworkRequestList)): - requested_networks = objects.NetworkRequestList( - objects=[objects.NetworkRequest.from_tuple(t) - for t in requested_networks]) - - try: - # check retry policy. Rather ugly use of instances[0]... - # but if we've exceeded max retries... then we really only - # have a single instance. - scheduler_utils.populate_retry(filter_properties, - instances[0].uuid) - hosts = self.scheduler_client.select_destinations(context, - request_spec, filter_properties) - except Exception as exc: - for instance in instances: - scheduler_driver.handle_schedule_error(context, exc, - instance.uuid, request_spec) - return - - for (instance, host) in itertools.izip(instances, hosts): - try: - instance.refresh() - except (exception.InstanceNotFound, - exception.InstanceInfoCacheNotFound): - LOG.debug('Instance deleted during build', instance=instance) - continue - local_filter_props = copy.deepcopy(filter_properties) - scheduler_utils.populate_filter_properties(local_filter_props, - host) - # The block_device_mapping passed from the api doesn't contain - # instance specific information - bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance.uuid) - - self.compute_rpcapi.build_and_run_instance(context, - instance=instance, host=host['host'], image=image, - request_spec=request_spec, - filter_properties=local_filter_props, - admin_password=admin_password, - injected_files=injected_files, - 
requested_networks=requested_networks, - security_groups=security_groups, - block_device_mapping=bdms, node=host['nodename'], - limits=host['limits']) - - def _delete_image(self, context, image_id): - return self.image_api.delete(context, image_id) - - def _schedule_instances(self, context, image, filter_properties, - *instances): - request_spec = scheduler_utils.build_request_spec(context, image, - instances) - hosts = self.scheduler_client.select_destinations(context, - request_spec, filter_properties) - return hosts - - def unshelve_instance(self, context, instance): - sys_meta = instance.system_metadata - - def safe_image_show(ctx, image_id): - if image_id: - return self.image_api.get(ctx, image_id) - - if instance.vm_state == vm_states.SHELVED: - instance.task_state = task_states.POWERING_ON - instance.save(expected_task_state=task_states.UNSHELVING) - self.compute_rpcapi.start_instance(context, instance) - snapshot_id = sys_meta.get('shelved_image_id') - if snapshot_id: - self._delete_image(context, snapshot_id) - elif instance.vm_state == vm_states.SHELVED_OFFLOADED: - image_id = sys_meta.get('shelved_image_id') - with compute_utils.EventReporter( - context, 'get_image_info', instance.uuid): - try: - image = safe_image_show(context, image_id) - except exception.ImageNotFound: - instance.vm_state = vm_states.ERROR - instance.save() - reason = _('Unshelve attempted but the image %s ' - 'cannot be found.') % image_id - LOG.error(reason, instance=instance) - raise exception.UnshelveException( - instance_id=instance.uuid, reason=reason) - - try: - with compute_utils.EventReporter(context, 'schedule_instances', - instance.uuid): - filter_properties = {} - hosts = self._schedule_instances(context, image, - filter_properties, - instance) - host_state = hosts[0] - scheduler_utils.populate_filter_properties( - filter_properties, host_state) - (host, node) = (host_state['host'], host_state['nodename']) - self.compute_rpcapi.unshelve_instance( - context, instance, host, 
image=image, - filter_properties=filter_properties, node=node) - except exception.NoValidHost: - instance.task_state = None - instance.save() - LOG.warning(_("No valid host found for unshelve instance"), - instance=instance) - return - else: - LOG.error(_('Unshelve attempted but vm_state not SHELVED or ' - 'SHELVED_OFFLOADED'), instance=instance) - instance.vm_state = vm_states.ERROR - instance.save() - return - - for key in ['shelved_at', 'shelved_image_id', 'shelved_host']: - if key in sys_meta: - del(sys_meta[key]) - instance.system_metadata = sys_meta - instance.save() - - def rebuild_instance(self, context, instance, orig_image_ref, image_ref, - injected_files, new_pass, orig_sys_metadata, - bdms, recreate, on_shared_storage, - preserve_ephemeral=False, host=None): - - with compute_utils.EventReporter(context, 'rebuild_server', - instance.uuid): - if not host: - # NOTE(lcostantino): Retrieve scheduler filters for the - # instance when the feature is available - filter_properties = {'ignore_hosts': [instance.host]} - request_spec = scheduler_utils.build_request_spec(context, - image_ref, - [instance]) - try: - hosts = self.scheduler_client.select_destinations(context, - request_spec, - filter_properties) - host = hosts.pop(0)['host'] - except exception.NoValidHost as ex: - with excutils.save_and_reraise_exception(): - self._set_vm_state_and_notify(context, - 'rebuild_server', - {'vm_state': instance.vm_state, - 'task_state': None}, ex, request_spec) - LOG.warning(_("No valid host found for rebuild"), - instance=instance) - - self.compute_rpcapi.rebuild_instance(context, - instance=instance, - new_pass=new_pass, - injected_files=injected_files, - image_ref=image_ref, - orig_image_ref=orig_image_ref, - orig_sys_metadata=orig_sys_metadata, - bdms=bdms, - recreate=recreate, - on_shared_storage=on_shared_storage, - preserve_ephemeral=preserve_ephemeral, - host=host) diff --git 
a/juno-patches/nova/instance_mapping_uuid_patch/nova/db/sqlalchemy/migrate_repo/versions/255_add_mapping_uuid_column_to_instance.py b/juno-patches/nova/instance_mapping_uuid_patch/nova/db/sqlalchemy/migrate_repo/versions/255_add_mapping_uuid_column_to_instance.py deleted file mode 100644 index 5c0db16..0000000 --- a/juno-patches/nova/instance_mapping_uuid_patch/nova/db/sqlalchemy/migrate_repo/versions/255_add_mapping_uuid_column_to_instance.py +++ /dev/null @@ -1,20 +0,0 @@ -from sqlalchemy import Column, String, MetaData, Table - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - instances = Table('instances', meta, autoload=True) - mapping_uuid = Column('mapping_uuid', - String(length=36)) - instances.create_column(mapping_uuid) - - -def downgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - instances = Table('instances', meta, autoload=True) - mapping_uuid = instances.columns.mapping_uuid - mapping_uuid.drop() diff --git a/juno-patches/nova/instance_mapping_uuid_patch/nova/db/sqlalchemy/models.py b/juno-patches/nova/instance_mapping_uuid_patch/nova/db/sqlalchemy/models.py deleted file mode 100644 index 786582f..0000000 --- a/juno-patches/nova/instance_mapping_uuid_patch/nova/db/sqlalchemy/models.py +++ /dev/null @@ -1,1417 +0,0 @@ -# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Piston Cloud Computing, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -SQLAlchemy models for nova data. -""" - -from oslo.config import cfg -from oslo.db.sqlalchemy import models -from sqlalchemy import Column, Index, Integer, BigInteger, Enum, String, schema -from sqlalchemy.dialects.mysql import MEDIUMTEXT -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy import orm -from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float - -from nova.db.sqlalchemy import types -from nova.openstack.common import timeutils - -CONF = cfg.CONF -BASE = declarative_base() - - -def MediumText(): - return Text().with_variant(MEDIUMTEXT(), 'mysql') - - -class NovaBase(models.SoftDeleteMixin, - models.TimestampMixin, - models.ModelBase): - metadata = None - - # TODO(ekudryashova): remove this after both nova and oslo.db - # will use oslo.utils library - # NOTE: Both projects(nova and oslo.db) use `timeutils.utcnow`, which - # returns specified time(if override_time is set). Time overriding is - # only used by unit tests, but in a lot of places, temporarily overriding - # this columns helps to avoid lots of calls of timeutils.set_override - # from different places in unit tests. 
- created_at = Column(DateTime, default=lambda: timeutils.utcnow()) - updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow()) - - def save(self, session=None): - from nova.db.sqlalchemy import api - - if session is None: - session = api.get_session() - - super(NovaBase, self).save(session=session) - - -class Service(BASE, NovaBase): - """Represents a running service on a host.""" - - __tablename__ = 'services' - __table_args__ = ( - schema.UniqueConstraint("host", "topic", "deleted", - name="uniq_services0host0topic0deleted"), - schema.UniqueConstraint("host", "binary", "deleted", - name="uniq_services0host0binary0deleted") - ) - - id = Column(Integer, primary_key=True) - host = Column(String(255)) # , ForeignKey('hosts.id')) - binary = Column(String(255)) - topic = Column(String(255)) - report_count = Column(Integer, nullable=False, default=0) - disabled = Column(Boolean, default=False) - disabled_reason = Column(String(255)) - - -class ComputeNode(BASE, NovaBase): - """Represents a running compute service on a host.""" - - __tablename__ = 'compute_nodes' - __table_args__ = () - id = Column(Integer, primary_key=True) - service_id = Column(Integer, ForeignKey('services.id'), nullable=False) - service = orm.relationship(Service, - backref=orm.backref('compute_node'), - foreign_keys=service_id, - primaryjoin='and_(' - 'ComputeNode.service_id == Service.id,' - 'ComputeNode.deleted == 0)') - - vcpus = Column(Integer, nullable=False) - memory_mb = Column(Integer, nullable=False) - local_gb = Column(Integer, nullable=False) - vcpus_used = Column(Integer, nullable=False) - memory_mb_used = Column(Integer, nullable=False) - local_gb_used = Column(Integer, nullable=False) - hypervisor_type = Column(MediumText(), nullable=False) - hypervisor_version = Column(Integer, nullable=False) - hypervisor_hostname = Column(String(255)) - - # Free Ram, amount of activity (resize, migration, boot, etc) and - # the number of running VM's are a good starting point for what's - 
# important when making scheduling decisions. - free_ram_mb = Column(Integer) - free_disk_gb = Column(Integer) - current_workload = Column(Integer) - running_vms = Column(Integer) - - # Note(masumotok): Expected Strings example: - # - # '{"arch":"x86_64", - # "model":"Nehalem", - # "topology":{"sockets":1, "threads":2, "cores":3}, - # "features":["tdtscp", "xtpr"]}' - # - # Points are "json translatable" and it must have all dictionary keys - # above, since it is copied from tag of getCapabilities() - # (See libvirt.virtConnection). - cpu_info = Column(MediumText(), nullable=False) - disk_available_least = Column(Integer) - host_ip = Column(types.IPAddress()) - supported_instances = Column(Text) - metrics = Column(Text) - - # Note(yongli): json string PCI Stats - # '{"vendor_id":"8086", "product_id":"1234", "count":3 }' - pci_stats = Column(Text) - - # extra_resources is a json string containing arbitrary - # data about additional resources. - extra_resources = Column(Text) - - # json-encode string containing compute node statistics - stats = Column(Text, default='{}') - - # json-encoded dict that contains NUMA topology as generated by - # nova.virt.hardware.VirtNUMAHostTopology.to_json() - numa_topology = Column(Text) - - -class Certificate(BASE, NovaBase): - """Represents a x509 certificate.""" - __tablename__ = 'certificates' - __table_args__ = ( - Index('certificates_project_id_deleted_idx', 'project_id', 'deleted'), - Index('certificates_user_id_deleted_idx', 'user_id', 'deleted') - ) - id = Column(Integer, primary_key=True) - - user_id = Column(String(255)) - project_id = Column(String(255)) - file_name = Column(String(255)) - - -class Instance(BASE, NovaBase): - """Represents a guest VM.""" - __tablename__ = 'instances' - __table_args__ = ( - Index('uuid', 'uuid', unique=True), - Index('project_id', 'project_id'), - Index('instances_host_deleted_idx', - 'host', 'deleted'), - Index('instances_reservation_id_idx', - 'reservation_id'), - 
Index('instances_terminated_at_launched_at_idx', - 'terminated_at', 'launched_at'), - Index('instances_uuid_deleted_idx', - 'uuid', 'deleted'), - Index('instances_task_state_updated_at_idx', - 'task_state', 'updated_at'), - Index('instances_host_node_deleted_idx', - 'host', 'node', 'deleted'), - Index('instances_host_deleted_cleaned_idx', - 'host', 'deleted', 'cleaned'), - ) - injected_files = [] - - id = Column(Integer, primary_key=True, autoincrement=True) - - @property - def name(self): - try: - base_name = CONF.instance_name_template % self.id - except TypeError: - # Support templates like "uuid-%(uuid)s", etc. - info = {} - # NOTE(russellb): Don't use self.iteritems() here, as it will - # result in infinite recursion on the name property. - for column in iter(orm.object_mapper(self).columns): - key = column.name - # prevent recursion if someone specifies %(name)s - # %(name)s will not be valid. - if key == 'name': - continue - info[key] = self[key] - try: - base_name = CONF.instance_name_template % info - except KeyError: - base_name = self.uuid - return base_name - - @property - def _extra_keys(self): - return ['name'] - - user_id = Column(String(255)) - project_id = Column(String(255)) - - image_ref = Column(String(255)) - kernel_id = Column(String(255)) - ramdisk_id = Column(String(255)) - hostname = Column(String(255)) - - launch_index = Column(Integer) - key_name = Column(String(255)) - key_data = Column(MediumText()) - - power_state = Column(Integer) - vm_state = Column(String(255)) - task_state = Column(String(255)) - - memory_mb = Column(Integer) - vcpus = Column(Integer) - root_gb = Column(Integer) - ephemeral_gb = Column(Integer) - ephemeral_key_uuid = Column(String(36)) - - # This is not related to hostname, above. It refers - # to the nova node. - host = Column(String(255)) # , ForeignKey('hosts.id')) - # To identify the "ComputeNode" which the instance resides in. - # This equals to ComputeNode.hypervisor_hostname. 
- node = Column(String(255)) - - # *not* flavorid, this is the internal primary_key - instance_type_id = Column(Integer) - - user_data = Column(MediumText()) - - reservation_id = Column(String(255)) - - scheduled_at = Column(DateTime) - launched_at = Column(DateTime) - terminated_at = Column(DateTime) - - availability_zone = Column(String(255)) - - # User editable field for display in user-facing UIs - display_name = Column(String(255)) - display_description = Column(String(255)) - - # To remember on which host an instance booted. - # An instance may have moved to another host by live migration. - launched_on = Column(MediumText()) - - # NOTE(jdillaman): locked deprecated in favor of locked_by, - # to be removed in Icehouse - locked = Column(Boolean) - locked_by = Column(Enum('owner', 'admin')) - - os_type = Column(String(255)) - architecture = Column(String(255)) - vm_mode = Column(String(255)) - uuid = Column(String(36)) - mapping_uuid = Column(String(36)) - - root_device_name = Column(String(255)) - default_ephemeral_device = Column(String(255)) - default_swap_device = Column(String(255)) - config_drive = Column(String(255)) - - # User editable field meant to represent what ip should be used - # to connect to the instance - access_ip_v4 = Column(types.IPAddress()) - access_ip_v6 = Column(types.IPAddress()) - - auto_disk_config = Column(Boolean()) - progress = Column(Integer) - - # EC2 instance_initiated_shutdown_terminate - # True: -> 'terminate' - # False: -> 'stop' - # Note(maoy): currently Nova will always stop instead of terminate - # no matter what the flag says. So we set the default to False. - shutdown_terminate = Column(Boolean(), default=False) - - # EC2 disable_api_termination - disable_terminate = Column(Boolean(), default=False) - - # OpenStack compute cell name. 
This will only be set at the top of - # the cells tree and it'll be a full cell name such as 'api!hop1!hop2' - cell_name = Column(String(255)) - internal_id = Column(Integer) - - # Records whether an instance has been deleted from disk - cleaned = Column(Integer, default=0) - - -class InstanceInfoCache(BASE, NovaBase): - """Represents a cache of information about an instance - """ - __tablename__ = 'instance_info_caches' - __table_args__ = ( - schema.UniqueConstraint( - "instance_uuid", - name="uniq_instance_info_caches0instance_uuid"),) - id = Column(Integer, primary_key=True, autoincrement=True) - - # text column used for storing a json object of network data for api - network_info = Column(MediumText()) - - instance_uuid = Column(String(36), ForeignKey('instances.uuid'), - nullable=False) - instance = orm.relationship(Instance, - backref=orm.backref('info_cache', uselist=False), - foreign_keys=instance_uuid, - primaryjoin=instance_uuid == Instance.uuid) - - -class InstanceExtra(BASE, NovaBase): - __tablename__ = 'instance_extra' - __table_args__ = ( - Index('instance_extra_idx', 'instance_uuid'),) - id = Column(Integer, primary_key=True, autoincrement=True) - instance_uuid = Column(String(36), ForeignKey('instances.uuid'), - nullable=False) - numa_topology = Column(Text) - pci_requests = Column(Text) - instance = orm.relationship(Instance, - backref=orm.backref('numa_topology', - uselist=False), - foreign_keys=instance_uuid, - primaryjoin=instance_uuid == Instance.uuid) - - -class InstanceTypes(BASE, NovaBase): - """Represents possible flavors for instances. - - Note: instance_type and flavor are synonyms and the term instance_type is - deprecated and in the process of being removed. 
- """ - __tablename__ = "instance_types" - - __table_args__ = ( - schema.UniqueConstraint("flavorid", "deleted", - name="uniq_instance_types0flavorid0deleted"), - schema.UniqueConstraint("name", "deleted", - name="uniq_instance_types0name0deleted") - ) - - # Internal only primary key/id - id = Column(Integer, primary_key=True) - name = Column(String(255)) - memory_mb = Column(Integer, nullable=False) - vcpus = Column(Integer, nullable=False) - root_gb = Column(Integer) - ephemeral_gb = Column(Integer) - # Public facing id will be renamed public_id - flavorid = Column(String(255)) - swap = Column(Integer, nullable=False, default=0) - rxtx_factor = Column(Float, default=1) - vcpu_weight = Column(Integer) - disabled = Column(Boolean, default=False) - is_public = Column(Boolean, default=True) - - -class Volume(BASE, NovaBase): - """Represents a block storage device that can be attached to a VM.""" - __tablename__ = 'volumes' - __table_args__ = ( - Index('volumes_instance_uuid_idx', 'instance_uuid'), - ) - id = Column(String(36), primary_key=True, nullable=False) - deleted = Column(String(36), default="") - - @property - def name(self): - return CONF.volume_name_template % self.id - - ec2_id = Column(String(255)) - user_id = Column(String(255)) - project_id = Column(String(255)) - - snapshot_id = Column(String(36)) - - host = Column(String(255)) - size = Column(Integer) - availability_zone = Column(String(255)) - instance_uuid = Column(String(36)) - mountpoint = Column(String(255)) - attach_time = Column(DateTime) - status = Column(String(255)) # TODO(vish): enum? 
- attach_status = Column(String(255)) # TODO(vish): enum - - scheduled_at = Column(DateTime) - launched_at = Column(DateTime) - terminated_at = Column(DateTime) - - display_name = Column(String(255)) - display_description = Column(String(255)) - - provider_location = Column(String(256)) - provider_auth = Column(String(256)) - - volume_type_id = Column(Integer) - - -class Quota(BASE, NovaBase): - """Represents a single quota override for a project. - - If there is no row for a given project id and resource, then the - default for the quota class is used. If there is no row for a - given quota class and resource, then the default for the - deployment is used. If the row is present but the hard limit is - Null, then the resource is unlimited. - """ - - __tablename__ = 'quotas' - __table_args__ = ( - schema.UniqueConstraint("project_id", "resource", "deleted", - name="uniq_quotas0project_id0resource0deleted" - ), - ) - id = Column(Integer, primary_key=True) - - project_id = Column(String(255)) - - resource = Column(String(255), nullable=False) - hard_limit = Column(Integer) - - -class ProjectUserQuota(BASE, NovaBase): - """Represents a single quota override for a user with in a project.""" - - __tablename__ = 'project_user_quotas' - uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted" - __table_args__ = ( - schema.UniqueConstraint("user_id", "project_id", "resource", "deleted", - name=uniq_name), - Index('project_user_quotas_project_id_deleted_idx', - 'project_id', 'deleted'), - Index('project_user_quotas_user_id_deleted_idx', - 'user_id', 'deleted') - ) - id = Column(Integer, primary_key=True, nullable=False) - - project_id = Column(String(255), nullable=False) - user_id = Column(String(255), nullable=False) - - resource = Column(String(255), nullable=False) - hard_limit = Column(Integer) - - -class QuotaClass(BASE, NovaBase): - """Represents a single quota override for a quota class. 
- - If there is no row for a given quota class and resource, then the - default for the deployment is used. If the row is present but the - hard limit is Null, then the resource is unlimited. - """ - - __tablename__ = 'quota_classes' - __table_args__ = ( - Index('ix_quota_classes_class_name', 'class_name'), - ) - id = Column(Integer, primary_key=True) - - class_name = Column(String(255)) - - resource = Column(String(255)) - hard_limit = Column(Integer) - - -class QuotaUsage(BASE, NovaBase): - """Represents the current usage for a given resource.""" - - __tablename__ = 'quota_usages' - __table_args__ = ( - Index('ix_quota_usages_project_id', 'project_id'), - ) - id = Column(Integer, primary_key=True) - - project_id = Column(String(255)) - user_id = Column(String(255)) - resource = Column(String(255), nullable=False) - - in_use = Column(Integer, nullable=False) - reserved = Column(Integer, nullable=False) - - @property - def total(self): - return self.in_use + self.reserved - - until_refresh = Column(Integer) - - -class Reservation(BASE, NovaBase): - """Represents a resource reservation for quotas.""" - - __tablename__ = 'reservations' - __table_args__ = ( - Index('ix_reservations_project_id', 'project_id'), - Index('reservations_uuid_idx', 'uuid'), - Index('reservations_deleted_expire_idx', 'deleted', 'expire'), - ) - id = Column(Integer, primary_key=True, nullable=False) - uuid = Column(String(36), nullable=False) - - usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False) - - project_id = Column(String(255)) - user_id = Column(String(255)) - resource = Column(String(255)) - - delta = Column(Integer, nullable=False) - expire = Column(DateTime) - - usage = orm.relationship( - "QuotaUsage", - foreign_keys=usage_id, - primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,' - 'QuotaUsage.deleted == 0)') - - -class Snapshot(BASE, NovaBase): - """Represents a block storage device that can be attached to a VM.""" - __tablename__ = 'snapshots' - 
__table_args__ = () - id = Column(String(36), primary_key=True, nullable=False) - deleted = Column(String(36), default="") - - @property - def name(self): - return CONF.snapshot_name_template % self.id - - @property - def volume_name(self): - return CONF.volume_name_template % self.volume_id - - user_id = Column(String(255)) - project_id = Column(String(255)) - - volume_id = Column(String(36), nullable=False) - status = Column(String(255)) - progress = Column(String(255)) - volume_size = Column(Integer) - scheduled_at = Column(DateTime) - - display_name = Column(String(255)) - display_description = Column(String(255)) - - -class BlockDeviceMapping(BASE, NovaBase): - """Represents block device mapping that is defined by EC2.""" - __tablename__ = "block_device_mapping" - __table_args__ = ( - Index('snapshot_id', 'snapshot_id'), - Index('volume_id', 'volume_id'), - Index('block_device_mapping_instance_uuid_device_name_idx', - 'instance_uuid', 'device_name'), - Index('block_device_mapping_instance_uuid_volume_id_idx', - 'instance_uuid', 'volume_id'), - Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'), - # TODO(sshturm) Should be dropped. `virtual_name` was dropped - # in 186 migration, - # Duplicates `block_device_mapping_instance_uuid_device_name_idx` - # index. - Index("block_device_mapping_instance_uuid_virtual_name" - "_device_name_idx", 'instance_uuid', 'device_name'), - ) - id = Column(Integer, primary_key=True, autoincrement=True) - - instance_uuid = Column(String(36), ForeignKey('instances.uuid')) - instance = orm.relationship(Instance, - backref=orm.backref('block_device_mapping'), - foreign_keys=instance_uuid, - primaryjoin='and_(BlockDeviceMapping.' 
- 'instance_uuid==' - 'Instance.uuid,' - 'BlockDeviceMapping.deleted==' - '0)') - - source_type = Column(String(255)) - destination_type = Column(String(255)) - guest_format = Column(String(255)) - device_type = Column(String(255)) - disk_bus = Column(String(255)) - - boot_index = Column(Integer) - - device_name = Column(String(255)) - - # default=False for compatibility of the existing code. - # With EC2 API, - # default True for ami specified device. - # default False for created with other timing. - # TODO(sshturm) add default in db - delete_on_termination = Column(Boolean, default=False) - - snapshot_id = Column(String(36)) - - volume_id = Column(String(36)) - volume_size = Column(Integer) - - image_id = Column(String(36)) - - # for no device to suppress devices. - no_device = Column(Boolean) - - connection_info = Column(MediumText()) - - -class IscsiTarget(BASE, NovaBase): - """Represents an iscsi target for a given host.""" - __tablename__ = 'iscsi_targets' - __table_args__ = ( - Index('iscsi_targets_volume_id_fkey', 'volume_id'), - Index('iscsi_targets_host_idx', 'host'), - Index('iscsi_targets_host_volume_id_deleted_idx', 'host', 'volume_id', - 'deleted') - ) - id = Column(Integer, primary_key=True, nullable=False) - target_num = Column(Integer) - host = Column(String(255)) - volume_id = Column(String(36), ForeignKey('volumes.id')) - volume = orm.relationship(Volume, - backref=orm.backref('iscsi_target', uselist=False), - foreign_keys=volume_id, - primaryjoin='and_(IscsiTarget.volume_id==Volume.id,' - 'IscsiTarget.deleted==0)') - - -class SecurityGroupInstanceAssociation(BASE, NovaBase): - __tablename__ = 'security_group_instance_association' - __table_args__ = ( - Index('security_group_instance_association_instance_uuid_idx', - 'instance_uuid'), - ) - id = Column(Integer, primary_key=True, nullable=False) - security_group_id = Column(Integer, ForeignKey('security_groups.id')) - instance_uuid = Column(String(36), ForeignKey('instances.uuid')) - - -class 
SecurityGroup(BASE, NovaBase): - """Represents a security group.""" - __tablename__ = 'security_groups' - __table_args__ = ( - Index('uniq_security_groups0project_id0name0deleted', 'project_id', - 'name', 'deleted'), - ) - id = Column(Integer, primary_key=True) - - name = Column(String(255)) - description = Column(String(255)) - user_id = Column(String(255)) - project_id = Column(String(255)) - - instances = orm.relationship(Instance, - secondary="security_group_instance_association", - primaryjoin='and_(' - 'SecurityGroup.id == ' - 'SecurityGroupInstanceAssociation.security_group_id,' - 'SecurityGroupInstanceAssociation.deleted == 0,' - 'SecurityGroup.deleted == 0)', - secondaryjoin='and_(' - 'SecurityGroupInstanceAssociation.instance_uuid == Instance.uuid,' - # (anthony) the condition below shouldn't be necessary now that the - # association is being marked as deleted. However, removing this - # may cause existing deployments to choke, so I'm leaving it - 'Instance.deleted == 0)', - backref='security_groups') - - -class SecurityGroupIngressRule(BASE, NovaBase): - """Represents a rule in a security group.""" - __tablename__ = 'security_group_rules' - __table_args__ = () - id = Column(Integer, primary_key=True) - - parent_group_id = Column(Integer, ForeignKey('security_groups.id')) - parent_group = orm.relationship("SecurityGroup", backref="rules", - foreign_keys=parent_group_id, - primaryjoin='and_(' - 'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,' - 'SecurityGroupIngressRule.deleted == 0)') - - protocol = Column(String(255)) - from_port = Column(Integer) - to_port = Column(Integer) - cidr = Column(types.CIDR()) - - # Note: This is not the parent SecurityGroup. It's SecurityGroup we're - # granting access for. 
- group_id = Column(Integer, ForeignKey('security_groups.id')) - grantee_group = orm.relationship("SecurityGroup", - foreign_keys=group_id, - primaryjoin='and_(' - 'SecurityGroupIngressRule.group_id == SecurityGroup.id,' - 'SecurityGroupIngressRule.deleted == 0)') - - -class SecurityGroupIngressDefaultRule(BASE, NovaBase): - __tablename__ = 'security_group_default_rules' - __table_args__ = () - id = Column(Integer, primary_key=True, nullable=False) - protocol = Column(String(5)) # "tcp", "udp" or "icmp" - from_port = Column(Integer) - to_port = Column(Integer) - cidr = Column(types.CIDR()) - - -class ProviderFirewallRule(BASE, NovaBase): - """Represents a rule in a security group.""" - __tablename__ = 'provider_fw_rules' - __table_args__ = () - id = Column(Integer, primary_key=True, nullable=False) - - protocol = Column(String(5)) # "tcp", "udp", or "icmp" - from_port = Column(Integer) - to_port = Column(Integer) - cidr = Column(types.CIDR()) - - -class KeyPair(BASE, NovaBase): - """Represents a public key pair for ssh.""" - __tablename__ = 'key_pairs' - __table_args__ = ( - schema.UniqueConstraint("user_id", "name", "deleted", - name="uniq_key_pairs0user_id0name0deleted"), - ) - id = Column(Integer, primary_key=True, nullable=False) - - name = Column(String(255)) - - user_id = Column(String(255)) - - fingerprint = Column(String(255)) - public_key = Column(MediumText()) - - -class Migration(BASE, NovaBase): - """Represents a running host-to-host migration.""" - __tablename__ = 'migrations' - __table_args__ = ( - Index('migrations_instance_uuid_and_status_idx', 'instance_uuid', - 'status'), - Index('migrations_by_host_nodes_and_status_idx', 'deleted', - 'source_compute', 'dest_compute', 'source_node', 'dest_node', - 'status'), - ) - id = Column(Integer, primary_key=True, nullable=False) - # NOTE(tr3buchet): the ____compute variables are instance['host'] - source_compute = Column(String(255)) - dest_compute = Column(String(255)) - # nodes are equivalent to a compute 
node's 'hypervisor_hostname' - source_node = Column(String(255)) - dest_node = Column(String(255)) - # NOTE(tr3buchet): dest_host, btw, is an ip address - dest_host = Column(String(255)) - old_instance_type_id = Column(Integer()) - new_instance_type_id = Column(Integer()) - instance_uuid = Column(String(36), ForeignKey('instances.uuid')) - # TODO(_cerberus_): enum - status = Column(String(255)) - - instance = orm.relationship("Instance", foreign_keys=instance_uuid, - primaryjoin='and_(Migration.instance_uuid == ' - 'Instance.uuid, Instance.deleted == ' - '0)') - - -class Network(BASE, NovaBase): - """Represents a network.""" - __tablename__ = 'networks' - __table_args__ = ( - schema.UniqueConstraint("vlan", "deleted", - name="uniq_networks0vlan0deleted"), - Index('networks_bridge_deleted_idx', 'bridge', 'deleted'), - Index('networks_host_idx', 'host'), - Index('networks_project_id_deleted_idx', 'project_id', 'deleted'), - Index('networks_uuid_project_id_deleted_idx', 'uuid', - 'project_id', 'deleted'), - Index('networks_vlan_deleted_idx', 'vlan', 'deleted'), - Index('networks_cidr_v6_idx', 'cidr_v6') - ) - - id = Column(Integer, primary_key=True, nullable=False) - label = Column(String(255)) - - injected = Column(Boolean, default=False) - cidr = Column(types.CIDR()) - cidr_v6 = Column(types.CIDR()) - multi_host = Column(Boolean, default=False) - - gateway_v6 = Column(types.IPAddress()) - netmask_v6 = Column(types.IPAddress()) - netmask = Column(types.IPAddress()) - bridge = Column(String(255)) - bridge_interface = Column(String(255)) - gateway = Column(types.IPAddress()) - broadcast = Column(types.IPAddress()) - dns1 = Column(types.IPAddress()) - dns2 = Column(types.IPAddress()) - - vlan = Column(Integer) - vpn_public_address = Column(types.IPAddress()) - vpn_public_port = Column(Integer) - vpn_private_address = Column(types.IPAddress()) - dhcp_start = Column(types.IPAddress()) - - rxtx_base = Column(Integer) - - project_id = Column(String(255)) - priority = 
Column(Integer) - host = Column(String(255)) # , ForeignKey('hosts.id')) - uuid = Column(String(36)) - - mtu = Column(Integer) - dhcp_server = Column(types.IPAddress()) - enable_dhcp = Column(Boolean, default=True) - share_address = Column(Boolean, default=False) - - -class VirtualInterface(BASE, NovaBase): - """Represents a virtual interface on an instance.""" - __tablename__ = 'virtual_interfaces' - __table_args__ = ( - schema.UniqueConstraint("address", "deleted", - name="uniq_virtual_interfaces0address0deleted"), - Index('network_id', 'network_id'), - Index('virtual_interfaces_instance_uuid_fkey', 'instance_uuid'), - ) - id = Column(Integer, primary_key=True, nullable=False) - address = Column(String(255)) - network_id = Column(Integer) - instance_uuid = Column(String(36), ForeignKey('instances.uuid')) - uuid = Column(String(36)) - - -# TODO(vish): can these both come from the same baseclass? -class FixedIp(BASE, NovaBase): - """Represents a fixed ip for an instance.""" - __tablename__ = 'fixed_ips' - __table_args__ = ( - schema.UniqueConstraint( - "address", "deleted", name="uniq_fixed_ips0address0deleted"), - Index('fixed_ips_virtual_interface_id_fkey', 'virtual_interface_id'), - Index('network_id', 'network_id'), - Index('address', 'address'), - Index('fixed_ips_instance_uuid_fkey', 'instance_uuid'), - Index('fixed_ips_host_idx', 'host'), - Index('fixed_ips_network_id_host_deleted_idx', 'network_id', 'host', - 'deleted'), - Index('fixed_ips_address_reserved_network_id_deleted_idx', - 'address', 'reserved', 'network_id', 'deleted'), - Index('fixed_ips_deleted_allocated_idx', 'address', 'deleted', - 'allocated') - ) - id = Column(Integer, primary_key=True) - address = Column(types.IPAddress()) - network_id = Column(Integer) - virtual_interface_id = Column(Integer) - instance_uuid = Column(String(36), ForeignKey('instances.uuid')) - # associated means that a fixed_ip has its instance_id column set - # allocated means that a fixed_ip has its virtual_interface_id 
column set - # TODO(sshturm) add default in db - allocated = Column(Boolean, default=False) - # leased means dhcp bridge has leased the ip - # TODO(sshturm) add default in db - leased = Column(Boolean, default=False) - # TODO(sshturm) add default in db - reserved = Column(Boolean, default=False) - host = Column(String(255)) - network = orm.relationship(Network, - backref=orm.backref('fixed_ips'), - foreign_keys=network_id, - primaryjoin='and_(' - 'FixedIp.network_id == Network.id,' - 'FixedIp.deleted == 0,' - 'Network.deleted == 0)') - instance = orm.relationship(Instance, - foreign_keys=instance_uuid, - primaryjoin='and_(' - 'FixedIp.instance_uuid == Instance.uuid,' - 'FixedIp.deleted == 0,' - 'Instance.deleted == 0)') - - -class FloatingIp(BASE, NovaBase): - """Represents a floating ip that dynamically forwards to a fixed ip.""" - __tablename__ = 'floating_ips' - __table_args__ = ( - schema.UniqueConstraint("address", "deleted", - name="uniq_floating_ips0address0deleted"), - Index('fixed_ip_id', 'fixed_ip_id'), - Index('floating_ips_host_idx', 'host'), - Index('floating_ips_project_id_idx', 'project_id'), - Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx', - 'pool', 'deleted', 'fixed_ip_id', 'project_id') - ) - id = Column(Integer, primary_key=True) - address = Column(types.IPAddress()) - fixed_ip_id = Column(Integer) - project_id = Column(String(255)) - host = Column(String(255)) # , ForeignKey('hosts.id')) - auto_assigned = Column(Boolean, default=False) - # TODO(sshturm) add default in db - pool = Column(String(255)) - interface = Column(String(255)) - fixed_ip = orm.relationship(FixedIp, - backref=orm.backref('floating_ips'), - foreign_keys=fixed_ip_id, - primaryjoin='and_(' - 'FloatingIp.fixed_ip_id == FixedIp.id,' - 'FloatingIp.deleted == 0,' - 'FixedIp.deleted == 0)') - - -class DNSDomain(BASE, NovaBase): - """Represents a DNS domain with availability zone or project info.""" - __tablename__ = 'dns_domains' - __table_args__ = ( - 
Index('project_id', 'project_id'), - Index('dns_domains_domain_deleted_idx', 'domain', 'deleted'), - ) - deleted = Column(Boolean, default=False) - domain = Column(String(255), primary_key=True) - scope = Column(String(255)) - availability_zone = Column(String(255)) - project_id = Column(String(255)) - - -class ConsolePool(BASE, NovaBase): - """Represents pool of consoles on the same physical node.""" - __tablename__ = 'console_pools' - __table_args__ = ( - schema.UniqueConstraint( - "host", "console_type", "compute_host", "deleted", - name="uniq_console_pools0host0console_type0compute_host0deleted"), - ) - id = Column(Integer, primary_key=True) - address = Column(types.IPAddress()) - username = Column(String(255)) - password = Column(String(255)) - console_type = Column(String(255)) - public_hostname = Column(String(255)) - host = Column(String(255)) - compute_host = Column(String(255)) - - -class Console(BASE, NovaBase): - """Represents a console session for an instance.""" - __tablename__ = 'consoles' - __table_args__ = ( - Index('consoles_instance_uuid_idx', 'instance_uuid'), - ) - id = Column(Integer, primary_key=True) - instance_name = Column(String(255)) - instance_uuid = Column(String(36), ForeignKey('instances.uuid')) - password = Column(String(255)) - port = Column(Integer) - pool_id = Column(Integer, ForeignKey('console_pools.id')) - pool = orm.relationship(ConsolePool, backref=orm.backref('consoles')) - - -class InstanceMetadata(BASE, NovaBase): - """Represents a user-provided metadata key/value pair for an instance.""" - __tablename__ = 'instance_metadata' - __table_args__ = ( - Index('instance_metadata_instance_uuid_idx', 'instance_uuid'), - ) - id = Column(Integer, primary_key=True) - key = Column(String(255)) - value = Column(String(255)) - instance_uuid = Column(String(36), ForeignKey('instances.uuid')) - instance = orm.relationship(Instance, backref="metadata", - foreign_keys=instance_uuid, - primaryjoin='and_(' - 'InstanceMetadata.instance_uuid 
== ' - 'Instance.uuid,' - 'InstanceMetadata.deleted == 0)') - - -class InstanceSystemMetadata(BASE, NovaBase): - """Represents a system-owned metadata key/value pair for an instance.""" - __tablename__ = 'instance_system_metadata' - __table_args__ = () - id = Column(Integer, primary_key=True) - key = Column(String(255), nullable=False) - value = Column(String(255)) - instance_uuid = Column(String(36), - ForeignKey('instances.uuid'), - nullable=False) - - primary_join = ('and_(InstanceSystemMetadata.instance_uuid == ' - 'Instance.uuid, InstanceSystemMetadata.deleted == 0)') - instance = orm.relationship(Instance, backref="system_metadata", - foreign_keys=instance_uuid, - primaryjoin=primary_join) - - -class InstanceTypeProjects(BASE, NovaBase): - """Represent projects associated instance_types.""" - __tablename__ = "instance_type_projects" - __table_args__ = (schema.UniqueConstraint( - "instance_type_id", "project_id", "deleted", - name="uniq_instance_type_projects0instance_type_id0project_id0deleted" - ), - ) - id = Column(Integer, primary_key=True) - instance_type_id = Column(Integer, ForeignKey('instance_types.id'), - nullable=False) - project_id = Column(String(255)) - - instance_type = orm.relationship(InstanceTypes, backref="projects", - foreign_keys=instance_type_id, - primaryjoin='and_(' - 'InstanceTypeProjects.instance_type_id == InstanceTypes.id,' - 'InstanceTypeProjects.deleted == 0)') - - -class InstanceTypeExtraSpecs(BASE, NovaBase): - """Represents additional specs as key/value pairs for an instance_type.""" - __tablename__ = 'instance_type_extra_specs' - __table_args__ = ( - Index('instance_type_extra_specs_instance_type_id_key_idx', - 'instance_type_id', 'key'), - schema.UniqueConstraint( - "instance_type_id", "key", "deleted", - name=("uniq_instance_type_extra_specs0" - "instance_type_id0key0deleted") - ), - ) - id = Column(Integer, primary_key=True) - key = Column(String(255)) - value = Column(String(255)) - instance_type_id = Column(Integer, 
ForeignKey('instance_types.id'), - nullable=False) - instance_type = orm.relationship(InstanceTypes, backref="extra_specs", - foreign_keys=instance_type_id, - primaryjoin='and_(' - 'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,' - 'InstanceTypeExtraSpecs.deleted == 0)') - - -class Cell(BASE, NovaBase): - """Represents parent and child cells of this cell. Cells can - have multiple parents and children, so there could be any number - of entries with is_parent=True or False - """ - __tablename__ = 'cells' - __table_args__ = (schema.UniqueConstraint( - "name", "deleted", name="uniq_cells0name0deleted" - ), - ) - id = Column(Integer, primary_key=True) - # Name here is the 'short name' of a cell. For instance: 'child1' - name = Column(String(255)) - api_url = Column(String(255)) - - transport_url = Column(String(255), nullable=False) - - weight_offset = Column(Float(), default=0.0) - weight_scale = Column(Float(), default=1.0) - is_parent = Column(Boolean()) - - -class AggregateHost(BASE, NovaBase): - """Represents a host that is member of an aggregate.""" - __tablename__ = 'aggregate_hosts' - __table_args__ = (schema.UniqueConstraint( - "host", "aggregate_id", "deleted", - name="uniq_aggregate_hosts0host0aggregate_id0deleted" - ), - ) - id = Column(Integer, primary_key=True, autoincrement=True) - host = Column(String(255)) - aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False) - - -class AggregateMetadata(BASE, NovaBase): - """Represents a metadata key/value pair for an aggregate.""" - __tablename__ = 'aggregate_metadata' - __table_args__ = ( - schema.UniqueConstraint("aggregate_id", "key", "deleted", - name="uniq_aggregate_metadata0aggregate_id0key0deleted" - ), - Index('aggregate_metadata_key_idx', 'key'), - ) - id = Column(Integer, primary_key=True) - key = Column(String(255), nullable=False) - value = Column(String(255), nullable=False) - aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False) - - -class 
Aggregate(BASE, NovaBase): - """Represents a cluster of hosts that exists in this zone.""" - __tablename__ = 'aggregates' - __table_args__ = () - id = Column(Integer, primary_key=True, autoincrement=True) - name = Column(String(255)) - _hosts = orm.relationship(AggregateHost, - primaryjoin='and_(' - 'Aggregate.id == AggregateHost.aggregate_id,' - 'AggregateHost.deleted == 0,' - 'Aggregate.deleted == 0)') - - _metadata = orm.relationship(AggregateMetadata, - primaryjoin='and_(' - 'Aggregate.id == AggregateMetadata.aggregate_id,' - 'AggregateMetadata.deleted == 0,' - 'Aggregate.deleted == 0)') - - @property - def _extra_keys(self): - return ['hosts', 'metadetails', 'availability_zone'] - - @property - def hosts(self): - return [h.host for h in self._hosts] - - @property - def metadetails(self): - return dict([(m.key, m.value) for m in self._metadata]) - - @property - def availability_zone(self): - if 'availability_zone' not in self.metadetails: - return None - return self.metadetails['availability_zone'] - - -class AgentBuild(BASE, NovaBase): - """Represents an agent build.""" - __tablename__ = 'agent_builds' - __table_args__ = ( - Index('agent_builds_hypervisor_os_arch_idx', 'hypervisor', 'os', - 'architecture'), - schema.UniqueConstraint("hypervisor", "os", "architecture", "deleted", - name="uniq_agent_builds0hypervisor0os0architecture0deleted"), - ) - id = Column(Integer, primary_key=True) - hypervisor = Column(String(255)) - os = Column(String(255)) - architecture = Column(String(255)) - version = Column(String(255)) - url = Column(String(255)) - md5hash = Column(String(255)) - - -class BandwidthUsage(BASE, NovaBase): - """Cache for instance bandwidth usage data pulled from the hypervisor.""" - __tablename__ = 'bw_usage_cache' - __table_args__ = ( - Index('bw_usage_cache_uuid_start_period_idx', 'uuid', - 'start_period'), - ) - id = Column(Integer, primary_key=True, nullable=False) - uuid = Column(String(36)) - mac = Column(String(255)) - start_period = 
Column(DateTime, nullable=False) - last_refreshed = Column(DateTime) - bw_in = Column(BigInteger) - bw_out = Column(BigInteger) - last_ctr_in = Column(BigInteger) - last_ctr_out = Column(BigInteger) - - -class VolumeUsage(BASE, NovaBase): - """Cache for volume usage data pulled from the hypervisor.""" - __tablename__ = 'volume_usage_cache' - __table_args__ = () - id = Column(Integer, primary_key=True, nullable=False) - volume_id = Column(String(36), nullable=False) - instance_uuid = Column(String(36)) - project_id = Column(String(36)) - user_id = Column(String(64)) - availability_zone = Column(String(255)) - tot_last_refreshed = Column(DateTime) - tot_reads = Column(BigInteger, default=0) - tot_read_bytes = Column(BigInteger, default=0) - tot_writes = Column(BigInteger, default=0) - tot_write_bytes = Column(BigInteger, default=0) - curr_last_refreshed = Column(DateTime) - curr_reads = Column(BigInteger, default=0) - curr_read_bytes = Column(BigInteger, default=0) - curr_writes = Column(BigInteger, default=0) - curr_write_bytes = Column(BigInteger, default=0) - - -class S3Image(BASE, NovaBase): - """Compatibility layer for the S3 image service talking to Glance.""" - __tablename__ = 's3_images' - __table_args__ = () - id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) - uuid = Column(String(36), nullable=False) - - -class VolumeIdMapping(BASE, NovaBase): - """Compatibility layer for the EC2 volume service.""" - __tablename__ = 'volume_id_mappings' - __table_args__ = () - id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) - uuid = Column(String(36), nullable=False) - - -class SnapshotIdMapping(BASE, NovaBase): - """Compatibility layer for the EC2 snapshot service.""" - __tablename__ = 'snapshot_id_mappings' - __table_args__ = () - id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) - uuid = Column(String(36), nullable=False) - - -class InstanceFault(BASE, NovaBase): - __tablename__ = 
'instance_faults' - __table_args__ = ( - Index('instance_faults_host_idx', 'host'), - Index('instance_faults_instance_uuid_deleted_created_at_idx', - 'instance_uuid', 'deleted', 'created_at') - ) - - id = Column(Integer, primary_key=True, nullable=False) - instance_uuid = Column(String(36), - ForeignKey('instances.uuid')) - code = Column(Integer(), nullable=False) - message = Column(String(255)) - details = Column(MediumText()) - host = Column(String(255)) - - -class InstanceAction(BASE, NovaBase): - """Track client actions on an instance. - - The intention is that there will only be one of these per user request. A - lookup by (instance_uuid, request_id) should always return a single result. - """ - __tablename__ = 'instance_actions' - __table_args__ = ( - Index('instance_uuid_idx', 'instance_uuid'), - Index('request_id_idx', 'request_id') - ) - - id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) - action = Column(String(255)) - instance_uuid = Column(String(36), - ForeignKey('instances.uuid')) - request_id = Column(String(255)) - user_id = Column(String(255)) - project_id = Column(String(255)) - start_time = Column(DateTime, default=timeutils.utcnow) - finish_time = Column(DateTime) - message = Column(String(255)) - - -class InstanceActionEvent(BASE, NovaBase): - """Track events that occur during an InstanceAction.""" - __tablename__ = 'instance_actions_events' - __table_args__ = () - - id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) - event = Column(String(255)) - action_id = Column(Integer, ForeignKey('instance_actions.id')) - start_time = Column(DateTime, default=timeutils.utcnow) - finish_time = Column(DateTime) - result = Column(String(255)) - traceback = Column(Text) - host = Column(String(255)) - details = Column(Text) - - -class InstanceIdMapping(BASE, NovaBase): - """Compatibility layer for the EC2 instance service.""" - __tablename__ = 'instance_id_mappings' - __table_args__ = ( - 
Index('ix_instance_id_mappings_uuid', 'uuid'), - ) - id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) - uuid = Column(String(36), nullable=False) - - -class TaskLog(BASE, NovaBase): - """Audit log for background periodic tasks.""" - __tablename__ = 'task_log' - __table_args__ = ( - schema.UniqueConstraint( - 'task_name', 'host', 'period_beginning', 'period_ending', - name="uniq_task_log0task_name0host0period_beginning0period_ending" - ), - Index('ix_task_log_period_beginning', 'period_beginning'), - Index('ix_task_log_host', 'host'), - Index('ix_task_log_period_ending', 'period_ending'), - ) - id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) - task_name = Column(String(255), nullable=False) - state = Column(String(255), nullable=False) - host = Column(String(255), nullable=False) - period_beginning = Column(DateTime, default=timeutils.utcnow, - nullable=False) - period_ending = Column(DateTime, default=timeutils.utcnow, - nullable=False) - message = Column(String(255), nullable=False) - task_items = Column(Integer(), default=0) - errors = Column(Integer(), default=0) - - -class InstanceGroupMember(BASE, NovaBase): - """Represents the members for an instance group.""" - __tablename__ = 'instance_group_member' - __table_args__ = ( - Index('instance_group_member_instance_idx', 'instance_id'), - ) - id = Column(Integer, primary_key=True, nullable=False) - instance_id = Column(String(255)) - group_id = Column(Integer, ForeignKey('instance_groups.id'), - nullable=False) - - -class InstanceGroupPolicy(BASE, NovaBase): - """Represents the policy type for an instance group.""" - __tablename__ = 'instance_group_policy' - __table_args__ = ( - Index('instance_group_policy_policy_idx', 'policy'), - ) - id = Column(Integer, primary_key=True, nullable=False) - policy = Column(String(255)) - group_id = Column(Integer, ForeignKey('instance_groups.id'), - nullable=False) - - -class InstanceGroup(BASE, NovaBase): - """Represents an 
instance group. - - A group will maintain a collection of instances and the relationship - between them. - """ - - __tablename__ = 'instance_groups' - __table_args__ = ( - schema.UniqueConstraint("uuid", "deleted", - name="uniq_instance_groups0uuid0deleted"), - ) - - id = Column(Integer, primary_key=True, autoincrement=True) - user_id = Column(String(255)) - project_id = Column(String(255)) - uuid = Column(String(36), nullable=False) - name = Column(String(255)) - _policies = orm.relationship(InstanceGroupPolicy, primaryjoin='and_(' - 'InstanceGroup.id == InstanceGroupPolicy.group_id,' - 'InstanceGroupPolicy.deleted == 0,' - 'InstanceGroup.deleted == 0)') - _members = orm.relationship(InstanceGroupMember, primaryjoin='and_(' - 'InstanceGroup.id == InstanceGroupMember.group_id,' - 'InstanceGroupMember.deleted == 0,' - 'InstanceGroup.deleted == 0)') - - @property - def policies(self): - return [p.policy for p in self._policies] - - @property - def members(self): - return [m.instance_id for m in self._members] - - -class PciDevice(BASE, NovaBase): - """Represents a PCI host device that can be passed through to instances. 
- """ - __tablename__ = 'pci_devices' - __table_args__ = ( - Index('ix_pci_devices_compute_node_id_deleted', - 'compute_node_id', 'deleted'), - Index('ix_pci_devices_instance_uuid_deleted', - 'instance_uuid', 'deleted'), - schema.UniqueConstraint( - "compute_node_id", "address", "deleted", - name="uniq_pci_devices0compute_node_id0address0deleted") - ) - id = Column(Integer, primary_key=True) - - compute_node_id = Column(Integer, ForeignKey('compute_nodes.id'), - nullable=False) - - # physical address of device domain:bus:slot.func (0000:09:01.1) - address = Column(String(12), nullable=False) - - vendor_id = Column(String(4), nullable=False) - product_id = Column(String(4), nullable=False) - dev_type = Column(String(8), nullable=False) - dev_id = Column(String(255)) - - # label is abstract device name, that is used to unify devices with the - # same functionality with different addresses or host. - label = Column(String(255), nullable=False) - - status = Column(String(36), nullable=False) - # the request_id is used to identify a device that is allocated for a - # particular request - request_id = Column(String(36), nullable=True) - - extra_info = Column(Text) - - instance_uuid = Column(String(36)) - instance = orm.relationship(Instance, backref="pci_devices", - foreign_keys=instance_uuid, - primaryjoin='and_(' - 'PciDevice.instance_uuid == Instance.uuid,' - 'PciDevice.deleted == 0)') diff --git a/juno-patches/nova/instance_mapping_uuid_patch/nova/network/neutronv2/api.py b/juno-patches/nova/instance_mapping_uuid_patch/nova/network/neutronv2/api.py deleted file mode 100644 index 383c0c5..0000000 --- a/juno-patches/nova/instance_mapping_uuid_patch/nova/network/neutronv2/api.py +++ /dev/null @@ -1,1496 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved -# Copyright (c) 2012 NEC Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import time -import uuid - -from neutronclient.common import exceptions as neutron_client_exc -from oslo.config import cfg - -from nova.api.openstack import extensions -from nova.compute import flavors -from nova.compute import utils as compute_utils -from nova import conductor -from nova import exception -from nova.i18n import _, _LE, _LW -from nova.network import base_api -from nova.network import model as network_model -from nova.network import neutronv2 -from nova.network.neutronv2 import constants -from nova.network.security_group import openstack_driver -from nova import objects -from nova.openstack.common import excutils -from nova.openstack.common import lockutils -from nova.openstack.common import log as logging -from nova.openstack.common import uuidutils -from nova.pci import pci_manager -from nova.pci import pci_request -from nova.pci import pci_whitelist - -neutron_opts = [ - cfg.StrOpt('url', - default='http://127.0.0.1:9696', - help='URL for connecting to neutron', - deprecated_group='DEFAULT', - deprecated_name='neutron_url'), - cfg.IntOpt('url_timeout', - default=30, - help='Timeout value for connecting to neutron in seconds', - deprecated_group='DEFAULT', - deprecated_name='neutron_url_timeout'), - cfg.StrOpt('admin_user_id', - help='User id for connecting to neutron in admin context'), - cfg.StrOpt('admin_username', - help='Username for connecting to neutron in admin context', - deprecated_group='DEFAULT', - deprecated_name='neutron_admin_username'), - cfg.StrOpt('admin_password', - help='Password for connecting to neutron in admin 
context', - secret=True, - deprecated_group='DEFAULT', - deprecated_name='neutron_admin_password'), - cfg.StrOpt('admin_tenant_id', - help='Tenant id for connecting to neutron in admin context', - deprecated_group='DEFAULT', - deprecated_name='neutron_admin_tenant_id'), - cfg.StrOpt('admin_tenant_name', - help='Tenant name for connecting to neutron in admin context. ' - 'This option will be ignored if neutron_admin_tenant_id ' - 'is set. Note that with Keystone V3 tenant names are ' - 'only unique within a domain.', - deprecated_group='DEFAULT', - deprecated_name='neutron_admin_tenant_name'), - cfg.StrOpt('region_name', - help='Region name for connecting to neutron in admin context', - deprecated_group='DEFAULT', - deprecated_name='neutron_region_name'), - cfg.StrOpt('admin_auth_url', - default='http://localhost:5000/v2.0', - help='Authorization URL for connecting to neutron in admin ' - 'context', - deprecated_group='DEFAULT', - deprecated_name='neutron_admin_auth_url'), - cfg.BoolOpt('api_insecure', - default=False, - help='If set, ignore any SSL validation issues', - deprecated_group='DEFAULT', - deprecated_name='neutron_api_insecure'), - cfg.StrOpt('auth_strategy', - default='keystone', - help='Authorization strategy for connecting to ' - 'neutron in admin context', - deprecated_group='DEFAULT', - deprecated_name='neutron_auth_strategy'), - # TODO(berrange) temporary hack until Neutron can pass over the - # name of the OVS bridge it is configured with - cfg.StrOpt('ovs_bridge', - default='br-int', - help='Name of Integration Bridge used by Open vSwitch', - deprecated_group='DEFAULT', - deprecated_name='neutron_ovs_bridge'), - cfg.IntOpt('extension_sync_interval', - default=600, - help='Number of seconds before querying neutron for' - ' extensions', - deprecated_group='DEFAULT', - deprecated_name='neutron_extension_sync_interval'), - cfg.StrOpt('ca_certificates_file', - help='Location of CA certificates file to use for ' - 'neutron client requests.', - 
deprecated_group='DEFAULT', - deprecated_name='neutron_ca_certificates_file'), - cfg.BoolOpt('allow_duplicate_networks', - default=False, - help='Allow an instance to have multiple vNICs attached to ' - 'the same Neutron network.'), - ] - -CONF = cfg.CONF -# neutron_opts options in the DEFAULT group were deprecated in Juno -CONF.register_opts(neutron_opts, 'neutron') -CONF.import_opt('default_floating_pool', 'nova.network.floating_ips') -CONF.import_opt('flat_injected', 'nova.network.manager') -LOG = logging.getLogger(__name__) - -soft_external_network_attach_authorize = extensions.soft_core_authorizer( - 'network', 'attach_external_network') - - -class API(base_api.NetworkAPI): - """API for interacting with the neutron 2.x API.""" - - def __init__(self): - super(API, self).__init__() - self.last_neutron_extension_sync = None - self.extensions = {} - self.conductor_api = conductor.API() - self.security_group_api = ( - openstack_driver.get_openstack_security_group_driver()) - - def setup_networks_on_host(self, context, instance, host=None, - teardown=False): - """Setup or teardown the network structures.""" - - def _get_available_networks(self, context, project_id, - net_ids=None, neutron=None): - """Return a network list available for the tenant. - The list contains networks owned by the tenant and public networks. - If net_ids specified, it searches networks with requested IDs only. - """ - if not neutron: - neutron = neutronv2.get_client(context) - - if net_ids: - # If user has specified to attach instance only to specific - # networks then only add these to **search_opts. This search will - # also include 'shared' networks. - search_opts = {'id': net_ids} - nets = neutron.list_networks(**search_opts).get('networks', []) - else: - # (1) Retrieve non-public network list owned by the tenant. - search_opts = {'tenant_id': project_id, 'shared': False} - nets = neutron.list_networks(**search_opts).get('networks', []) - # (2) Retrieve public network list. 
- search_opts = {'shared': True} - nets += neutron.list_networks(**search_opts).get('networks', []) - - _ensure_requested_network_ordering( - lambda x: x['id'], - nets, - net_ids) - - return nets - - def _create_port(self, port_client, instance, network_id, port_req_body, - fixed_ip=None, security_group_ids=None, - available_macs=None, dhcp_opts=None): - """Attempts to create a port for the instance on the given network. - - :param port_client: The client to use to create the port. - :param instance: Create the port for the given instance. - :param network_id: Create the port on the given network. - :param port_req_body: Pre-populated port request. Should have the - device_id, device_owner, and any required neutron extension values. - :param fixed_ip: Optional fixed IP to use from the given network. - :param security_group_ids: Optional list of security group IDs to - apply to the port. - :param available_macs: Optional set of available MAC addresses to use. - :param dhcp_opts: Optional DHCP options. - :returns: ID of the created port. - :raises PortLimitExceeded: If neutron fails with an OverQuota error. - :raises NoMoreFixedIps: If neutron fails with - IpAddressGenerationFailure error. 
- """ - try: - if fixed_ip: - port_req_body['port']['fixed_ips'] = [{'ip_address': fixed_ip}] - port_req_body['port']['network_id'] = network_id - port_req_body['port']['admin_state_up'] = True - port_req_body['port']['tenant_id'] = instance['project_id'] - if security_group_ids: - port_req_body['port']['security_groups'] = security_group_ids - if available_macs is not None: - if not available_macs: - raise exception.PortNotFree( - instance=instance['uuid']) - mac_address = available_macs.pop() - port_req_body['port']['mac_address'] = mac_address - if dhcp_opts is not None: - port_req_body['port']['extra_dhcp_opts'] = dhcp_opts - port_id = port_client.create_port(port_req_body)['port']['id'] - LOG.debug('Successfully created port: %s', port_id, - instance=instance) - return port_id - except neutron_client_exc.OverQuotaClient: - LOG.warning(_LW( - 'Neutron error: Port quota exceeded in tenant: %s'), - port_req_body['port']['tenant_id'], instance=instance) - raise exception.PortLimitExceeded() - except neutron_client_exc.IpAddressGenerationFailureClient: - LOG.warning(_LW('Neutron error: No more fixed IPs in network: %s'), - network_id, instance=instance) - raise exception.NoMoreFixedIps() - except neutron_client_exc.MacAddressInUseClient: - LOG.warning(_LW('Neutron error: MAC address %(mac)s is already ' - 'in use on network %(network)s.') % - {'mac': mac_address, 'network': network_id}, - instance=instance) - raise exception.PortInUse(port_id=mac_address) - except neutron_client_exc.NeutronClientException: - with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Neutron error creating port on network %s'), - network_id, instance=instance) - - def _check_external_network_attach(self, context, nets): - """Check if attaching to external network is permitted.""" - if not soft_external_network_attach_authorize(context): - for net in nets: - # Perform this check here rather than in validate_networks to - # ensure the check is performed every time - # 
allocate_for_instance is invoked - if net.get('router:external'): - raise exception.ExternalNetworkAttachForbidden( - network_uuid=net['id']) - - def allocate_for_instance(self, context, instance, **kwargs): - """Allocate network resources for the instance. - - :param context: The request context. - :param instance: nova.objects.instance.Instance object. - :param requested_networks: optional value containing - network_id, fixed_ip, and port_id - :param security_groups: security groups to allocate for instance - :param macs: None or a set of MAC addresses that the instance - should use. macs is supplied by the hypervisor driver (contrast - with requested_networks which is user supplied). - NB: NeutronV2 currently assigns hypervisor supplied MAC addresses - to arbitrary networks, which requires openflow switches to - function correctly if more than one network is being used with - the bare metal hypervisor (which is the only one known to limit - MAC addresses). - :param dhcp_options: None or a set of key/value pairs that should - determine the DHCP BOOTP response, eg. for PXE booting an instance - configured with the baremetal hypervisor. It is expected that these - are already formatted for the neutron v2 api. - See nova/virt/driver.py:dhcp_options_for_instance for an example. - """ - hypervisor_macs = kwargs.get('macs', None) - available_macs = None - if hypervisor_macs is not None: - # Make a copy we can mutate: records macs that have not been used - # to create a port on a network. If we find a mac with a - # pre-allocated port we also remove it from this set. 
- available_macs = set(hypervisor_macs) - neutron = neutronv2.get_client(context) - LOG.debug('allocate_for_instance()', instance=instance) - if not instance.project_id: - msg = _('empty project id for instance %s') - raise exception.InvalidInput( - reason=msg % instance.uuid) - requested_networks = kwargs.get('requested_networks') - dhcp_opts = kwargs.get('dhcp_options', None) - ports = {} - net_ids = [] - ordered_networks = [] - if requested_networks: - for request in requested_networks: - if request.port_id: - port = neutron.show_port(request.port_id)['port'] - if port.get('device_id'): - raise exception.PortInUse(port_id=request.port_id) - if hypervisor_macs is not None: - if port['mac_address'] not in hypervisor_macs: - raise exception.PortNotUsable( - port_id=request.port_id, - instance=instance.uuid) - else: - # Don't try to use this MAC if we need to create a - # port on the fly later. Identical MACs may be - # configured by users into multiple ports so we - # discard rather than popping. 
- available_macs.discard(port['mac_address']) - request.network_id = port['network_id'] - ports[request.port_id] = port - if request.network_id: - net_ids.append(request.network_id) - ordered_networks.append(request) - - nets = self._get_available_networks(context, instance.project_id, - net_ids) - if not nets: - LOG.warn(_LW("No network configured!"), instance=instance) - return network_model.NetworkInfo([]) - - # if this function is directly called without a requested_network param - # or if it is indirectly called through allocate_port_for_instance() - # with None params=(network_id=None, requested_ip=None, port_id=None, - # pci_request_id=None): - if (not requested_networks - or requested_networks.is_single_unspecified): - # bug/1267723 - if no network is requested and more - # than one is available then raise NetworkAmbiguous Exception - if len(nets) > 1: - msg = _("Multiple possible networks found, use a Network " - "ID to be more specific.") - raise exception.NetworkAmbiguous(msg) - ordered_networks.append( - objects.NetworkRequest(network_id=nets[0]['id'])) - - # NOTE(melwitt): check external net attach permission after the - # check for ambiguity, there could be another - # available net which is permitted bug/1364344 - self._check_external_network_attach(context, nets) - - security_groups = kwargs.get('security_groups', []) - security_group_ids = [] - - # TODO(arosen) Should optimize more to do direct query for security - # group if len(security_groups) == 1 - if len(security_groups): - search_opts = {'tenant_id': instance.project_id} - user_security_groups = neutron.list_security_groups( - **search_opts).get('security_groups') - - for security_group in security_groups: - name_match = None - uuid_match = None - for user_security_group in user_security_groups: - if user_security_group['name'] == security_group: - if name_match: - raise exception.NoUniqueMatch( - _("Multiple security groups found matching" - " '%s'. 
Use an ID to be more specific.") % - security_group) - - name_match = user_security_group['id'] - if user_security_group['id'] == security_group: - uuid_match = user_security_group['id'] - - # If a user names the security group the same as - # another's security groups uuid, the name takes priority. - if not name_match and not uuid_match: - raise exception.SecurityGroupNotFound( - security_group_id=security_group) - elif name_match: - security_group_ids.append(name_match) - elif uuid_match: - security_group_ids.append(uuid_match) - - touched_port_ids = [] - created_port_ids = [] - ports_in_requested_order = [] - nets_in_requested_order = [] - for request in ordered_networks: - # Network lookup for available network_id - network = None - for net in nets: - if net['id'] == request.network_id: - network = net - break - # if network_id did not pass validate_networks() and not available - # here then skip it safely not continuing with a None Network - else: - continue - - nets_in_requested_order.append(network) - # If security groups are requested on an instance then the - # network must has a subnet associated with it. Some plugins - # implement the port-security extension which requires - # 'port_security_enabled' to be True for security groups. - # That is why True is returned if 'port_security_enabled' - # is not found. 
- if (security_groups and not ( - network['subnets'] - and network.get('port_security_enabled', True))): - - raise exception.SecurityGroupCannotBeApplied() - request.network_id = network['id'] - zone = 'compute:%s' % instance.availability_zone - port_req_body = {'port': {'device_id': instance.uuid, - 'device_owner': zone}} - try: - self._populate_neutron_extension_values(context, - instance, - request.pci_request_id, - port_req_body) - # Requires admin creds to set port bindings - port_client = (neutron if not - self._has_port_binding_extension(context) else - neutronv2.get_client(context, admin=True)) - if request.port_id: - port = ports[request.port_id] - if 'binding:profile' in port: - port_req_body['port']['binding:profile'] = \ - port['binding:profile'] - port_client.update_port(port['id'], port_req_body) - touched_port_ids.append(port['id']) - ports_in_requested_order.append(port['id']) - else: - created_port = self._create_port( - port_client, instance, request.network_id, - port_req_body, request.address, - security_group_ids, available_macs, dhcp_opts) - created_port_ids.append(created_port) - ports_in_requested_order.append(created_port) - except Exception: - with excutils.save_and_reraise_exception(): - for port_id in touched_port_ids: - try: - port_req_body = {'port': {'device_id': ''}} - # Requires admin creds to set port bindings - if self._has_port_binding_extension(context): - port_req_body['port']['binding:host_id'] = None - port_client = neutronv2.get_client( - context, admin=True) - else: - port_client = neutron - port_client.update_port(port_id, port_req_body) - except Exception: - msg = _LE("Failed to update port %s") - LOG.exception(msg, port_id) - - self._delete_ports(neutron, instance, created_port_ids) - - nw_info = self.get_instance_nw_info(context, instance, - networks=nets_in_requested_order, - port_ids=ports_in_requested_order) - # NOTE(danms): Only return info about ports we created in this run. 
- # In the initial allocation case, this will be everything we created, - # and in later runs will only be what was created that time. Thus, - # this only affects the attach case, not the original use for this - # method. - return network_model.NetworkInfo([vif for vif in nw_info - if vif['id'] in created_port_ids + - touched_port_ids]) - - def _refresh_neutron_extensions_cache(self, context): - """Refresh the neutron extensions cache when necessary.""" - if (not self.last_neutron_extension_sync or - ((time.time() - self.last_neutron_extension_sync) - >= CONF.neutron.extension_sync_interval)): - neutron = neutronv2.get_client(context) - extensions_list = neutron.list_extensions()['extensions'] - self.last_neutron_extension_sync = time.time() - self.extensions.clear() - self.extensions = dict((ext['name'], ext) - for ext in extensions_list) - - def _has_port_binding_extension(self, context, refresh_cache=False): - if refresh_cache: - self._refresh_neutron_extensions_cache(context) - return constants.PORTBINDING_EXT in self.extensions - - @staticmethod - def _populate_neutron_binding_profile(instance, pci_request_id, - port_req_body): - """Populate neutron binding:profile. - - Populate it with SR-IOV related information - """ - if pci_request_id: - pci_dev = pci_manager.get_instance_pci_devs( - instance, pci_request_id).pop() - devspec = pci_whitelist.get_pci_device_devspec(pci_dev) - profile = {'pci_vendor_info': "%s:%s" % (pci_dev.vendor_id, - pci_dev.product_id), - 'pci_slot': pci_dev.address, - 'physical_network': - devspec.get_tags().get('physical_network') - } - port_req_body['port']['binding:profile'] = profile - - def _populate_neutron_extension_values(self, context, instance, - pci_request_id, port_req_body): - """Populate neutron extension values for the instance. - - If the extensions loaded contain QOS_QUEUE then pass the rxtx_factor. 
- """ - self._refresh_neutron_extensions_cache(context) - if constants.QOS_QUEUE in self.extensions: - flavor = flavors.extract_flavor(instance) - rxtx_factor = flavor.get('rxtx_factor') - port_req_body['port']['rxtx_factor'] = rxtx_factor - if self._has_port_binding_extension(context): - port_req_body['port']['binding:host_id'] = instance.get('host') - self._populate_neutron_binding_profile(instance, - pci_request_id, - port_req_body) - - def _delete_ports(self, neutron, instance, ports, raise_if_fail=False): - exceptions = [] - for port in ports: - try: - neutron.delete_port(port) - except neutronv2.exceptions.NeutronClientException as e: - if e.status_code == 404: - LOG.warning(_LW("Port %s does not exist"), port) - else: - exceptions.append(e) - LOG.warning( - _LW("Failed to delete port %s for instance."), - port, instance=instance, exc_info=True) - if len(exceptions) > 0 and raise_if_fail: - raise exceptions[0] - - def deallocate_for_instance(self, context, instance, **kwargs): - """Deallocate all network resources related to the instance.""" - LOG.debug('deallocate_for_instance()', instance=instance) - search_opts = {'device_id': instance.uuid} - neutron = neutronv2.get_client(context) - data = neutron.list_ports(**search_opts) - ports = [port['id'] for port in data.get('ports', [])] - - requested_networks = kwargs.get('requested_networks') or [] - # NOTE(danms): Temporary and transitional - if isinstance(requested_networks, objects.NetworkRequestList): - requested_networks = requested_networks.as_tuples() - ports_to_skip = [port_id for nets, fips, port_id, pci_request_id - in requested_networks] - ports = set(ports) - set(ports_to_skip) - # Reset device_id and device_owner for the ports that are skipped - for port in ports_to_skip: - port_req_body = {'port': {'device_id': '', 'device_owner': ''}} - try: - neutronv2.get_client(context).update_port(port, - port_req_body) - except Exception: - LOG.info(_('Unable to reset device ID for port %s'), port, - 
instance=instance) - - self._delete_ports(neutron, instance, ports, raise_if_fail=True) - - # NOTE(arosen): This clears out the network_cache only if the instance - # hasn't already been deleted. This is needed when an instance fails to - # launch and is rescheduled onto another compute node. If the instance - # has already been deleted this call does nothing. - base_api.update_instance_cache_with_nw_info(self, context, instance, - network_model.NetworkInfo([])) - - def allocate_port_for_instance(self, context, instance, port_id, - network_id=None, requested_ip=None): - """Allocate a port for the instance.""" - requested_networks = objects.NetworkRequestList( - objects=[objects.NetworkRequest(network_id=network_id, - address=requested_ip, - port_id=port_id, - pci_request_id=None)]) - return self.allocate_for_instance(context, instance, - requested_networks=requested_networks) - - def deallocate_port_for_instance(self, context, instance, port_id): - """Remove a specified port from the instance. - - Return network information for the instance - """ - neutron = neutronv2.get_client(context) - self._delete_ports(neutron, instance, [port_id], raise_if_fail=True) - return self.get_instance_nw_info(context, instance) - - def list_ports(self, context, **search_opts): - """List ports for the client based on search options.""" - return neutronv2.get_client(context).list_ports(**search_opts) - - def show_port(self, context, port_id): - """Return the port for the client given the port id.""" - return neutronv2.get_client(context).show_port(port_id) - - def get_instance_nw_info(self, context, instance, networks=None, - port_ids=None, use_slave=False): - """Return network information for specified instance - and update cache. - """ - # NOTE(geekinutah): It would be nice if use_slave had us call - # special APIs that pummeled slaves instead of - # the master. For now we just ignore this arg. 
- with lockutils.lock('refresh_cache-%s' % instance['uuid']): - result = self._get_instance_nw_info(context, instance, networks, - port_ids) - base_api.update_instance_cache_with_nw_info(self, context, - instance, - nw_info=result, - update_cells=False) - return result - - def _get_instance_nw_info(self, context, instance, networks=None, - port_ids=None): - # NOTE(danms): This is an inner method intended to be called - # by other code that updates instance nwinfo. It *must* be - # called with the refresh_cache-%(instance_uuid) lock held! - LOG.debug('get_instance_nw_info()', instance=instance) - nw_info = self._build_network_info_model(context, instance, networks, - port_ids) - return network_model.NetworkInfo.hydrate(nw_info) - - def _gather_port_ids_and_networks(self, context, instance, networks=None, - port_ids=None): - """Return an instance's complete list of port_ids and networks.""" - - if ((networks is None and port_ids is not None) or - (port_ids is None and networks is not None)): - message = ("This method needs to be called with either " - "networks=None and port_ids=None or port_ids and " - " networks as not none.") - raise exception.NovaException(message=message) - - ifaces = compute_utils.get_nw_info_for_instance(instance) - # This code path is only done when refreshing the network_cache - if port_ids is None: - port_ids = [iface['id'] for iface in ifaces] - net_ids = [iface['network']['id'] for iface in ifaces] - - if networks is None: - networks = self._get_available_networks(context, - instance['project_id'], - net_ids) - # an interface was added/removed from instance. - else: - # Since networks does not contain the existing networks on the - # instance we use their values from the cache and add it. - networks = networks + [ - {'id': iface['network']['id'], - 'name': iface['network']['label'], - 'tenant_id': iface['network']['meta']['tenant_id']} - for iface in ifaces] - - # Include existing interfaces so they are not removed from the db. 
- port_ids = [iface['id'] for iface in ifaces] + port_ids - - return networks, port_ids - - @base_api.refresh_cache - def add_fixed_ip_to_instance(self, context, instance, network_id): - """Add a fixed ip to the instance from specified network.""" - search_opts = {'network_id': network_id} - data = neutronv2.get_client(context).list_subnets(**search_opts) - ipam_subnets = data.get('subnets', []) - if not ipam_subnets: - raise exception.NetworkNotFoundForInstance( - instance_id=instance['uuid']) - - zone = 'compute:%s' % instance['availability_zone'] - search_opts = {'device_id': instance['uuid'], - 'device_owner': zone, - 'network_id': network_id} - data = neutronv2.get_client(context).list_ports(**search_opts) - ports = data['ports'] - for p in ports: - for subnet in ipam_subnets: - fixed_ips = p['fixed_ips'] - fixed_ips.append({'subnet_id': subnet['id']}) - port_req_body = {'port': {'fixed_ips': fixed_ips}} - try: - neutronv2.get_client(context).update_port(p['id'], - port_req_body) - return self._get_instance_nw_info(context, instance) - except Exception as ex: - msg = ("Unable to update port %(portid)s on subnet " - "%(subnet_id)s with failure: %(exception)s") - LOG.debug(msg, {'portid': p['id'], - 'subnet_id': subnet['id'], - 'exception': ex}) - - raise exception.NetworkNotFoundForInstance( - instance_id=instance['uuid']) - - @base_api.refresh_cache - def remove_fixed_ip_from_instance(self, context, instance, address): - """Remove a fixed ip from the instance.""" - zone = 'compute:%s' % instance['availability_zone'] - search_opts = {'device_id': instance['uuid'], - 'device_owner': zone, - 'fixed_ips': 'ip_address=%s' % address} - data = neutronv2.get_client(context).list_ports(**search_opts) - ports = data['ports'] - for p in ports: - fixed_ips = p['fixed_ips'] - new_fixed_ips = [] - for fixed_ip in fixed_ips: - if fixed_ip['ip_address'] != address: - new_fixed_ips.append(fixed_ip) - port_req_body = {'port': {'fixed_ips': new_fixed_ips}} - try: - 
neutronv2.get_client(context).update_port(p['id'], - port_req_body) - except Exception as ex: - msg = ("Unable to update port %(portid)s with" - " failure: %(exception)s") - LOG.debug(msg, {'portid': p['id'], 'exception': ex}) - return self._get_instance_nw_info(context, instance) - - raise exception.FixedIpNotFoundForSpecificInstance( - instance_uuid=instance['uuid'], ip=address) - - def _get_port_vnic_info(self, context, neutron, port_id): - """Retrieve port vnic info - - Invoked with a valid port_id. - Return vnic type and the attached physical network name. - """ - phynet_name = None - vnic_type = None - port = neutron.show_port(port_id, - fields=['binding:vnic_type', 'network_id']).get('port') - vnic_type = port.get('binding:vnic_type', - network_model.VNIC_TYPE_NORMAL) - if vnic_type != network_model.VNIC_TYPE_NORMAL: - net_id = port['network_id'] - net = neutron.show_network(net_id, - fields='provider:physical_network').get('network') - phynet_name = net.get('provider:physical_network') - return vnic_type, phynet_name - - def create_pci_requests_for_sriov_ports(self, context, pci_requests, - requested_networks): - """Check requested networks for any SR-IOV port request. - - Create a PCI request object for each SR-IOV port, and add it to the - pci_requests object that contains a list of PCI request object. 
- """ - if not requested_networks: - return - - neutron = neutronv2.get_client(context, admin=True) - for request_net in requested_networks: - phynet_name = None - vnic_type = network_model.VNIC_TYPE_NORMAL - - if request_net.port_id: - vnic_type, phynet_name = self._get_port_vnic_info( - context, neutron, request_net.port_id) - pci_request_id = None - if vnic_type != network_model.VNIC_TYPE_NORMAL: - request = objects.InstancePCIRequest( - count=1, - spec=[{pci_request.PCI_NET_TAG: phynet_name}], - request_id=str(uuid.uuid4())) - pci_requests.requests.append(request) - pci_request_id = request.request_id - - # Add pci_request_id into the requested network - request_net.pci_request_id = pci_request_id - - def validate_networks(self, context, requested_networks, num_instances): - """Validate that the tenant can use the requested networks. - - Return the number of instances than can be successfully allocated - with the requested network configuration. - """ - LOG.debug('validate_networks() for %s', - requested_networks) - - neutron = neutronv2.get_client(context) - ports_needed_per_instance = 0 - - if requested_networks is None or len(requested_networks) == 0: - nets = self._get_available_networks(context, context.project_id, - neutron=neutron) - if len(nets) > 1: - # Attaching to more than one network by default doesn't - # make sense, as the order will be arbitrary and the guest OS - # won't know which to configure - msg = _("Multiple possible networks found, use a Network " - "ID to be more specific.") - raise exception.NetworkAmbiguous(msg) - else: - ports_needed_per_instance = 1 - - else: - instance_on_net_ids = [] - net_ids_requested = [] - - # TODO(danms): Remove me when all callers pass an object - if isinstance(requested_networks[0], tuple): - requested_networks = objects.NetworkRequestList( - objects=[objects.NetworkRequest.from_tuple(t) - for t in requested_networks]) - - for request in requested_networks: - if request.port_id: - try: - port = 
neutron.show_port(request.port_id).get('port') - except neutron_client_exc.NeutronClientException as e: - if e.status_code == 404: - port = None - else: - with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Failed to access port %s"), - request.port_id) - if not port: - raise exception.PortNotFound(port_id=request.port_id) - if port.get('device_id', None): - raise exception.PortInUse(port_id=request.port_id) - if not port.get('fixed_ips'): - raise exception.PortRequiresFixedIP( - port_id=request.port_id) - request.network_id = port['network_id'] - else: - ports_needed_per_instance += 1 - net_ids_requested.append(request.network_id) - - # NOTE(jecarey) There is currently a race condition. - # That is, if you have more than one request for a specific - # fixed IP at the same time then only one will be allocated - # the ip. The fixed IP will be allocated to only one of the - # instances that will run. The second instance will fail on - # spawn. That instance will go into error state. - # TODO(jecarey) Need to address this race condition once we - # have the ability to update mac addresses in Neutron. - if request.address: - # TODO(jecarey) Need to look at consolidating list_port - # calls once able to OR filters. 
- search_opts = {'network_id': request.network_id, - 'fixed_ips': 'ip_address=%s' % ( - request.address), - 'fields': 'device_id'} - existing_ports = neutron.list_ports( - **search_opts)['ports'] - if existing_ports: - i_uuid = existing_ports[0]['device_id'] - raise exception.FixedIpAlreadyInUse( - address=request.address, - instance_uuid=i_uuid) - - if (not CONF.neutron.allow_duplicate_networks and - request.network_id in instance_on_net_ids): - raise exception.NetworkDuplicated( - network_id=request.network_id) - instance_on_net_ids.append(request.network_id) - - # Now check to see if all requested networks exist - if net_ids_requested: - nets = self._get_available_networks( - context, context.project_id, net_ids_requested, - neutron=neutron) - - for net in nets: - if not net.get('subnets'): - raise exception.NetworkRequiresSubnet( - network_uuid=net['id']) - - if len(nets) != len(net_ids_requested): - requested_netid_set = set(net_ids_requested) - returned_netid_set = set([net['id'] for net in nets]) - lostid_set = requested_netid_set - returned_netid_set - if lostid_set: - id_str = '' - for _id in lostid_set: - id_str = id_str and id_str + ', ' + _id or _id - raise exception.NetworkNotFound(network_id=id_str) - - # Note(PhilD): Ideally Nova would create all required ports as part of - # network validation, but port creation requires some details - # from the hypervisor. 
So we just check the quota and return - # how many of the requested number of instances can be created - if ports_needed_per_instance: - ports = neutron.list_ports(tenant_id=context.project_id)['ports'] - quotas = neutron.show_quota(tenant_id=context.project_id)['quota'] - if quotas.get('port', -1) == -1: - # Unlimited Port Quota - return num_instances - else: - free_ports = quotas.get('port') - len(ports) - ports_needed = ports_needed_per_instance * num_instances - if free_ports >= ports_needed: - return num_instances - else: - return free_ports // ports_needed_per_instance - return num_instances - - def _get_instance_uuids_by_ip(self, context, address): - """Retrieve instance uuids associated with the given ip address. - - :returns: A list of dicts containing the uuids keyed by 'instance_uuid' - e.g. [{'instance_uuid': uuid}, ...] - """ - search_opts = {"fixed_ips": 'ip_address=%s' % address} - data = neutronv2.get_client(context).list_ports(**search_opts) - ports = data.get('ports', []) - return [{'instance_uuid': port['device_id']} for port in ports - if port['device_id']] - - def get_instance_uuids_by_ip_filter(self, context, filters): - """Return a list of dicts in the form of - [{'instance_uuid': uuid}] that matched the ip filter. 
- """ - # filters['ip'] is composed as '^%s$' % fixed_ip.replace('.', '\\.') - ip = filters.get('ip') - # we remove ^$\ in the ip filer - if ip[0] == '^': - ip = ip[1:] - if ip[-1] == '$': - ip = ip[:-1] - ip = ip.replace('\\.', '.') - return self._get_instance_uuids_by_ip(context, ip) - - def _get_port_id_by_fixed_address(self, client, - instance, address): - """Return port_id from a fixed address.""" - zone = 'compute:%s' % instance['availability_zone'] - search_opts = {'device_id': instance['uuid'], - 'device_owner': zone} - data = client.list_ports(**search_opts) - ports = data['ports'] - port_id = None - for p in ports: - for ip in p['fixed_ips']: - if ip['ip_address'] == address: - port_id = p['id'] - break - if not port_id: - raise exception.FixedIpNotFoundForAddress(address=address) - return port_id - - @base_api.refresh_cache - def associate_floating_ip(self, context, instance, - floating_address, fixed_address, - affect_auto_assigned=False): - """Associate a floating ip with a fixed ip.""" - - # Note(amotoki): 'affect_auto_assigned' is not respected - # since it is not used anywhere in nova code and I could - # find why this parameter exists. 
- - client = neutronv2.get_client(context) - port_id = self._get_port_id_by_fixed_address(client, instance, - fixed_address) - fip = self._get_floating_ip_by_address(client, floating_address) - param = {'port_id': port_id, - 'fixed_ip_address': fixed_address} - client.update_floatingip(fip['id'], {'floatingip': param}) - - if fip['port_id']: - port = client.show_port(fip['port_id'])['port'] - orig_instance_uuid = port['device_id'] - - msg_dict = dict(address=floating_address, - instance_id=orig_instance_uuid) - LOG.info(_('re-assign floating IP %(address)s from ' - 'instance %(instance_id)s') % msg_dict) - orig_instance = objects.Instance.get_by_uuid(context, - orig_instance_uuid) - - # purge cached nw info for the original instance - base_api.update_instance_cache_with_nw_info(self, context, - orig_instance) - - def get_all(self, context): - """Get all networks for client.""" - client = neutronv2.get_client(context) - networks = client.list_networks().get('networks') - for network in networks: - network['label'] = network['name'] - return networks - - def get(self, context, network_uuid): - """Get specific network for client.""" - client = neutronv2.get_client(context) - try: - network = client.show_network(network_uuid).get('network') or {} - except neutron_client_exc.NetworkNotFoundClient: - raise exception.NetworkNotFound(network_id=network_uuid) - network['label'] = network['name'] - return network - - def delete(self, context, network_uuid): - """Delete a network for client.""" - raise NotImplementedError() - - def disassociate(self, context, network_uuid): - """Disassociate a network for client.""" - raise NotImplementedError() - - def associate(self, context, network_uuid, host=base_api.SENTINEL, - project=base_api.SENTINEL): - """Associate a network for client.""" - raise NotImplementedError() - - def get_fixed_ip(self, context, id): - """Get a fixed ip from the id.""" - raise NotImplementedError() - - def get_fixed_ip_by_address(self, context, address): - 
"""Return instance uuids given an address.""" - uuid_maps = self._get_instance_uuids_by_ip(context, address) - if len(uuid_maps) == 1: - return uuid_maps[0] - elif not uuid_maps: - raise exception.FixedIpNotFoundForAddress(address=address) - else: - raise exception.FixedIpAssociatedWithMultipleInstances( - address=address) - - def _setup_net_dict(self, client, network_id): - if not network_id: - return {} - pool = client.show_network(network_id)['network'] - return {pool['id']: pool} - - def _setup_port_dict(self, client, port_id): - if not port_id: - return {} - port = client.show_port(port_id)['port'] - return {port['id']: port} - - def _setup_pools_dict(self, client): - pools = self._get_floating_ip_pools(client) - return dict([(i['id'], i) for i in pools]) - - def _setup_ports_dict(self, client, project_id=None): - search_opts = {'tenant_id': project_id} if project_id else {} - ports = client.list_ports(**search_opts)['ports'] - return dict([(p['id'], p) for p in ports]) - - def get_floating_ip(self, context, id): - """Return floating ip object given the floating ip id.""" - client = neutronv2.get_client(context) - try: - fip = client.show_floatingip(id)['floatingip'] - except neutron_client_exc.NeutronClientException as e: - if e.status_code == 404: - raise exception.FloatingIpNotFound(id=id) - else: - with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Unable to access floating IP %s'), id) - pool_dict = self._setup_net_dict(client, - fip['floating_network_id']) - port_dict = self._setup_port_dict(client, fip['port_id']) - return self._format_floating_ip_model(fip, pool_dict, port_dict) - - def _get_floating_ip_pools(self, client, project_id=None): - search_opts = {constants.NET_EXTERNAL: True} - if project_id: - search_opts.update({'tenant_id': project_id}) - data = client.list_networks(**search_opts) - return data['networks'] - - def get_floating_ip_pools(self, context): - """Return floating ip pool names.""" - client = 
neutronv2.get_client(context) - pools = self._get_floating_ip_pools(client) - # Note(salv-orlando): Return a list of names to be consistent with - # nova.network.api.get_floating_ip_pools - return [n['name'] or n['id'] for n in pools] - - def _format_floating_ip_model(self, fip, pool_dict, port_dict): - pool = pool_dict[fip['floating_network_id']] - result = {'id': fip['id'], - 'address': fip['floating_ip_address'], - 'pool': pool['name'] or pool['id'], - 'project_id': fip['tenant_id'], - # In Neutron v2, an exact fixed_ip_id does not exist. - 'fixed_ip_id': fip['port_id'], - } - # In Neutron v2 API fixed_ip_address and instance uuid - # (= device_id) are known here, so pass it as a result. - result['fixed_ip'] = {'address': fip['fixed_ip_address']} - if fip['port_id']: - instance_uuid = port_dict[fip['port_id']]['device_id'] - result['instance'] = {'uuid': instance_uuid} - else: - result['instance'] = None - return result - - def get_floating_ip_by_address(self, context, address): - """Return a floating ip given an address.""" - client = neutronv2.get_client(context) - fip = self._get_floating_ip_by_address(client, address) - pool_dict = self._setup_net_dict(client, - fip['floating_network_id']) - port_dict = self._setup_port_dict(client, fip['port_id']) - return self._format_floating_ip_model(fip, pool_dict, port_dict) - - def get_floating_ips_by_project(self, context): - client = neutronv2.get_client(context) - project_id = context.project_id - fips = client.list_floatingips(tenant_id=project_id)['floatingips'] - pool_dict = self._setup_pools_dict(client) - port_dict = self._setup_ports_dict(client, project_id) - return [self._format_floating_ip_model(fip, pool_dict, port_dict) - for fip in fips] - - def get_floating_ips_by_fixed_address(self, context, fixed_address): - raise NotImplementedError() - - def get_instance_id_by_floating_address(self, context, address): - """Return the instance id a floating ip's fixed ip is allocated to.""" - client = 
neutronv2.get_client(context) - fip = self._get_floating_ip_by_address(client, address) - if not fip['port_id']: - return None - port = client.show_port(fip['port_id'])['port'] - return port['device_id'] - - def get_vifs_by_instance(self, context, instance): - raise NotImplementedError() - - def get_vif_by_mac_address(self, context, mac_address): - raise NotImplementedError() - - def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id): - search_opts = {constants.NET_EXTERNAL: True, 'fields': 'id'} - if uuidutils.is_uuid_like(name_or_id): - search_opts.update({'id': name_or_id}) - else: - search_opts.update({'name': name_or_id}) - data = client.list_networks(**search_opts) - nets = data['networks'] - - if len(nets) == 1: - return nets[0]['id'] - elif len(nets) == 0: - raise exception.FloatingIpPoolNotFound() - else: - msg = (_("Multiple floating IP pools matches found for name '%s'") - % name_or_id) - raise exception.NovaException(message=msg) - - def allocate_floating_ip(self, context, pool=None): - """Add a floating ip to a project from a pool.""" - client = neutronv2.get_client(context) - pool = pool or CONF.default_floating_pool - pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool) - - param = {'floatingip': {'floating_network_id': pool_id}} - try: - fip = client.create_floatingip(param) - except (neutron_client_exc.IpAddressGenerationFailureClient, - neutron_client_exc.ExternalIpAddressExhaustedClient) as e: - raise exception.NoMoreFloatingIps(unicode(e)) - except neutron_client_exc.OverQuotaClient as e: - raise exception.FloatingIpLimitExceeded(unicode(e)) - - return fip['floatingip']['floating_ip_address'] - - def _get_floating_ip_by_address(self, client, address): - """Get floatingip from floating ip address.""" - if not address: - raise exception.FloatingIpNotFoundForAddress(address=address) - data = client.list_floatingips(floating_ip_address=address) - fips = data['floatingips'] - if len(fips) == 0: - raise 
exception.FloatingIpNotFoundForAddress(address=address) - elif len(fips) > 1: - raise exception.FloatingIpMultipleFoundForAddress(address=address) - return fips[0] - - def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port): - """Get floatingips from fixed ip and port.""" - try: - data = client.list_floatingips(fixed_ip_address=fixed_ip, - port_id=port) - # If a neutron plugin does not implement the L3 API a 404 from - # list_floatingips will be raised. - except neutron_client_exc.NeutronClientException as e: - if e.status_code == 404: - return [] - with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Unable to access floating IP %(fixed_ip)s ' - 'for port %(port_id)s'), - {'fixed_ip': fixed_ip, 'port_id': port}) - return data['floatingips'] - - def release_floating_ip(self, context, address, - affect_auto_assigned=False): - """Remove a floating ip with the given address from a project.""" - - # Note(amotoki): We cannot handle a case where multiple pools - # have overlapping IP address range. In this case we cannot use - # 'address' as a unique key. - # This is a limitation of the current nova. - - # Note(amotoki): 'affect_auto_assigned' is not respected - # since it is not used anywhere in nova code and I could - # find why this parameter exists. - - self._release_floating_ip(context, address) - - def disassociate_and_release_floating_ip(self, context, instance, - floating_ip): - """Removes (deallocates) and deletes the floating ip. - - This api call was added to allow this to be done in one operation - if using neutron. 
- """ - self._release_floating_ip(context, floating_ip['address'], - raise_if_associated=False) - - def _release_floating_ip(self, context, address, - raise_if_associated=True): - client = neutronv2.get_client(context) - fip = self._get_floating_ip_by_address(client, address) - - if raise_if_associated and fip['port_id']: - raise exception.FloatingIpAssociated(address=address) - client.delete_floatingip(fip['id']) - - @base_api.refresh_cache - def disassociate_floating_ip(self, context, instance, address, - affect_auto_assigned=False): - """Disassociate a floating ip from the instance.""" - - # Note(amotoki): 'affect_auto_assigned' is not respected - # since it is not used anywhere in nova code and I could - # find why this parameter exists. - - client = neutronv2.get_client(context) - fip = self._get_floating_ip_by_address(client, address) - client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}}) - - def migrate_instance_start(self, context, instance, migration): - """Start to migrate the network of an instance.""" - # NOTE(wenjianhn): just pass to make migrate instance doesn't - # raise for now. 
- pass - - def migrate_instance_finish(self, context, instance, migration): - """Finish migrating the network of an instance.""" - if not self._has_port_binding_extension(context, refresh_cache=True): - return - neutron = neutronv2.get_client(context, admin=True) - search_opts = {'device_id': instance['uuid'], - 'tenant_id': instance['project_id']} - data = neutron.list_ports(**search_opts) - ports = data['ports'] - for p in ports: - port_req_body = {'port': {'binding:host_id': - migration['dest_compute']}} - try: - neutron.update_port(p['id'], port_req_body) - except Exception: - with excutils.save_and_reraise_exception(): - msg = _LE("Unable to update host of port %s") - LOG.exception(msg, p['id']) - - def add_network_to_project(self, context, project_id, network_uuid=None): - """Force add a network to the project.""" - raise NotImplementedError() - - def _nw_info_get_ips(self, client, port): - network_IPs = [] - for fixed_ip in port['fixed_ips']: - fixed = network_model.FixedIP(address=fixed_ip['ip_address']) - floats = self._get_floating_ips_by_fixed_and_port( - client, fixed_ip['ip_address'], port['id']) - for ip in floats: - fip = network_model.IP(address=ip['floating_ip_address'], - type='floating') - fixed.add_floating_ip(fip) - network_IPs.append(fixed) - return network_IPs - - def _nw_info_get_subnets(self, context, port, network_IPs): - subnets = self._get_subnets_from_port(context, port) - for subnet in subnets: - subnet['ips'] = [fixed_ip for fixed_ip in network_IPs - if fixed_ip.is_in_subnet(subnet)] - return subnets - - def _nw_info_build_network(self, port, networks, subnets): - network_name = None - for net in networks: - if port['network_id'] == net['id']: - network_name = net['name'] - tenant_id = net['tenant_id'] - break - else: - tenant_id = port['tenant_id'] - LOG.warning(_LW("Network %(id)s not matched with the tenants " - "network! 
The ports tenant %(tenant_id)s will be " - "used."), - {'id': port['network_id'], 'tenant_id': tenant_id}) - - bridge = None - ovs_interfaceid = None - # Network model metadata - should_create_bridge = None - vif_type = port.get('binding:vif_type') - # TODO(berrange) Neutron should pass the bridge name - # in another binding metadata field - if vif_type == network_model.VIF_TYPE_OVS: - bridge = CONF.neutron.ovs_bridge - ovs_interfaceid = port['id'] - elif vif_type == network_model.VIF_TYPE_BRIDGE: - bridge = "brq" + port['network_id'] - should_create_bridge = True - elif vif_type == network_model.VIF_TYPE_DVS: - if network_name is None: - bridge = port['network_id'] - else: - bridge = '%s-%s' % (network_name, port['network_id']) - - # Prune the bridge name if necessary. For the DVS this is not done - # as the bridge is a '-'. - if bridge is not None and vif_type != network_model.VIF_TYPE_DVS: - bridge = bridge[:network_model.NIC_NAME_LEN] - - network = network_model.Network( - id=port['network_id'], - bridge=bridge, - injected=CONF.flat_injected, - label=network_name, - tenant_id=tenant_id - ) - network['subnets'] = subnets - port_profile = port.get('binding:profile') - if port_profile: - physical_network = port_profile.get('physical_network') - if physical_network: - network['physical_network'] = physical_network - - if should_create_bridge is not None: - network['should_create_bridge'] = should_create_bridge - return network, ovs_interfaceid - - def _build_network_info_model(self, context, instance, networks=None, - port_ids=None): - """Return list of ordered VIFs attached to instance. - - :param context - request context. - :param instance - instance we are returning network info for. - :param networks - List of networks being attached to an instance. - If value is None this value will be populated - from the existing cached value. - :param port_ids - List of port_ids that are being attached to an - instance in order of attachment. 
If value is None - this value will be populated from the existing - cached value. - """ - - search_opts = {'tenant_id': instance['project_id'], - 'device_id': instance['uuid'], } - client = neutronv2.get_client(context, admin=True) - data = client.list_ports(**search_opts) - - current_neutron_ports = data.get('ports', []) - networks, port_ids = self._gather_port_ids_and_networks( - context, instance, networks, port_ids) - nw_info = network_model.NetworkInfo() - - current_neutron_port_map = {} - for current_neutron_port in current_neutron_ports: - current_neutron_port_map[current_neutron_port['id']] = ( - current_neutron_port) - - for port_id in port_ids: - current_neutron_port = current_neutron_port_map.get(port_id) - if current_neutron_port: - vif_active = False - if (current_neutron_port['admin_state_up'] is False - or current_neutron_port['status'] == 'ACTIVE'): - vif_active = True - - network_IPs = self._nw_info_get_ips(client, - current_neutron_port) - subnets = self._nw_info_get_subnets(context, - current_neutron_port, - network_IPs) - - devname = "tap" + current_neutron_port['id'] - devname = devname[:network_model.NIC_NAME_LEN] - - network, ovs_interfaceid = ( - self._nw_info_build_network(current_neutron_port, - networks, subnets)) - - nw_info.append(network_model.VIF( - id=current_neutron_port['id'], - address=current_neutron_port['mac_address'], - network=network, - vnic_type=current_neutron_port.get('binding:vnic_type', - network_model.VNIC_TYPE_NORMAL), - type=current_neutron_port.get('binding:vif_type'), - profile=current_neutron_port.get('binding:profile'), - details=current_neutron_port.get('binding:vif_details'), - ovs_interfaceid=ovs_interfaceid, - devname=devname, - active=vif_active)) - - return nw_info - - def _get_subnets_from_port(self, context, port): - """Return the subnets for a given port.""" - - fixed_ips = port['fixed_ips'] - # No fixed_ips for the port means there is no subnet associated - # with the network the port is created on. 
- # Since list_subnets(id=[]) returns all subnets visible for the - # current tenant, returned subnets may contain subnets which is not - # related to the port. To avoid this, the method returns here. - if not fixed_ips: - return [] - search_opts = {'id': [ip['subnet_id'] for ip in fixed_ips]} - data = neutronv2.get_client(context).list_subnets(**search_opts) - ipam_subnets = data.get('subnets', []) - subnets = [] - - for subnet in ipam_subnets: - subnet_dict = {'cidr': subnet['cidr'], - 'gateway': network_model.IP( - address=subnet['gateway_ip'], - type='gateway'), - } - - # attempt to populate DHCP server field - search_opts = {'network_id': subnet['network_id'], - 'device_owner': 'network:dhcp'} - data = neutronv2.get_client(context).list_ports(**search_opts) - dhcp_ports = data.get('ports', []) - for p in dhcp_ports: - for ip_pair in p['fixed_ips']: - if ip_pair['subnet_id'] == subnet['id']: - subnet_dict['dhcp_server'] = ip_pair['ip_address'] - break - - subnet_object = network_model.Subnet(**subnet_dict) - for dns in subnet.get('dns_nameservers', []): - subnet_object.add_dns( - network_model.IP(address=dns, type='dns')) - - for route in subnet.get('host_routes', []): - subnet_object.add_route( - network_model.Route(cidr=route['destination'], - gateway=network_model.IP( - address=route['nexthop'], - type='gateway'))) - - subnets.append(subnet_object) - return subnets - - def get_dns_domains(self, context): - """Return a list of available dns domains. - - These can be used to create DNS entries for floating ips. 
- """ - raise NotImplementedError() - - def add_dns_entry(self, context, address, name, dns_type, domain): - """Create specified DNS entry for address.""" - raise NotImplementedError() - - def modify_dns_entry(self, context, name, address, domain): - """Create specified DNS entry for address.""" - raise NotImplementedError() - - def delete_dns_entry(self, context, name, domain): - """Delete the specified dns entry.""" - raise NotImplementedError() - - def delete_dns_domain(self, context, domain): - """Delete the specified dns domain.""" - raise NotImplementedError() - - def get_dns_entries_by_address(self, context, address, domain): - """Get entries for address and domain.""" - raise NotImplementedError() - - def get_dns_entries_by_name(self, context, name, domain): - """Get entries for name and domain.""" - raise NotImplementedError() - - def create_private_dns_domain(self, context, domain, availability_zone): - """Create a private DNS domain with nova availability zone.""" - raise NotImplementedError() - - def create_public_dns_domain(self, context, domain, project=None): - """Create a private DNS domain with optional nova project.""" - raise NotImplementedError() - - -def _ensure_requested_network_ordering(accessor, unordered, preferred): - """Sort a list with respect to the preferred network ordering.""" - if preferred: - unordered.sort(key=lambda i: preferred.index(accessor(i))) diff --git a/juno-patches/nova/instance_mapping_uuid_patch/nova/objects/instance.py b/juno-patches/nova/instance_mapping_uuid_patch/nova/objects/instance.py deleted file mode 100644 index 92f33e5..0000000 --- a/juno-patches/nova/instance_mapping_uuid_patch/nova/objects/instance.py +++ /dev/null @@ -1,802 +0,0 @@ -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova.cells import opts as cells_opts -from nova.cells import rpcapi as cells_rpcapi -from nova.compute import flavors -from nova import db -from nova import exception -from nova.i18n import _LE -from nova import notifications -from nova import objects -from nova.objects import base -from nova.objects import fields -from nova.openstack.common import log as logging -from nova.openstack.common import timeutils -from nova import utils - -from oslo.config import cfg - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -# List of fields that can be joined in DB layer. -_INSTANCE_OPTIONAL_JOINED_FIELDS = ['metadata', 'system_metadata', - 'info_cache', 'security_groups', - 'pci_devices'] -# These are fields that are optional but don't translate to db columns -_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS = ['fault', 'numa_topology'] - -# These are fields that can be specified as expected_attrs -INSTANCE_OPTIONAL_ATTRS = (_INSTANCE_OPTIONAL_JOINED_FIELDS + - _INSTANCE_OPTIONAL_NON_COLUMN_FIELDS) -# These are fields that most query calls load by default -INSTANCE_DEFAULT_FIELDS = ['metadata', 'system_metadata', - 'info_cache', 'security_groups'] - - -def _expected_cols(expected_attrs): - """Return expected_attrs that are columns needing joining.""" - if not expected_attrs: - return expected_attrs - return [attr for attr in expected_attrs - if attr in _INSTANCE_OPTIONAL_JOINED_FIELDS] - - -class Instance(base.NovaPersistentObject, base.NovaObject): - # Version 1.0: Initial version - # Version 1.1: Added info_cache - # Version 1.2: Added security_groups - # 
Version 1.3: Added expected_vm_state and admin_state_reset to - # save() - # Version 1.4: Added locked_by and deprecated locked - # Version 1.5: Added cleaned - # Version 1.6: Added pci_devices - # Version 1.7: String attributes updated to support unicode - # Version 1.8: 'security_groups' and 'pci_devices' cannot be None - # Version 1.9: Make uuid a non-None real string - # Version 1.10: Added use_slave to refresh and get_by_uuid - # Version 1.11: Update instance from database during destroy - # Version 1.12: Added ephemeral_key_uuid - # Version 1.13: Added delete_metadata_key() - # Version 1.14: Added numa_topology - # Version 1.15: PciDeviceList 1.1 - VERSION = '1.15' - - fields = { - 'id': fields.IntegerField(), - - 'user_id': fields.StringField(nullable=True), - 'project_id': fields.StringField(nullable=True), - - 'image_ref': fields.StringField(nullable=True), - 'kernel_id': fields.StringField(nullable=True), - 'ramdisk_id': fields.StringField(nullable=True), - 'hostname': fields.StringField(nullable=True), - - 'launch_index': fields.IntegerField(nullable=True), - 'key_name': fields.StringField(nullable=True), - 'key_data': fields.StringField(nullable=True), - - 'power_state': fields.IntegerField(nullable=True), - 'vm_state': fields.StringField(nullable=True), - 'task_state': fields.StringField(nullable=True), - - 'memory_mb': fields.IntegerField(nullable=True), - 'vcpus': fields.IntegerField(nullable=True), - 'root_gb': fields.IntegerField(nullable=True), - 'ephemeral_gb': fields.IntegerField(nullable=True), - 'ephemeral_key_uuid': fields.UUIDField(nullable=True), - - 'host': fields.StringField(nullable=True), - 'node': fields.StringField(nullable=True), - - 'instance_type_id': fields.IntegerField(nullable=True), - - 'user_data': fields.StringField(nullable=True), - - 'reservation_id': fields.StringField(nullable=True), - - 'scheduled_at': fields.DateTimeField(nullable=True), - 'launched_at': fields.DateTimeField(nullable=True), - 'terminated_at': 
fields.DateTimeField(nullable=True), - - 'availability_zone': fields.StringField(nullable=True), - - 'display_name': fields.StringField(nullable=True), - 'display_description': fields.StringField(nullable=True), - - 'launched_on': fields.StringField(nullable=True), - - # NOTE(jdillaman): locked deprecated in favor of locked_by, - # to be removed in Icehouse - 'locked': fields.BooleanField(default=False), - 'locked_by': fields.StringField(nullable=True), - - 'os_type': fields.StringField(nullable=True), - 'architecture': fields.StringField(nullable=True), - 'vm_mode': fields.StringField(nullable=True), - 'uuid': fields.UUIDField(), - 'mapping_uuid': fields.UUIDField(nullable=True), - - 'root_device_name': fields.StringField(nullable=True), - 'default_ephemeral_device': fields.StringField(nullable=True), - 'default_swap_device': fields.StringField(nullable=True), - 'config_drive': fields.StringField(nullable=True), - - 'access_ip_v4': fields.IPV4AddressField(nullable=True), - 'access_ip_v6': fields.IPV6AddressField(nullable=True), - - 'auto_disk_config': fields.BooleanField(default=False), - 'progress': fields.IntegerField(nullable=True), - - 'shutdown_terminate': fields.BooleanField(default=False), - 'disable_terminate': fields.BooleanField(default=False), - - 'cell_name': fields.StringField(nullable=True), - - 'metadata': fields.DictOfStringsField(), - 'system_metadata': fields.DictOfNullableStringsField(), - - 'info_cache': fields.ObjectField('InstanceInfoCache', - nullable=True), - - 'security_groups': fields.ObjectField('SecurityGroupList'), - - 'fault': fields.ObjectField('InstanceFault', nullable=True), - - 'cleaned': fields.BooleanField(default=False), - - 'pci_devices': fields.ObjectField('PciDeviceList', nullable=True), - 'numa_topology': fields.ObjectField('InstanceNUMATopology', - nullable=True) - } - - obj_extra_fields = ['name'] - - def __init__(self, *args, **kwargs): - super(Instance, self).__init__(*args, **kwargs) - self._reset_metadata_tracking() - 
- def _reset_metadata_tracking(self, fields=None): - if fields is None or 'system_metadata' in fields: - self._orig_system_metadata = (dict(self.system_metadata) if - 'system_metadata' in self else {}) - if fields is None or 'metadata' in fields: - self._orig_metadata = (dict(self.metadata) if - 'metadata' in self else {}) - - def obj_reset_changes(self, fields=None): - super(Instance, self).obj_reset_changes(fields) - self._reset_metadata_tracking(fields=fields) - - def obj_what_changed(self): - changes = super(Instance, self).obj_what_changed() - if 'metadata' in self and self.metadata != self._orig_metadata: - changes.add('metadata') - if 'system_metadata' in self and (self.system_metadata != - self._orig_system_metadata): - changes.add('system_metadata') - return changes - - @classmethod - def _obj_from_primitive(cls, context, objver, primitive): - self = super(Instance, cls)._obj_from_primitive(context, objver, - primitive) - self._reset_metadata_tracking() - return self - - def obj_make_compatible(self, primitive, target_version): - target_version = utils.convert_version_to_tuple(target_version) - unicode_attributes = ['user_id', 'project_id', 'image_ref', - 'kernel_id', 'ramdisk_id', 'hostname', - 'key_name', 'key_data', 'host', 'node', - 'user_data', 'availability_zone', - 'display_name', 'display_description', - 'launched_on', 'locked_by', 'os_type', - 'architecture', 'vm_mode', 'root_device_name', - 'default_ephemeral_device', - 'default_swap_device', 'config_drive', - 'cell_name'] - if target_version < (1, 14) and 'numa_topology' in primitive: - del primitive['numa_topology'] - if target_version < (1, 10) and 'info_cache' in primitive: - # NOTE(danms): Instance <= 1.9 (havana) had info_cache 1.4 - self.info_cache.obj_make_compatible( - primitive['info_cache']['nova_object.data'], '1.4') - primitive['info_cache']['nova_object.version'] = '1.4' - if target_version < (1, 7): - # NOTE(danms): Before 1.7, we couldn't handle unicode in - # string fields, so 
squash it here - for field in [x for x in unicode_attributes if x in primitive - and primitive[x] is not None]: - primitive[field] = primitive[field].encode('ascii', 'replace') - if target_version < (1, 15) and 'pci_devices' in primitive: - # NOTE(baoli): Instance <= 1.14 (icehouse) had PciDeviceList 1.0 - self.pci_devices.obj_make_compatible( - primitive['pci_devices']['nova_object.data'], '1.0') - primitive['pci_devices']['nova_object.version'] = '1.0' - if target_version < (1, 6): - # NOTE(danms): Before 1.6 there was no pci_devices list - if 'pci_devices' in primitive: - del primitive['pci_devices'] - - @property - def name(self): - try: - base_name = CONF.instance_name_template % self.id - except TypeError: - # Support templates like "uuid-%(uuid)s", etc. - info = {} - # NOTE(russellb): Don't use self.iteritems() here, as it will - # result in infinite recursion on the name property. - for key in self.fields: - if key == 'name': - # NOTE(danms): prevent recursion - continue - elif not self.obj_attr_is_set(key): - # NOTE(danms): Don't trigger lazy-loads - continue - info[key] = self[key] - try: - base_name = CONF.instance_name_template % info - except KeyError: - base_name = self.uuid - return base_name - - @staticmethod - def _from_db_object(context, instance, db_inst, expected_attrs=None): - """Method to help with migration to objects. - - Converts a database entity to a formal object. 
- """ - instance._context = context - if expected_attrs is None: - expected_attrs = [] - # Most of the field names match right now, so be quick - for field in instance.fields: - if field in INSTANCE_OPTIONAL_ATTRS: - continue - elif field == 'deleted': - instance.deleted = db_inst['deleted'] == db_inst['id'] - elif field == 'cleaned': - instance.cleaned = db_inst['cleaned'] == 1 - else: - instance[field] = db_inst[field] - - if 'metadata' in expected_attrs: - instance['metadata'] = utils.instance_meta(db_inst) - if 'system_metadata' in expected_attrs: - instance['system_metadata'] = utils.instance_sys_meta(db_inst) - if 'fault' in expected_attrs: - instance['fault'] = ( - objects.InstanceFault.get_latest_for_instance( - context, instance.uuid)) - if 'numa_topology' in expected_attrs: - instance._load_numa_topology() - - if 'info_cache' in expected_attrs: - if db_inst['info_cache'] is None: - instance.info_cache = None - elif not instance.obj_attr_is_set('info_cache'): - # TODO(danms): If this ever happens on a backlevel instance - # passed to us by a backlevel service, things will break - instance.info_cache = objects.InstanceInfoCache(context) - if instance.info_cache is not None: - instance.info_cache._from_db_object(context, - instance.info_cache, - db_inst['info_cache']) - - # TODO(danms): If we are updating these on a backlevel instance, - # we'll end up sending back new versions of these objects (see - # above note for new info_caches - if 'pci_devices' in expected_attrs: - pci_devices = base.obj_make_list( - context, objects.PciDeviceList(context), - objects.PciDevice, db_inst['pci_devices']) - instance['pci_devices'] = pci_devices - if 'security_groups' in expected_attrs: - sec_groups = base.obj_make_list( - context, objects.SecurityGroupList(context), - objects.SecurityGroup, db_inst['security_groups']) - instance['security_groups'] = sec_groups - - instance.obj_reset_changes() - return instance - - @base.remotable_classmethod - def get_by_uuid(cls, 
context, uuid, expected_attrs=None, use_slave=False): - if expected_attrs is None: - expected_attrs = ['info_cache', 'security_groups'] - columns_to_join = _expected_cols(expected_attrs) - db_inst = db.instance_get_by_uuid(context, uuid, - columns_to_join=columns_to_join, - use_slave=use_slave) - return cls._from_db_object(context, cls(), db_inst, - expected_attrs) - - @base.remotable_classmethod - def get_by_id(cls, context, inst_id, expected_attrs=None): - if expected_attrs is None: - expected_attrs = ['info_cache', 'security_groups'] - columns_to_join = _expected_cols(expected_attrs) - db_inst = db.instance_get(context, inst_id, - columns_to_join=columns_to_join) - return cls._from_db_object(context, cls(), db_inst, - expected_attrs) - - @base.remotable - def create(self, context): - if self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='create', - reason='already created') - updates = self.obj_get_changes() - expected_attrs = [attr for attr in INSTANCE_DEFAULT_FIELDS - if attr in updates] - if 'security_groups' in updates: - updates['security_groups'] = [x.name for x in - updates['security_groups']] - if 'info_cache' in updates: - updates['info_cache'] = { - 'network_info': updates['info_cache'].network_info.json() - } - numa_topology = updates.pop('numa_topology', None) - db_inst = db.instance_create(context, updates) - if numa_topology: - expected_attrs.append('numa_topology') - numa_topology.instance_uuid = db_inst['uuid'] - numa_topology.create(context) - self._from_db_object(context, self, db_inst, expected_attrs) - - @base.remotable - def destroy(self, context): - if not self.obj_attr_is_set('id'): - raise exception.ObjectActionError(action='destroy', - reason='already destroyed') - if not self.obj_attr_is_set('uuid'): - raise exception.ObjectActionError(action='destroy', - reason='no uuid') - if not self.obj_attr_is_set('host') or not self.host: - # NOTE(danms): If our host is not set, avoid a race - constraint = 
db.constraint(host=db.equal_any(None)) - else: - constraint = None - - try: - db_inst = db.instance_destroy(context, self.uuid, - constraint=constraint) - self._from_db_object(context, self, db_inst) - except exception.ConstraintNotMet: - raise exception.ObjectActionError(action='destroy', - reason='host changed') - delattr(self, base.get_attrname('id')) - - def _save_info_cache(self, context): - self.info_cache.save(context) - - def _save_security_groups(self, context): - for secgroup in self.security_groups: - secgroup.save(context) - self.security_groups.obj_reset_changes() - - def _save_fault(self, context): - # NOTE(danms): I don't think we need to worry about this, do we? - pass - - def _save_numa_topology(self, context): - # NOTE(ndipanov): No need for this yet. - pass - - def _save_pci_devices(self, context): - # NOTE(yjiang5): All devices held by PCI tracker, only PCI tracker - # permitted to update the DB. all change to devices from here will - # be dropped. - pass - - @base.remotable - def save(self, context, expected_vm_state=None, - expected_task_state=None, admin_state_reset=False): - """Save updates to this instance - - Column-wise updates will be made based on the result of - self.what_changed(). If expected_task_state is provided, - it will be checked against the in-database copy of the - instance before updates are made. - - :param:context: Security context - :param:expected_task_state: Optional tuple of valid task states - for the instance to be in - :param:expected_vm_state: Optional tuple of valid vm states - for the instance to be in - :param admin_state_reset: True if admin API is forcing setting - of task_state/vm_state - - """ - - cell_type = cells_opts.get_cell_type() - if cell_type == 'api' and self.cell_name: - # NOTE(comstud): We need to stash a copy of ourselves - # before any updates are applied. When we call the save - # methods on nested objects, we will lose any changes to - # them. 
But we need to make sure child cells can tell - # what is changed. - # - # We also need to nuke any updates to vm_state and task_state - # unless admin_state_reset is True. compute cells are - # authoritative for their view of vm_state and task_state. - stale_instance = self.obj_clone() - - def _handle_cell_update_from_api(): - cells_api = cells_rpcapi.CellsAPI() - cells_api.instance_update_from_api(context, stale_instance, - expected_vm_state, - expected_task_state, - admin_state_reset) - else: - stale_instance = None - - updates = {} - changes = self.obj_what_changed() - for field in self.fields: - if (self.obj_attr_is_set(field) and - isinstance(self[field], base.NovaObject)): - try: - getattr(self, '_save_%s' % field)(context) - except AttributeError: - LOG.exception(_LE('No save handler for %s'), field, - instance=self) - elif field in changes: - updates[field] = self[field] - - if not updates: - if stale_instance: - _handle_cell_update_from_api() - return - - # Cleaned needs to be turned back into an int here - if 'cleaned' in updates: - if updates['cleaned']: - updates['cleaned'] = 1 - else: - updates['cleaned'] = 0 - - if expected_task_state is not None: - if (self.VERSION == '1.9' and - expected_task_state == 'image_snapshot'): - # NOTE(danms): Icehouse introduced a pending state which - # Havana doesn't know about. If we're an old instance, - # tolerate the pending state as well - expected_task_state = [ - expected_task_state, 'image_snapshot_pending'] - updates['expected_task_state'] = expected_task_state - if expected_vm_state is not None: - updates['expected_vm_state'] = expected_vm_state - - expected_attrs = [attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS - if self.obj_attr_is_set(attr)] - if 'pci_devices' in expected_attrs: - # NOTE(danms): We don't refresh pci_devices on save right now - expected_attrs.remove('pci_devices') - - # NOTE(alaski): We need to pull system_metadata for the - # notification.send_update() below. 
If we don't there's a KeyError - # when it tries to extract the flavor. - if 'system_metadata' not in expected_attrs: - expected_attrs.append('system_metadata') - old_ref, inst_ref = db.instance_update_and_get_original( - context, self.uuid, updates, update_cells=False, - columns_to_join=_expected_cols(expected_attrs)) - - if stale_instance: - _handle_cell_update_from_api() - elif cell_type == 'compute': - cells_api = cells_rpcapi.CellsAPI() - cells_api.instance_update_at_top(context, inst_ref) - - self._from_db_object(context, self, inst_ref, - expected_attrs=expected_attrs) - notifications.send_update(context, old_ref, inst_ref) - self.obj_reset_changes() - - @base.remotable - def refresh(self, context, use_slave=False): - extra = [field for field in INSTANCE_OPTIONAL_ATTRS - if self.obj_attr_is_set(field)] - current = self.__class__.get_by_uuid(context, uuid=self.uuid, - expected_attrs=extra, - use_slave=use_slave) - # NOTE(danms): We orphan the instance copy so we do not unexpectedly - # trigger a lazy-load (which would mean we failed to calculate the - # expected_attrs properly) - current._context = None - - for field in self.fields: - if self.obj_attr_is_set(field): - if field == 'info_cache': - self.info_cache.refresh() - # NOTE(danms): Make sure this shows up as touched - self.info_cache = self.info_cache - elif self[field] != current[field]: - self[field] = current[field] - self.obj_reset_changes() - - def _load_generic(self, attrname): - instance = self.__class__.get_by_uuid(self._context, - uuid=self.uuid, - expected_attrs=[attrname]) - - # NOTE(danms): Never allow us to recursively-load - if instance.obj_attr_is_set(attrname): - self[attrname] = instance[attrname] - else: - raise exception.ObjectActionError( - action='obj_load_attr', - reason='loading %s requires recursion' % attrname) - - def _load_fault(self): - self.fault = objects.InstanceFault.get_latest_for_instance( - self._context, self.uuid) - - def _load_numa_topology(self): - try: - 
self.numa_topology = \ - objects.InstanceNUMATopology.get_by_instance_uuid( - self._context, self.uuid) - except exception.NumaTopologyNotFound: - self.numa_topology = None - - def obj_load_attr(self, attrname): - if attrname not in INSTANCE_OPTIONAL_ATTRS: - raise exception.ObjectActionError( - action='obj_load_attr', - reason='attribute %s not lazy-loadable' % attrname) - if not self._context: - raise exception.OrphanedObjectError(method='obj_load_attr', - objtype=self.obj_name()) - - LOG.debug("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s", - {'attr': attrname, - 'name': self.obj_name(), - 'uuid': self.uuid, - }) - # FIXME(comstud): This should be optimized to only load the attr. - if attrname == 'fault': - # NOTE(danms): We handle fault differently here so that we - # can be more efficient - self._load_fault() - elif attrname == 'numa_topology': - self._load_numa_topology() - else: - self._load_generic(attrname) - self.obj_reset_changes([attrname]) - - def get_flavor(self, namespace=None): - prefix = ('%s_' % namespace) if namespace is not None else '' - - db_flavor = flavors.extract_flavor(self, prefix) - flavor = objects.Flavor(self._context) - for key in flavors.system_metadata_flavor_props: - flavor[key] = db_flavor[key] - return flavor - - def set_flavor(self, flavor, namespace=None): - prefix = ('%s_' % namespace) if namespace is not None else '' - - self.system_metadata = flavors.save_flavor_info( - self.system_metadata, flavor, prefix) - self.save() - - def delete_flavor(self, namespace): - self.system_metadata = flavors.delete_flavor_info( - self.system_metadata, "%s_" % namespace) - self.save() - - @base.remotable - def delete_metadata_key(self, context, key): - """Optimized metadata delete method. - - This provides a more efficient way to delete a single metadata - key, instead of just calling instance.save(). This should be called - with the key still present in self.metadata, which it will update - after completion. 
- """ - db.instance_metadata_delete(context, self.uuid, key) - md_was_changed = 'metadata' in self.obj_what_changed() - del self.metadata[key] - self._orig_metadata.pop(key, None) - instance_dict = base.obj_to_primitive(self) - notifications.send_update(context, instance_dict, instance_dict) - if not md_was_changed: - self.obj_reset_changes(['metadata']) - - -def _make_instance_list(context, inst_list, db_inst_list, expected_attrs): - get_fault = expected_attrs and 'fault' in expected_attrs - inst_faults = {} - if get_fault: - # Build an instance_uuid:latest-fault mapping - expected_attrs.remove('fault') - instance_uuids = [inst['uuid'] for inst in db_inst_list] - faults = objects.InstanceFaultList.get_by_instance_uuids( - context, instance_uuids) - for fault in faults: - if fault.instance_uuid not in inst_faults: - inst_faults[fault.instance_uuid] = fault - - inst_list.objects = [] - for db_inst in db_inst_list: - inst_obj = objects.Instance._from_db_object( - context, objects.Instance(context), db_inst, - expected_attrs=expected_attrs) - if get_fault: - inst_obj.fault = inst_faults.get(inst_obj.uuid, None) - inst_list.objects.append(inst_obj) - inst_list.obj_reset_changes() - return inst_list - - -class InstanceList(base.ObjectListBase, base.NovaObject): - # Version 1.0: Initial version - # Version 1.1: Added use_slave to get_by_host - # Instance <= version 1.9 - # Version 1.2: Instance <= version 1.11 - # Version 1.3: Added use_slave to get_by_filters - # Version 1.4: Instance <= version 1.12 - # Version 1.5: Added method get_active_by_window_joined. 
    # Version 1.6: Instance <= version 1.13
    # Version 1.7: Added use_slave to get_active_by_window_joined
    # Version 1.8: Instance <= version 1.14
    # Version 1.9: Instance <= version 1.15
    VERSION = '1.9'

    fields = {
        'objects': fields.ListOfObjectsField('Instance'),
    }
    # Maps each version of this list object to the maximum Instance (child)
    # version it may carry, so RPC can backlevel contained objects.
    child_versions = {
        '1.1': '1.9',
        # NOTE(danms): Instance was at 1.9 before we added this
        '1.2': '1.11',
        '1.3': '1.11',
        '1.4': '1.12',
        '1.5': '1.12',
        '1.6': '1.13',
        '1.7': '1.13',
        '1.8': '1.14',
        '1.9': '1.15',
    }

    @base.remotable_classmethod
    def get_by_filters(cls, context, filters,
                       sort_key='created_at', sort_dir='desc', limit=None,
                       marker=None, expected_attrs=None, use_slave=False):
        """Return instances matching the given DB filters, sorted/paged."""
        db_inst_list = db.instance_get_all_by_filters(
            context, filters, sort_key, sort_dir, limit=limit, marker=marker,
            columns_to_join=_expected_cols(expected_attrs),
            use_slave=use_slave)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)

    @base.remotable_classmethod
    def get_by_host(cls, context, host, expected_attrs=None, use_slave=False):
        """Return all instances on the given compute host."""
        db_inst_list = db.instance_get_all_by_host(
            context, host, columns_to_join=_expected_cols(expected_attrs),
            use_slave=use_slave)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)

    @base.remotable_classmethod
    def get_by_host_and_node(cls, context, host, node, expected_attrs=None):
        """Return all instances on the given host and hypervisor node."""
        db_inst_list = db.instance_get_all_by_host_and_node(
            context, host, node)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)

    @base.remotable_classmethod
    def get_by_host_and_not_type(cls, context, host, type_id=None,
                                 expected_attrs=None):
        """Return instances on a host excluding the given instance type."""
        db_inst_list = db.instance_get_all_by_host_and_not_type(
            context, host, type_id=type_id)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)

    @base.remotable_classmethod
    def get_hung_in_rebooting(cls, context, reboot_window,
                              expected_attrs=None):
        """Return instances stuck in REBOOTING longer than reboot_window."""
        db_inst_list = db.instance_get_all_hung_in_rebooting(context,
                                                             reboot_window)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)

    @base.remotable_classmethod
    def _get_active_by_window_joined(cls, context, begin, end=None,
                                     project_id=None, host=None,
                                     expected_attrs=None,
                                     use_slave=False):
        """Remotable half of get_active_by_window_joined.

        Receives begin/end as ISO-8601 strings (RPC-safe primitives).
        """
        # NOTE(mriedem): We need to convert the begin/end timestamp strings
        # to timezone-aware datetime objects for the DB API call.
        begin = timeutils.parse_isotime(begin)
        end = timeutils.parse_isotime(end) if end else None
        # NOTE(review): use_slave is accepted but not forwarded to the DB
        # API call below -- this query always hits the master. Confirm
        # whether that is intentional.
        db_inst_list = db.instance_get_active_by_window_joined(context,
                                                               begin,
                                                               end,
                                                               project_id,
                                                               host)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)

    @classmethod
    def get_active_by_window_joined(cls, context, begin, end=None,
                                    project_id=None, host=None,
                                    expected_attrs=None,
                                    use_slave=False):
        """Get instances and joins active during a certain time window.

        :param:context: nova request context
        :param:begin: datetime for the start of the time window
        :param:end: datetime for the end of the time window
        :param:project_id: used to filter instances by project
        :param:host: used to filter instances on a given compute host
        :param:expected_attrs: list of related fields that can be joined
        in the database layer when querying for instances
        :param use_slave if True, ship this query off to a DB slave
        :returns: InstanceList

        """
        # NOTE(mriedem): We have to convert the datetime objects to string
        # primitives for the remote call.
        begin = timeutils.isotime(begin)
        end = timeutils.isotime(end) if end else None
        return cls._get_active_by_window_joined(context, begin, end,
                                                project_id, host,
                                                expected_attrs,
                                                use_slave=use_slave)

    @base.remotable_classmethod
    def get_by_security_group_id(cls, context, security_group_id):
        """Return instances belonging to the given security group id."""
        db_secgroup = db.security_group_get(
            context, security_group_id,
            columns_to_join=['instances.info_cache',
                             'instances.system_metadata'])
        return _make_instance_list(context, cls(), db_secgroup['instances'],
                                   ['info_cache', 'system_metadata'])

    @classmethod
    def get_by_security_group(cls, context, security_group):
        """Convenience wrapper: look up by a security group object's id."""
        return cls.get_by_security_group_id(context, security_group.id)

    def fill_faults(self):
        """Batch query the database for our instances' faults.

        Sets .fault on every contained instance (explicitly None when no
        fault exists, to prevent a later lazy-load) and clears the 'fault'
        change flag on each.

        :returns: A list of instance uuids for which faults were found.
        """
        uuids = [inst.uuid for inst in self]
        faults = objects.InstanceFaultList.get_by_instance_uuids(
            self._context, uuids)
        faults_by_uuid = {}
        for fault in faults:
            # Keep only the first fault seen per instance uuid.
            if fault.instance_uuid not in faults_by_uuid:
                faults_by_uuid[fault.instance_uuid] = fault

        for instance in self:
            if instance.uuid in faults_by_uuid:
                instance.fault = faults_by_uuid[instance.uuid]
            else:
                # NOTE(danms): Otherwise the caller will cause a lazy-load
                # when checking it, and we know there are none
                instance.fault = None
            instance.obj_reset_changes(['fault'])

        # NOTE(review): returns dict.keys() (a list on Python 2), not a
        # plain list -- callers should treat it as an iterable of uuids.
        return faults_by_uuid.keys()