Migrate compute node resource information to Inventory objects

This does a live migration of compute node resource information from its
current location (columns on compute_nodes) to its new desired location:
records in the inventories table. It only does this once all control
services have been upgraded to a level that understands the new model.

Related to blueprint compute-node-inventory-newton

Change-Id: Ieda099d3f617713f09315b2b1e932a7c1d6f45c4
Dan Smith 2016-02-11 15:15:14 -08:00
parent 5826a13baf
commit f9b62dffe0
6 changed files with 361 additions and 32 deletions
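
In short, the write path is gated on the deployment's minimum service version:
resource totals keep going into the legacy compute_nodes columns until every
nova-api, nova-conductor and nova-scheduler reports SERVICE_VERSION >= 10,
after which ComputeNode.create()/save() copy them into Inventory records under
a per-node ResourceProvider and zero the legacy columns. Below is a minimal
sketch of that decision, condensed from the ComputeNode.save() path in the
diff; write_resources() is an illustrative wrapper, while the underscored
methods are the ones added by this commit:

from nova import objects


def _should_manage_inventory(context):
    # Gate on the lowest version reported by any control service so a
    # partially upgraded deployment keeps using the legacy columns.
    min_ver = objects.Service.get_minimum_version_multi(
        context, ['nova-api', 'nova-conductor', 'nova-scheduler'])
    return min_ver >= 10


def write_resources(node, updates):
    # 'updates' carries the legacy column values (vcpus, memory_mb,
    # local_gb). Once the gate opens they are written to inventory
    # records and zeroed in 'updates', marking the columns as migrated.
    if _should_manage_inventory(node._context):
        if not node._update_inventory(updates):
            # No inventory rows exist yet: first write after the upgrade.
            node._create_inventory(updates)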

View File

@ -13,7 +13,7 @@
"disabled_reason": null,
"report_count": 1,
"forced_down": false,
"version": 9
"version": 10
}
},
"event_type": "service.update",

View File

@ -21,6 +21,7 @@ from oslo_utils import versionutils
import nova.conf
from nova import db
from nova import exception
from nova.i18n import _LW
from nova import objects
from nova.objects import base
from nova.objects import fields
@ -165,6 +166,9 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
'supported_hv_specs',
'host',
'pci_device_pools',
'local_gb',
'memory_mb',
'vcpus',
])
fields = set(compute.fields) - special_cases
for key in fields:
@ -202,6 +206,13 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
value = 1.0
setattr(compute, key, value)
for key in ('vcpus', 'local_gb', 'memory_mb'):
inv_key = 'inv_%s' % key
if inv_key in db_compute and db_compute[inv_key] is not None:
setattr(compute, key, db_compute[inv_key])
else:
setattr(compute, key, db_compute[key])
stats = db_compute['stats']
if stats:
compute.stats = jsonutils.loads(stats)
@ -286,6 +297,112 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
pools = jsonutils.dumps(pools.obj_to_primitive())
updates['pci_stats'] = pools
def _should_manage_inventory(self):
related_binaries = ['nova-api', 'nova-conductor', 'nova-scheduler']
required_version = 10
min_ver = objects.Service.get_minimum_version_multi(self._context,
related_binaries)
return min_ver >= required_version
def _update_inventory(self, updates):
"""Update inventory records from legacy model values
:param updates: Legacy model update dict which will be modified when
we return
"""
# NOTE(danms): Here we update our inventory records with our
# resource information. Since this information is prepared in
# updates against our older compute_node columns, we need to
# zero those values after we have updated the inventory
# records so that it is clear that they have been migrated.
# We return True or False here based on whether we found
# inventory records to update. If not, then we need to signal
# to our caller that _create_inventory() needs to be called
# instead
inventory_list = \
objects.InventoryList.get_all_by_resource_provider_uuid(
self._context, self.uuid)
if not inventory_list:
return False
for inventory in inventory_list:
if inventory.resource_class == fields.ResourceClass.VCPU:
key = 'vcpus'
elif inventory.resource_class == fields.ResourceClass.MEMORY_MB:
key = 'memory_mb'
elif inventory.resource_class == fields.ResourceClass.DISK_GB:
key = 'local_gb'
else:
LOG.warning(_LW('Unknown inventory class %s for compute node'),
inventory.resource_class)
continue
if key in updates:
inventory.total = getattr(self, key)
updates[key] = 0
inventory.save()
return True
def _create_inventory(self, updates):
"""Create the initial inventory objects for this compute node.
This is only ever called once, either the first time a compute node
is created, or after an upgrade once the required services have
reached the required version.
:param updates: Legacy model update dict which will be modified when
we return
"""
rp = objects.ResourceProvider(context=self._context, uuid=self.uuid)
rp.create()
# NOTE(danms): Until we remove the columns from compute_nodes,
# we need to constantly zero out each value in our updates to
# signal that we wrote the value into inventory instead.
cpu = objects.Inventory(context=self._context,
resource_provider=rp,
resource_class=fields.ResourceClass.VCPU,
total=self.vcpus,
reserved=0,
min_unit=1,
max_unit=1,
step_size=1,
allocation_ratio=self.cpu_allocation_ratio)
cpu.create()
updates['vcpus'] = 0
mem = objects.Inventory(context=self._context,
resource_provider=rp,
resource_class=fields.ResourceClass.MEMORY_MB,
total=self.memory_mb,
reserved=0,
min_unit=1,
max_unit=1,
step_size=1,
allocation_ratio=self.ram_allocation_ratio)
mem.create()
updates['memory_mb'] = 0
# FIXME(danms): Eventually we want to not write this record
# if the compute host is on shared storage. We'll need some
# indication from it to that effect, so for now we always
# write it so that we can make all the usual machinery depend
# on these records instead of the legacy columns.
disk = objects.Inventory(context=self._context,
resource_provider=rp,
resource_class=fields.ResourceClass.DISK_GB,
total=self.local_gb,
reserved=0,
min_unit=1,
max_unit=1,
step_size=1,
allocation_ratio=self.disk_allocation_ratio)
disk.create()
updates['local_gb'] = 0
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
@ -294,13 +411,23 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
updates = self.obj_get_changes()
if 'uuid' not in updates:
updates['uuid'] = uuidutils.generate_uuid()
self.uuid = updates['uuid']
self._convert_stats_to_db_format(updates)
self._convert_host_ip_to_db_format(updates)
self._convert_supported_instances_to_db_format(updates)
self._convert_pci_stats_to_db_format(updates)
if self._should_manage_inventory():
self._create_inventory(updates)
db_compute = db.compute_node_create(self._context, updates)
# NOTE(danms): compute_node_create() operates on (and returns) the
# compute node model only. We need to get the full inventory-based
# result in order to satisfy _from_db_object(). So, we do a double
# query here. This can be removed in Newton once we're sure that all
# compute nodes are inventory-based
db_compute = db.compute_node_get(self._context, db_compute['id'])
self._from_db_object(self._context, self, db_compute)
@base.remotable
@ -314,7 +441,17 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
self._convert_supported_instances_to_db_format(updates)
self._convert_pci_stats_to_db_format(updates)
if self._should_manage_inventory():
if not self._update_inventory(updates):
# NOTE(danms): This only happens once
self._create_inventory(updates)
db_compute = db.compute_node_update(self._context, self.id, updates)
# NOTE(danms): compute_node_update() operates on (and returns) the
# compute node model only. We need to get the full inventory-based
# result in order to satisfy _from_db_object(). So, we do a double
# query here. This can be removed in Newton once we're sure that all
# compute nodes are inventory-based
db_compute = db.compute_node_get(self._context, self.id)
self._from_db_object(self._context, self, db_compute)
@base.remotable
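
Once a node has migrated, its totals live in the inventories rather than the
legacy columns (note how _from_db_object() above prefers the joined inv_*
values when they are present). As a usage sketch, the same data can be read
back through the InventoryList and ResourceClass APIs used above;
migrated_totals() is an illustrative helper, not part of this commit, and it
assumes an already configured Nova environment:

from nova import context as nova_context
from nova import objects
from nova.objects import fields


def migrated_totals(node_uuid):
    # Map inventory records back to the legacy column names, the same
    # correspondence _update_inventory() uses above.
    legacy_keys = {
        fields.ResourceClass.VCPU: 'vcpus',
        fields.ResourceClass.MEMORY_MB: 'memory_mb',
        fields.ResourceClass.DISK_GB: 'local_gb',
    }
    ctxt = nova_context.get_admin_context()
    inventories = objects.InventoryList.get_all_by_resource_provider_uuid(
        ctxt, node_uuid)
    return {legacy_keys[inv.resource_class]: inv.total
            for inv in inventories
            if inv.resource_class in legacy_keys}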

View File

@ -29,7 +29,7 @@ LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
SERVICE_VERSION = 9
SERVICE_VERSION = 10
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@ -71,6 +71,8 @@ SERVICE_VERSION_HISTORY = (
{'compute_rpc': '4.10'},
# Version 9: Allow block_migration and disk_over_commit to be None
{'compute_rpc': '4.11'},
# Version 10: Compute node conversion to Inventories
{'compute_rpc': '4.11'},
)
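
Because the gate keys off the minimum reported version, a deployment with even
one nova-api, nova-conductor or nova-scheduler still at version 9 or lower
continues to write the legacy columns; the first create()/save() after the
last control service is upgraded performs the migration for that node. A small
hedged check in the same spirit (illustrative snippet, not shipped with this
commit, assuming a configured Nova environment):

from nova import context as nova_context
from nova import objects

# Same call ComputeNode._should_manage_inventory() makes; 10 is the
# SERVICE_VERSION introduced above.
ctxt = nova_context.get_admin_context()
min_ver = objects.Service.get_minimum_version_multi(
    ctxt, ['nova-api', 'nova-conductor', 'nova-scheduler'])
print('inventory migration active: %s' % (min_ver >= 10))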

View File

@ -0,0 +1,143 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import context
from nova import db
from nova import objects
from nova.objects import fields
from nova import test
class ComputeNodeTestCase(test.TestCase):
def setUp(self):
super(ComputeNodeTestCase, self).setUp()
self.context = context.RequestContext('fake-user', 'fake-project')
self.cn = objects.ComputeNode(context=self.context,
memory_mb=512, local_gb=1000, vcpus=8,
vcpus_used=0, local_gb_used=0,
memory_mb_used=0, free_ram_mb=0,
free_disk_gb=0, hypervisor_type='danvm',
hypervisor_version=1, cpu_info='barf',
cpu_allocation_ratio=1.0,
ram_allocation_ratio=1.0,
disk_allocation_ratio=1.0)
@mock.patch('nova.objects.Service.get_minimum_version_multi')
def test_create_creates_inventories(self, mock_minver):
mock_minver.return_value = 10
self.cn.create()
self.assertEqual(512, self.cn.memory_mb)
self.assertEqual(1000, self.cn.local_gb)
self.assertEqual(8, self.cn.vcpus)
db_cn = db.compute_node_get(self.context, self.cn.id)
self.assertEqual(0, db_cn['memory_mb'])
self.assertEqual(0, db_cn['local_gb'])
self.assertEqual(0, db_cn['vcpus'])
inventories = objects.InventoryList.get_all_by_resource_provider_uuid(
self.context, self.cn.uuid)
self.assertEqual(3, len(inventories))
inv = {i.resource_class: i.total for i in inventories}
expected = {
fields.ResourceClass.DISK_GB: 1000,
fields.ResourceClass.MEMORY_MB: 512,
fields.ResourceClass.VCPU: 8,
}
self.assertEqual(expected, inv)
@mock.patch('nova.objects.Service.get_minimum_version_multi')
def test_save_updates_inventories(self, mock_minver):
mock_minver.return_value = 10
self.cn.create()
self.cn.memory_mb = 2048
self.cn.local_gb = 2000
self.cn.save()
self.assertEqual(2048, self.cn.memory_mb)
self.assertEqual(2000, self.cn.local_gb)
self.assertEqual(8, self.cn.vcpus)
db_cn = db.compute_node_get(self.context, self.cn.id)
self.assertEqual(0, db_cn['memory_mb'])
self.assertEqual(0, db_cn['local_gb'])
self.assertEqual(0, db_cn['vcpus'])
inventories = objects.InventoryList.get_all_by_resource_provider_uuid(
self.context, self.cn.uuid)
self.assertEqual(3, len(inventories))
inv = {i.resource_class: i.total for i in inventories}
expected = {
fields.ResourceClass.DISK_GB: 2000,
fields.ResourceClass.MEMORY_MB: 2048,
fields.ResourceClass.VCPU: 8,
}
self.assertEqual(expected, inv)
@mock.patch('nova.objects.Service.get_minimum_version_multi')
def test_save_creates_inventories(self, mock_minver):
mock_minver.return_value = 7
self.cn.create()
inventories = objects.InventoryList.get_all_by_resource_provider_uuid(
self.context, self.cn.uuid)
self.assertEqual(0, len(inventories))
mock_minver.return_value = 10
self.cn.memory_mb = 2048
self.cn.local_gb = 2000
self.cn.save()
self.assertEqual(2048, self.cn.memory_mb)
self.assertEqual(2000, self.cn.local_gb)
self.assertEqual(8, self.cn.vcpus)
db_cn = db.compute_node_get(self.context, self.cn.id)
self.assertEqual(0, db_cn['memory_mb'])
self.assertEqual(0, db_cn['local_gb'])
self.assertEqual(0, db_cn['vcpus'])
inventories = objects.InventoryList.get_all_by_resource_provider_uuid(
self.context, self.cn.uuid)
self.assertEqual(3, len(inventories))
inv = {i.resource_class: i.total for i in inventories}
expected = {
fields.ResourceClass.DISK_GB: 2000,
fields.ResourceClass.MEMORY_MB: 2048,
fields.ResourceClass.VCPU: 8,
}
self.assertEqual(expected, inv)
@mock.patch('nova.objects.Service.get_minimum_version_multi')
def test_create_honors_version(self, mock_minver):
mock_minver.return_value = 7
self.cn.create()
self.assertEqual(512, self.cn.memory_mb)
self.assertEqual(1000, self.cn.local_gb)
self.assertEqual(8, self.cn.vcpus)
db_cn = db.compute_node_get(self.context, self.cn.id)
self.assertEqual(512, db_cn['memory_mb'])
self.assertEqual(1000, db_cn['local_gb'])
self.assertEqual(8, db_cn['vcpus'])
inventories = objects.InventoryList.get_all_by_resource_provider_uuid(
self.context, self.cn.uuid)
self.assertEqual(0, len(inventories))
@mock.patch('nova.objects.Service.get_minimum_version_multi')
def test_save_honors_version(self, mock_minver):
mock_minver.return_value = 7
self.cn.create()
self.cn.memory_mb = 2048
self.cn.local_gb = 2000
self.cn.save()
self.assertEqual(2048, self.cn.memory_mb)
self.assertEqual(2000, self.cn.local_gb)
self.assertEqual(8, self.cn.vcpus)
db_cn = db.compute_node_get(self.context, self.cn.id)
self.assertEqual(2048, db_cn['memory_mb'])
self.assertEqual(2000, db_cn['local_gb'])
self.assertEqual(8, db_cn['vcpus'])
inventories = objects.InventoryList.get_all_by_resource_provider_uuid(
self.context, self.cn.uuid)
self.assertEqual(0, len(inventories))

View File

@ -526,12 +526,19 @@ class MissingComputeNodeTestCase(BaseTestCase):
self._fake_compute_node_get_by_host_and_nodename)
self.stub_out('nova.db.compute_node_create',
self._fake_create_compute_node)
self.stub_out('nova.db.compute_node_get',
self._fake_compute_node_get)
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def _fake_create_compute_node(self, context, values):
self.created = True
self._values = values
return self._create_compute_node(values)
def _fake_compute_node_get(self, context, id):
if self.created:
return self._create_compute_node(self._values)
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
@ -560,11 +567,14 @@ class BaseTrackerTestCase(BaseTestCase):
self.tracker = self._tracker()
self._migrations = {}
self._fake_inventories = {}
self.stub_out('nova.db.service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stub_out('nova.db.compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stub_out('nova.db.compute_node_get',
self._fake_compute_node_get)
self.stub_out('nova.db.compute_node_update',
self._fake_compute_node_update)
self.stub_out('nova.db.compute_node_delete',
@ -573,6 +583,10 @@ class BaseTrackerTestCase(BaseTestCase):
self._fake_migration_update)
self.stub_out('nova.db.migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
self.stub_out('nova.objects.resource_provider._create_inventory_in_db',
self._fake_inventory_create)
self.stub_out('nova.objects.resource_provider._create_rp_in_db',
self._fake_rp_create)
# Note that this must be called before the call to _init_tracker()
patcher = pci_fakes.fake_pci_whitelist()
@ -589,6 +603,9 @@ class BaseTrackerTestCase(BaseTestCase):
self.compute = self._create_compute_node()
return self.compute
def _fake_compute_node_get(self, ctx, id):
return self.compute
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
@ -596,6 +613,30 @@ class BaseTrackerTestCase(BaseTestCase):
self.compute.update(values)
return self.compute
def _fake_inventory_create(self, context, updates):
if self._fake_inventories:
new_id = max([x for x in self._fake_inventories.keys()]) + 1
else:
new_id = 1
updates['id'] = new_id
self._fake_inventories[new_id] = updates
legacy = {
fields.ResourceClass.VCPU: 'vcpus',
fields.ResourceClass.MEMORY_MB: 'memory_mb',
fields.ResourceClass.DISK_GB: 'local_gb',
}
legacy_key = legacy.get(fields.ResourceClass.from_index(
updates['resource_class_id']))
if legacy_key:
inv_key = 'inv_%s' % legacy_key
self.compute[inv_key] = updates['total']
return updates
def _fake_rp_create(self, context, updates):
return dict(updates, id=1)
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
@ -1076,7 +1117,8 @@ class _MoveClaimTestCase(BaseTrackerTestCase):
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get, mock_save):
@mock.patch('nova.objects.ComputeNode._create_inventory')
def test_additive_claims(self, mock_ci, mock_get, mock_save):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
@ -1098,7 +1140,8 @@ class _MoveClaimTestCase(BaseTrackerTestCase):
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_move_type_not_tracked(self, mock_get, mock_save):
@mock.patch('nova.objects.ComputeNode._create_inventory')
def test_move_type_not_tracked(self, mock_ci, mock_get, mock_save):
self.claim_method(self.context, self.instance, self.instance_type,
limits=self.limits, move_type="live-migration")
mock_save.assert_called_once_with()

View File

@ -227,7 +227,8 @@ class _TestComputeNodeObject(object):
compute_node.ComputeNode.get_first_node_by_host_for_old_compat,
self.context, 'fake')
def test_create(self):
@mock.patch('nova.db.compute_node_get', return_value=fake_compute_node)
def test_create(self, mock_get):
self.mox.StubOutWithMock(db, 'compute_node_create')
db.compute_node_create(
self.context,
@ -255,7 +256,8 @@ class _TestComputeNodeObject(object):
@mock.patch('nova.db.compute_node_create')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
def test_create_allocates_uuid(self, mock_gu, mock_create):
@mock.patch('nova.db.compute_node_get', return_value=fake_compute_node)
def test_create_allocates_uuid(self, mock_get, mock_gu, mock_create):
mock_create.return_value = fake_compute_node
mock_gu.return_value = fake_compute_node['uuid']
obj = objects.ComputeNode(context=self.context)
@ -264,7 +266,8 @@ class _TestComputeNodeObject(object):
mock_create.assert_called_once_with(
self.context, {'uuid': fake_compute_node['uuid']})
def test_recreate_fails(self):
@mock.patch('nova.db.compute_node_get', return_value=fake_compute_node)
def test_recreate_fails(self, mock_get):
self.mox.StubOutWithMock(db, 'compute_node_create')
db.compute_node_create(
self.context, {'service_id': 456,
@ -277,7 +280,8 @@ class _TestComputeNodeObject(object):
compute.create()
self.assertRaises(exception.ObjectActionError, compute.create)
def test_save(self):
@mock.patch('nova.db.compute_node_get', return_value=fake_compute_node)
def test_save(self, mock_get):
self.mox.StubOutWithMock(db, 'compute_node_update')
db.compute_node_update(
self.context, 123,
@ -309,42 +313,42 @@ class _TestComputeNodeObject(object):
objects.PciDevicePoolList(objects=[]).obj_to_primitive())
compute_dict = fake_compute_node.copy()
compute_dict['pci_stats'] = fake_pci
mock_get.return_value = compute_dict
with mock.patch.object(
db, 'compute_node_update',
return_value=compute_dict) as mock_compute_node_update:
compute = compute_node.ComputeNode(context=self.context)
compute.id = 123
compute.pci_device_pools = objects.PciDevicePoolList(objects=[])
compute.save()
self.compare_obj(compute, compute_dict,
subs=self.subs(),
comparators=self.comparators())
compute = compute_node.ComputeNode(context=self.context)
compute.id = 123
compute.pci_device_pools = objects.PciDevicePoolList(objects=[])
compute.save()
self.compare_obj(compute, compute_dict,
subs=self.subs(),
comparators=self.comparators())
mock_compute_node_update.assert_called_once_with(
mock_update.assert_called_once_with(
self.context, 123, {'pci_stats': fake_pci})
def test_save_pci_device_pools_null(self):
@mock.patch('nova.db.compute_node_get')
@mock.patch('nova.db.compute_node_update')
def test_save_pci_device_pools_null(self, mock_update, mock_get):
compute_dict = fake_compute_node.copy()
compute_dict['pci_stats'] = None
mock_get.return_value = compute_dict
with mock.patch.object(
db, 'compute_node_update',
return_value=compute_dict) as mock_compute_node_update:
compute = compute_node.ComputeNode(context=self.context)
compute.id = 123
compute.pci_device_pools = None
compute.save()
self.compare_obj(compute, compute_dict,
subs=self.subs(),
comparators=self.comparators())
compute = compute_node.ComputeNode(context=self.context)
compute.id = 123
compute.pci_device_pools = None
compute.save()
self.compare_obj(compute, compute_dict,
subs=self.subs(),
comparators=self.comparators())
mock_compute_node_update.assert_called_once_with(
mock_update.assert_called_once_with(
self.context, 123, {'pci_stats': None})
@mock.patch.object(db, 'compute_node_create',
return_value=fake_compute_node)
def test_set_id_failure(self, db_mock):
@mock.patch.object(db, 'compute_node_get',
return_value=fake_compute_node)
def test_set_id_failure(self, mock_get, db_mock):
compute = compute_node.ComputeNode(context=self.context,
uuid=fake_compute_node['uuid'])
compute.create()