VMware: fix bug when more than one datacenter exists

If more than one datacenter was defined on the VC, then spawning an
instance would result in an exception. The reason for this was that the
nova compute driver would not set the correct datacenter for the
selected datastore.

The fix also ensures that the correct folder is selected. This too was a
result of not selecting the correct folder for the datacenter.

The 'fake' configuration was updated to contain an additional data
center with its own datastore.

Closes-Bug: #1180044
Closes-Bug: #1214850

Co-authored-by: Shawn Hartsock <hartsocks@vmware.com>

Change-Id: Ib61811fffcbc80385efc3166c9e366fdaa6432bd
This commit is contained in:
Gary Kotton 2013-10-18 06:12:40 -07:00
parent ffe6bb5507
commit a25b2ac5f4
4 changed files with 151 additions and 74 deletions

View File

@ -42,7 +42,7 @@ class ConfigDriveTestCase(test.NoDBTestCase):
host_password='test_pass',
use_linked_clone=False, group='vmware')
self.flags(vnc_enabled=False)
vmwareapi_fake.reset()
vmwareapi_fake.reset(vc=True)
stubs.set_stubs(self.stubs)
nova.tests.image.fake.stub_out_image_service(self.stubs)
self.conn = driver.VMwareVCDriver(fake.FakeVirtAPI)
@ -117,6 +117,7 @@ class ConfigDriveTestCase(test.NoDBTestCase):
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
self.conn._vmops._attach_cdrom_to_vm(mox.IgnoreArg(),
mox.IgnoreArg(),

View File

@ -120,10 +120,11 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
self.user_id = 'fake'
self.project_id = 'fake'
self.node_name = 'test_url'
self.ds = 'ds1'
self.context = context.RequestContext(self.user_id, self.project_id)
vmwareapi_fake.reset()
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.set_stubs(self.stubs)
vmwareapi_fake.reset()
self.conn = driver.VMwareESXDriver(fake.FakeVirtAPI)
# NOTE(vish): none of the network plugging code is actually
# being tested
@ -274,8 +275,8 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
"""
self._create_vm()
inst_file_path = '[fake-ds] %s/fake_name.vmdk' % self.uuid
cache_file_path = '[fake-ds] vmware_base/fake_image_uuid.vmdk'
inst_file_path = '[%s] %s/fake_name.vmdk' % (self.ds, self.uuid)
cache_file_path = '[%s] vmware_base/fake_image_uuid.vmdk' % self.ds
self.assertTrue(vmwareapi_fake.get_file(inst_file_path))
self.assertTrue(vmwareapi_fake.get_file(cache_file_path))
@ -283,8 +284,8 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
"""Test image disk is cached when use_linked_clone is True."""
self.flags(use_linked_clone=True, group='vmware')
self._create_vm()
cache_file_path = '[fake-ds] vmware_base/fake_image_uuid.vmdk'
cache_root_path = '[fake-ds] vmware_base/fake_image_uuid.80.vmdk'
cache_file_path = '[%s] vmware_base/fake_image_uuid.vmdk' % self.ds
cache_root_path = '[%s] vmware_base/fake_image_uuid.80.vmdk' % self.ds
self.assertTrue(vmwareapi_fake.get_file(cache_file_path))
self.assertTrue(vmwareapi_fake.get_file(cache_root_path))
@ -328,8 +329,8 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
self.wait_task = self.conn._session._wait_for_task
self.call_method = self.conn._session._call_method
self.task_ref = None
cached_image = '[fake-ds] vmware_base/fake_image_uuid.80.vmdk'
tmp_file = '[fake-ds] vmware_base/fake_image_uuid.80-flat.vmdk'
cached_image = '[%s] vmware_base/fake_image_uuid.80.vmdk' % self.ds
tmp_file = '[%s] vmware_base/fake_image_uuid.80-flat.vmdk' % self.ds
def fake_wait_for_task(instance_uuid, task_ref):
if task_ref == self.task_ref:
@ -1093,9 +1094,14 @@ class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase):
self.flags(cluster_name=[cluster_name, cluster_name2],
task_poll_interval=10, datastore_regex='.*', group='vmware')
self.flags(vnc_enabled=False)
vmwareapi_fake.reset(vc=True)
self.conn = driver.VMwareVCDriver(None, False)
self.node_name = self.conn._resources.keys()[0]
self.node_name2 = self.conn._resources.keys()[1]
if cluster_name2 in self.node_name2:
self.ds = 'ds1'
else:
self.ds = 'ds2'
self.vnc_host = 'ha-host'
def tearDown(self):
@ -1209,11 +1215,11 @@ class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase):
# Check calls for delete vmdk and -flat.vmdk pair
self.conn._vmops._delete_datastore_file(
mox.IgnoreArg(),
"[fake-ds] vmware-tmp/%s-flat.vmdk" % uuid_str,
"[%s] vmware-tmp/%s-flat.vmdk" % (self.ds, uuid_str),
mox.IgnoreArg()).AndReturn(None)
self.conn._vmops._delete_datastore_file(
mox.IgnoreArg(),
"[fake-ds] vmware-tmp/%s.vmdk" % uuid_str,
"[%s] vmware-tmp/%s.vmdk" % (self.ds, uuid_str),
mox.IgnoreArg()).AndReturn(None)
self.mox.ReplayAll()
@ -1300,3 +1306,10 @@ class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase):
self._create_instance_in_the_db()
self.assertRaises(NotImplementedError, self.conn.unpause,
self.instance)
def test_datastore_dc_map(self):
vmops = self.conn._resources[self.node_name]['vmops']
self.assertEqual({}, vmops._datastore_dc_mapping)
self._create_vm()
# currently there are 2 data stores
self.assertEqual(2, len(vmops._datastore_dc_mapping))

View File

@ -49,8 +49,28 @@ def log_db_contents(msg=None):
{'text': msg or "", 'content': pprint.pformat(_db_content)})
def reset():
def reset(vc=False):
"""Resets the db contents."""
cleanup()
create_network()
create_host_network_system()
create_host_storage_system()
create_host()
ds_ref1 = create_datastore('ds1', 1024, 500)
if vc:
create_host()
ds_ref2 = create_datastore('ds2', 1024, 500)
create_datacenter('dc1', ds_ref1)
if vc:
create_datacenter('dc2', ds_ref2)
create_res_pool()
if vc:
create_cluster('test_cluster', ds_ref1)
create_cluster('test_cluster2', ds_ref2)
def cleanup():
"""Clear the db contents."""
for c in _CLASSES:
# We fake the datastore by keeping the file references as a list of
# names in the db
@ -58,22 +78,6 @@ def reset():
_db_content[c] = []
else:
_db_content[c] = {}
create_network()
create_host_network_system()
create_host_storage_system()
create_host()
create_host()
create_datacenter()
create_datastore()
create_res_pool()
create_cluster('test_cluster')
create_cluster('test_cluster2')
def cleanup():
"""Clear the db contents."""
for c in _CLASSES:
_db_content[c] = {}
def _create_object(table, table_obj):
@ -533,12 +537,12 @@ class ClusterComputeResource(ManagedObject):
class Datastore(ManagedObject):
"""Datastore class."""
def __init__(self, name="fake-ds"):
def __init__(self, name="fake-ds", capacity=1024, free=500):
super(Datastore, self).__init__("ds")
self.set("summary.type", "VMFS")
self.set("summary.name", name)
self.set("summary.capacity", unit.Ti)
self.set("summary.freeSpace", 500 * unit.Gi)
self.set("summary.capacity", capacity * unit.Gi)
self.set("summary.freeSpace", free * unit.Gi)
self.set("summary.accessible", True)
@ -715,7 +719,7 @@ class HostSystem(ManagedObject):
class Datacenter(ManagedObject):
"""Datacenter class."""
def __init__(self, name="ha-datacenter"):
def __init__(self, name="ha-datacenter", ds_ref=None):
super(Datacenter, self).__init__("dc")
self.set("name", name)
self.set("vmFolder", "vm_folder_ref")
@ -725,6 +729,9 @@ class Datacenter(ManagedObject):
network_do = DataObject()
network_do.ManagedObjectReference = [net_ref]
self.set("network", network_do)
datastore = DataObject()
datastore.ManagedObjectReference = [ds_ref]
self.set("datastore", datastore)
class Task(ManagedObject):
@ -754,14 +761,15 @@ def create_host():
_create_object('HostSystem', host_system)
def create_datacenter():
data_center = Datacenter()
def create_datacenter(name, ds_ref=None):
data_center = Datacenter(name, ds_ref)
_create_object('Datacenter', data_center)
def create_datastore():
data_store = Datastore()
def create_datastore(name, capacity, free):
data_store = Datastore(name, capacity, free)
_create_object('Datastore', data_store)
return data_store.obj
def create_res_pool():
@ -774,11 +782,11 @@ def create_network():
_create_object('Network', network)
def create_cluster(name):
def create_cluster(name, ds_ref):
cluster = ClusterComputeResource(name=name)
cluster._add_host(_get_object_refs("HostSystem")[0])
cluster._add_host(_get_object_refs("HostSystem")[1])
cluster._add_datastore(_get_object_refs("Datastore")[0])
cluster._add_datastore(ds_ref)
cluster._add_root_resource_pool(_get_object_refs("ResourcePool")[0])
_create_object('ClusterComputeResource', cluster)

View File

@ -22,6 +22,7 @@ Class for VM tasks like spawn, snapshot, suspend, resume etc.
"""
import base64
import collections
import copy
import os
import time
@ -79,6 +80,9 @@ VMWARE_LINKED_CLONE = 'vmware_linked_clone'
RESIZE_TOTAL_STEPS = 4
DcInfo = collections.namedtuple('DcInfo',
['ref', 'name', 'vmFolder'])
class VMwareVMOps(object):
"""Management class for VM-related tasks."""
@ -98,6 +102,7 @@ class VMwareVMOps(object):
self._rescue_suffix = '-rescue'
self._poll_rescue_last_ran = None
self._is_neutron = utils.is_neutron()
self._datastore_dc_mapping = {}
def list_instances(self):
"""Lists the VM instances that are registered with the ESX host."""
@ -206,6 +211,7 @@ class VMwareVMOps(object):
datastore_regex=self._datastore_regex)
data_store_ref = ds[0]
data_store_name = ds[1]
dc_info = self.get_datacenter_ref_and_name(data_store_ref)
#TODO(hartsocks): this pattern is confusing, reimplement as methods
# The use of nested functions in this file makes for a confusing and
@ -254,7 +260,6 @@ class VMwareVMOps(object):
raise exception.InstanceUnacceptable(instance_id=instance['uuid'],
reason=reason)
vm_folder_ref = self._get_vmfolder_ref()
node_mo_id = vm_util.get_mo_id_from_instance(instance)
res_pool_ref = vm_util.get_res_pool_ref(self._session,
self._cluster, node_mo_id)
@ -292,7 +297,7 @@ class VMwareVMOps(object):
# Create the VM on the ESX host
vm_create_task = self._session._call_method(
self._session._get_vim(),
"CreateVM_Task", vm_folder_ref,
"CreateVM_Task", dc_info.vmFolder,
config=config_spec, pool=res_pool_ref)
self._session._wait_for_task(instance['uuid'], vm_create_task)
@ -335,7 +340,7 @@ class VMwareVMOps(object):
"CreateVirtualDisk_Task",
service_content.virtualDiskManager,
name=uploaded_vmdk_path,
datacenter=dc_ref,
datacenter=dc_info.ref,
spec=vmdk_create_spec)
self._session._wait_for_task(instance['uuid'], vmdk_create_task)
LOG.debug(_("Created Virtual Disk of size %(vmdk_file_size_in_kb)s"
@ -364,7 +369,7 @@ class VMwareVMOps(object):
instance['image_ref'],
instance,
host=self._session._host_ip,
data_center_name=self._get_datacenter_ref_and_name()[1],
data_center_name=dc_info.name,
datastore_name=data_store_name,
cookies=cookies,
file_path=upload_vmdk_name)
@ -399,7 +404,7 @@ class VMwareVMOps(object):
"CopyVirtualDisk_Task",
service_content.virtualDiskManager,
sourceName=source,
sourceDatacenter=self._get_datacenter_ref_and_name()[0],
sourceDatacenter=dc_info.ref,
destName=dest,
destSpec=vmdk_copy_spec)
self._session._wait_for_task(instance['uuid'], vmdk_copy_task)
@ -448,14 +453,12 @@ class VMwareVMOps(object):
sparse_uploaded_vmdk_path = vm_util.build_datastore_path(
data_store_name,
sparse_uploaded_vmdk_name)
dc_ref = self._get_datacenter_ref_and_name()[0]
if disk_type != "sparse":
# Create a flat virtual disk and retain the metadata file.
_create_virtual_disk()
self._delete_datastore_file(instance,
flat_uploaded_vmdk_path,
dc_ref)
dc_info.ref)
_fetch_image_on_esx_datastore()
@ -466,7 +469,7 @@ class VMwareVMOps(object):
uploaded_vmdk_path)
self._delete_datastore_file(instance,
sparse_uploaded_vmdk_path,
dc_ref)
dc_info.ref)
else:
# linked clone base disk exists
if disk_type == "sparse":
@ -488,7 +491,7 @@ class VMwareVMOps(object):
root_vmdk_path = dest_vmdk_path
if root_gb_in_kb > vmdk_file_size_in_kb:
self._extend_virtual_disk(instance, root_gb_in_kb,
root_vmdk_path, dc_ref)
root_vmdk_path, dc_info.ref)
else:
root_vmdk_name = "%s/%s.%s.vmdk" % (upload_folder, upload_name,
root_gb)
@ -498,7 +501,6 @@ class VMwareVMOps(object):
data_store_ref, data_store_name,
upload_folder,
upload_name + ".%s.vmdk" % root_gb):
dc_ref = self._get_datacenter_ref_and_name()[0]
LOG.debug(_("Copying root disk of size %sGb"), root_gb)
copy_spec = self.get_copy_virtual_disk_spec(
client_factory, adapter_type, disk_type)
@ -507,14 +509,14 @@ class VMwareVMOps(object):
"CopyVirtualDisk_Task",
service_content.virtualDiskManager,
sourceName=uploaded_vmdk_path,
sourceDatacenter=dc_ref,
sourceDatacenter=dc_info.ref,
destName=root_vmdk_path,
destSpec=copy_spec)
self._session._wait_for_task(instance['uuid'],
vmdk_copy_task)
if root_gb_in_kb > vmdk_file_size_in_kb:
self._extend_virtual_disk(instance, root_gb_in_kb,
root_vmdk_path, dc_ref)
root_vmdk_path, dc_info.ref)
# Attach the root disk to the VM.
self._volumeops.attach_disk_to_vm(
@ -527,6 +529,7 @@ class VMwareVMOps(object):
injected_files,
admin_password,
data_store_name,
dc_info.name,
instance['uuid'],
cookies)
uploaded_iso_path = vm_util.build_datastore_path(
@ -558,7 +561,7 @@ class VMwareVMOps(object):
_power_on_vm()
def _create_config_drive(self, instance, injected_files, admin_password,
data_store_name, upload_folder, cookies):
data_store_name, dc_name, upload_folder, cookies):
if CONF.config_drive_format != 'iso9660':
reason = (_('Invalid config_drive_format "%s"') %
CONF.config_drive_format)
@ -577,8 +580,6 @@ class VMwareVMOps(object):
with utils.tempdir() as tmp_path:
tmp_file = os.path.join(tmp_path, 'configdrive.iso')
cdb.make_drive(tmp_file)
dc_name = self._get_datacenter_ref_and_name()[1]
upload_iso_path = "%s/configdrive.iso" % (
upload_folder)
vmware_images.upload_iso_to_datastore(
@ -746,9 +747,11 @@ class VMwareVMOps(object):
"vmware-tmp")
if not self._path_exists(ds_browser, tmp_folder_path):
self._mkdir(vm_util.build_datastore_path(datastore_name,
"vmware-tmp"))
"vmware-tmp"),
ds_ref)
return ds_ref
_check_if_tmp_folder_exists()
ds_ref = _check_if_tmp_folder_exists()
# Generate a random vmdk file name to which the coalesced vmdk content
# will be copied to. A random name is chosen so that we don't have
@ -758,7 +761,7 @@ class VMwareVMOps(object):
"vmware-tmp/%s.vmdk" % random_name)
dest_vmdk_data_file_path = vm_util.build_datastore_path(datastore_name,
"vmware-tmp/%s-flat.vmdk" % random_name)
dc_ref = self._get_datacenter_ref_and_name()[0]
dc_info = self.get_datacenter_ref_and_name(ds_ref)
def _copy_vmdk_content():
# Copy the contents of the disk (or disks, if there were snapshots
@ -773,9 +776,9 @@ class VMwareVMOps(object):
"CopyVirtualDisk_Task",
service_content.virtualDiskManager,
sourceName=vmdk_file_path_before_snapshot,
sourceDatacenter=dc_ref,
sourceDatacenter=dc_info.ref,
destName=dest_vmdk_file_path,
destDatacenter=dc_ref,
destDatacenter=dc_info.ref,
destSpec=copy_spec,
force=False)
self._session._wait_for_task(instance['uuid'], copy_disk_task)
@ -799,7 +802,7 @@ class VMwareVMOps(object):
adapter_type=adapter_type,
image_version=1,
host=self._session._host_ip,
data_center_name=self._get_datacenter_ref_and_name()[1],
data_center_name=dc_info.name,
datastore_name=datastore_name,
cookies=cookies,
file_path="vmware-tmp/%s-flat.vmdk" % random_name)
@ -821,7 +824,7 @@ class VMwareVMOps(object):
# is retained too by design since it makes little sense to remove
# it when the data disk it refers to still lingers.
for f in dest_vmdk_data_file_path, dest_vmdk_file_path:
self._delete_datastore_file(instance, f, dc_ref)
self._delete_datastore_file(instance, f, dc_info.ref)
_clean_temp_data()
@ -908,12 +911,14 @@ class VMwareVMOps(object):
"""
try:
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["config.files.vmPathName", "runtime.powerState"]
lst_properties = ["config.files.vmPathName", "runtime.powerState",
"datastore"]
props = self._session._call_method(vim_util,
"get_object_properties",
None, vm_ref, "VirtualMachine", lst_properties)
query = {'runtime.powerState': None,
'config.files.vmPathName': None}
'config.files.vmPathName': None,
'datastore': None}
self._get_values_from_object_properties(props, query)
pwr_state = query['runtime.powerState']
vm_config_pathname = query['config.files.vmPathName']
@ -949,13 +954,16 @@ class VMwareVMOps(object):
"datastore %(datastore_name)s") %
{'datastore_name': datastore_name},
instance=instance)
ds_ref_ret = query['datastore']
ds_ref = ds_ref_ret.ManagedObjectReference[0]
dc_info = self.get_datacenter_ref_and_name(ds_ref)
vim = self._session._get_vim()
delete_task = self._session._call_method(
vim,
"DeleteDatastoreFile_Task",
vim.get_service_content().fileManager,
name=dir_ds_compliant_path,
datacenter=self._get_datacenter_ref_and_name()[0])
datacenter=dc_info.ref)
self._session._wait_for_task(instance['uuid'], delete_task)
LOG.debug(_("Deleted contents of the VM from "
"datastore %(datastore_name)s") %
@ -1168,14 +1176,14 @@ class VMwareVMOps(object):
client_factory = self._session._get_vim().client.factory
rel_spec = vm_util.relocate_vm_spec(client_factory, ds_ref, host_ref)
clone_spec = vm_util.clone_vm_spec(client_factory, rel_spec)
vm_folder_ref = self._get_vmfolder_ref()
dc_info = self.get_datacenter_ref_and_name(ds_ref)
# 3. Clone VM on ESX host
LOG.debug(_("Cloning VM to host %s") % dest, instance=instance)
vm_clone_task = self._session._call_method(
self._session._get_vim(),
"CloneVM_Task", vm_ref,
folder=vm_folder_ref,
folder=dc_info.vmFolder,
name=instance['uuid'],
spec=clone_spec)
self._session._wait_for_task(instance['uuid'], vm_clone_task)
@ -1460,12 +1468,18 @@ class VMwareVMOps(object):
"port - %(port)s") % {'port': port},
instance=instance)
def _get_datacenter_ref_and_name(self):
def get_datacenter_ref_and_name(self, ds_ref):
"""Get the datacenter name and the reference."""
dc_obj = self._session._call_method(vim_util, "get_objects",
"Datacenter", ["name"])
vm_util._cancel_retrieve_if_necessary(self._session, dc_obj)
return dc_obj.objects[0].obj, dc_obj.objects[0].propSet[0].val
map = self._datastore_dc_mapping.get(ds_ref.value)
if not map:
dc_obj = self._session._call_method(vim_util, "get_objects",
"Datacenter", ["name"])
vm_util._cancel_retrieve_if_necessary(self._session, dc_obj)
map = DcInfo(ref=dc_obj.objects[0].obj,
name=dc_obj.objects[0].propSet[0].val,
vmFolder=self._get_vmfolder_ref())
self._datastore_dc_mapping[ds_ref.value] = map
return map
def _get_host_ref_from_name(self, host_name):
"""Get reference to the host with the name specified."""
@ -1533,17 +1547,17 @@ class VMwareVMOps(object):
task_info.result.file[0].path == file_name)
return True, file_exists
def _mkdir(self, ds_path):
def _mkdir(self, ds_path, ds_ref):
"""
Creates a directory at the path specified. If it is just "NAME",
then a directory with this name is created at the topmost level of the
DataStore.
"""
LOG.debug(_("Creating directory with path %s") % ds_path)
dc_ref = self._get_datacenter_ref_and_name()[0]
dc_info = self.get_datacenter_ref_and_name(ds_ref)
self._session._call_method(self._session._get_vim(), "MakeDirectory",
self._session._get_vim().get_service_content().fileManager,
name=ds_path, datacenter=dc_ref,
name=ds_path, datacenter=dc_info.ref,
createParentDirectories=False)
LOG.debug(_("Created directory with path %s") % ds_path)
@ -1561,7 +1575,8 @@ class VMwareVMOps(object):
folder_path,
file_name)
if not folder_exists:
self._mkdir(vm_util.build_datastore_path(ds_name, folder_name))
self._mkdir(vm_util.build_datastore_path(ds_name, folder_name),
ds_ref)
return file_exists
@ -1600,3 +1615,43 @@ class VMwareVCVMOps(VMwareVMOps):
# VirtualDiskManager.CopyVirtualDisk is called on VC. The behavior of a
# spec-less copy is to consolidate to the target disk while keeping its
# disk and adapter type unchanged.
def _update_datacenter_cache_from_objects(self, dcs):
"""Updates the datastore/datacenter cache."""
while dcs:
token = vm_util._get_token(dcs)
for dco in dcs.objects:
name = None
vmFolder = None
dc_ref = dco.obj
ds_refs = []
for p in dco.propSet:
if p.name == 'name':
name = p.val
if p.name == 'datastore':
datastore_refs = p.val.ManagedObjectReference
for ds in datastore_refs:
ds_refs.append(ds.value)
if p.name == 'vmFolder':
vmFolder = p.val
for ds_ref in ds_refs:
self._datastore_dc_mapping[ds_ref] = DcInfo(ref=dc_ref,
name=name, vmFolder=vmFolder)
if token:
dcs = self._session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
break
def get_datacenter_ref_and_name(self, ds_ref):
"""Get the datacenter name and the reference."""
dc_info = self._datastore_dc_mapping.get(ds_ref.value)
if not dc_info:
dcs = self._session._call_method(vim_util, "get_objects",
"Datacenter", ["name", "datastore", "vmFolder"])
self._update_datacenter_cache_from_objects(dcs)
dc_info = self._datastore_dc_mapping.get(ds_ref.value)
return dc_info