libvirt: add '_' prefix to remaining internal methods

Various methods are internal helpers and so should have a '_'
prefix to indicate that they are not part of the public virt
driver API.

Related-bug: #1333219
Change-Id: I2996ccd188cc34a924f01d86a18c747465b7383f
commit be97af9a30 (parent 9e61d37c19)
Author: Daniel P. Berrange
Date: 2014-06-20 14:38:21 +01:00
5 changed files with 124 additions and 115 deletions
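To illustrate the convention this commit applies (a minimal, hypothetical
sketch, not code from Nova itself): methods that belong to the public virt
driver API keep their plain names, while internal helpers such as the renamed
_get_guest_xml() carry a leading underscore so callers outside the driver know
not to depend on them.

    # Hypothetical sketch of the naming convention; not Nova code.
    class ExampleDriver(object):

        def spawn(self, context, instance):
            # Public virt driver API method: plain name, stable for callers.
            xml = self._get_guest_xml(context, instance)
            return xml

        def _get_guest_xml(self, context, instance):
            # Internal helper (formerly e.g. to_xml): the '_' prefix marks it
            # as private to the driver, not part of the public API.
            return "<domain/>"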


@ -646,8 +646,8 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'has_min_version')
libvirt_driver.LibvirtDriver.has_min_version = lambda x, y: False
'_has_min_version')
libvirt_driver.LibvirtDriver._has_min_version = lambda x, y: False
self.assertRaises(exception.PciDeviceDetachFailed,
conn._detach_pci_devices, None, pci_devices)
@ -678,8 +678,8 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'has_min_version')
libvirt_driver.LibvirtDriver.has_min_version = lambda x, y: True
'_has_min_version')
libvirt_driver.LibvirtDriver._has_min_version = lambda x, y: True
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'_get_guest_pci_device')
@ -722,8 +722,8 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'has_min_version')
libvirt_driver.LibvirtDriver.has_min_version = lambda x, y: True
'_has_min_version')
libvirt_driver.LibvirtDriver._has_min_version = lambda x, y: True
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'_get_guest_pci_device')
@ -813,8 +813,8 @@ class LibvirtConnTestCase(test.TestCase):
bdm = [{'connection_info': {'data': data}}]
bdi = {'block_device_mapping': bdm}
# Tests that the parameters to the to_xml method are sanitized for
# passwords when logged.
# Tests that the parameters to the _get_guest_xml method
# are sanitized for passwords when logged.
def fake_debug(*args, **kwargs):
if 'auth_password' in args[0]:
self.assertNotIn('scrubme', args[0])
@ -828,8 +828,9 @@ class LibvirtConnTestCase(test.TestCase):
) as (
debug_mock, conf_mock
):
conn.to_xml(self.context, self.test_instance, network_info={},
disk_info={}, image_meta={}, block_device_info=bdi)
conn._get_guest_xml(self.context, self.test_instance,
network_info={}, disk_info={},
image_meta={}, block_device_info=bdi)
# we don't care what the log message is, we just want to make sure
# our stub method is called which asserts the password is scrubbed
self.assertTrue(debug_mock.called)
@ -2722,7 +2723,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
devices = conn.get_all_block_devices()
devices = conn._get_all_block_devices()
self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
def test_snapshot_in_ami_format(self):
@ -3394,7 +3395,7 @@ class LibvirtConnTestCase(test.TestCase):
with contextlib.nested(
mock.patch.object(conn, 'volume_driver_method',
return_value=mock_conf),
mock.patch.object(conn, 'set_cache_mode')
mock.patch.object(conn, '_set_cache_mode')
) as (mock_volume_driver_method, mock_set_cache_mode):
for state in (power_state.RUNNING, power_state.PAUSED):
mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
@ -3455,7 +3456,8 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, instance_data)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref)
xml = conn.to_xml(self.context, instance_ref, network_info, disk_info)
xml = conn._get_guest_xml(self.context, instance_ref,
network_info, disk_info)
tree = etree.fromstring(xml)
interfaces = tree.findall("./devices/interface")
self.assertEqual(len(interfaces), 2)
@ -3522,7 +3524,8 @@ class LibvirtConnTestCase(test.TestCase):
network_info = _fake_network_info(self.stubs, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref)
xml = conn.to_xml(self.context, instance_ref, network_info, disk_info)
xml = conn._get_guest_xml(self.context, instance_ref,
network_info, disk_info)
tree = etree.fromstring(xml)
check = [
@ -3576,8 +3579,8 @@ class LibvirtConnTestCase(test.TestCase):
network_info = _fake_network_info(self.stubs, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref)
xml = conn.to_xml(self.context, instance_ref,
network_info, disk_info)
xml = conn._get_guest_xml(self.context, instance_ref,
network_info, disk_info)
tree = etree.fromstring(xml)
for i, (check, expected_result) in enumerate(checks):
@ -3614,8 +3617,8 @@ class LibvirtConnTestCase(test.TestCase):
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref)
xml = drv.to_xml(self.context, instance_ref,
network_info, disk_info, image_meta)
xml = drv._get_guest_xml(self.context, instance_ref,
network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for disk in disks:
@ -3628,8 +3631,8 @@ class LibvirtConnTestCase(test.TestCase):
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref)
xml = drv.to_xml(self.context, instance_ref,
network_info, disk_info, image_meta)
xml = drv._get_guest_xml(self.context, instance_ref,
network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for disk in disks:
@ -3646,9 +3649,9 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref,
block_device_info,
image_meta)
xml = drv.to_xml(self.context, instance_ref,
network_info, disk_info, image_meta,
block_device_info=block_device_info)
xml = drv._get_guest_xml(self.context, instance_ref,
network_info, disk_info, image_meta,
block_device_info=block_device_info)
tree = etree.fromstring(xml)
got_disks = tree.findall('./devices/disk')
@ -3674,8 +3677,8 @@ class LibvirtConnTestCase(test.TestCase):
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref)
xml = drv.to_xml(self.context, instance_ref,
network_info, disk_info, image_meta)
xml = drv._get_guest_xml(self.context, instance_ref,
network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
self.assertEqual(tree.find('./uuid').text,
instance_ref['uuid'])
@ -3838,8 +3841,9 @@ class LibvirtConnTestCase(test.TestCase):
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
rescue=rescue)
xml = conn.to_xml(self.context, instance_ref,
network_info, disk_info, rescue=rescue)
xml = conn._get_guest_xml(self.context, instance_ref,
network_info, disk_info,
rescue=rescue)
tree = etree.fromstring(xml)
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
@ -4673,7 +4677,7 @@ class LibvirtConnTestCase(test.TestCase):
instance = db.instance_create(self.context, instance_ref)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_get_guest_xml', fake_none)
self.stubs.Set(conn, '_create_image', fake_create_image)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
@ -4702,7 +4706,7 @@ class LibvirtConnTestCase(test.TestCase):
return {'state': power_state.RUNNING}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_get_guest_xml', fake_none)
self.stubs.Set(imagebackend.Image, 'cache', fake_cache)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
@ -4770,7 +4774,7 @@ class LibvirtConnTestCase(test.TestCase):
return FakeLibvirtPciDevice()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_get_guest_xml', fake_none)
self.stubs.Set(conn, '_create_image', fake_none)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
@ -4839,7 +4843,7 @@ class LibvirtConnTestCase(test.TestCase):
instance['os_type'] = os_type
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_get_guest_xml', fake_none)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
if mkfs:
@ -4852,7 +4856,8 @@ class LibvirtConnTestCase(test.TestCase):
None,
image_meta)
conn._create_image(context, instance, disk_info['mapping'])
conn.to_xml(self.context, instance, None, disk_info, image_meta)
conn._get_guest_xml(self.context, instance, None,
disk_info, image_meta)
wantFiles = [
{'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
@ -4922,7 +4927,7 @@ class LibvirtConnTestCase(test.TestCase):
instance = db.instance_create(self.context, instance_ref)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, 'to_xml', fake_none)
self.stubs.Set(conn, '_get_guest_xml', fake_none)
self.stubs.Set(conn, '_create_domain_and_network', fake_none)
self.stubs.Set(conn, 'get_info', fake_get_info)
@ -4932,7 +4937,8 @@ class LibvirtConnTestCase(test.TestCase):
None,
image_meta)
conn._create_image(context, instance, disk_info['mapping'])
conn.to_xml(self.context, instance, None, disk_info, image_meta)
conn._get_guest_xml(self.context, instance, None,
disk_info, image_meta)
wantFiles = [
{'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
@ -5521,7 +5527,7 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, '_destroy')
self.mox.StubOutWithMock(conn, 'get_instance_disk_info')
self.mox.StubOutWithMock(conn, 'to_xml')
self.mox.StubOutWithMock(conn, '_get_guest_xml')
self.mox.StubOutWithMock(conn, '_create_images_and_backing')
self.mox.StubOutWithMock(conn, '_create_domain_and_network')
@ -5538,9 +5544,9 @@ class LibvirtConnTestCase(test.TestCase):
conn._destroy(instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance, block_device_info)
conn.to_xml(self.context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True).AndReturn(dummyxml)
conn._get_guest_xml(self.context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True).AndReturn(dummyxml)
disk_info_json = '[{"virt_disk_size": 2}]'
conn.get_instance_disk_info(instance["name"], dummyxml,
block_device_info).AndReturn(disk_info_json)
@ -6482,10 +6488,10 @@ class LibvirtConnTestCase(test.TestCase):
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conn = driver._conn
self.mox.StubOutWithMock(driver, 'list_instance_ids')
self.mox.StubOutWithMock(driver, '_list_instance_ids')
conn.lookupByID = self.mox.CreateMockAnything()
driver.list_instance_ids().AndReturn([1, 2])
driver._list_instance_ids().AndReturn([1, 2])
conn.lookupByID(1).AndReturn(DiagFakeDomain(None))
conn.lookupByID(2).AndReturn(DiagFakeDomain(5))
@ -6507,10 +6513,10 @@ class LibvirtConnTestCase(test.TestCase):
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conn = driver._conn
self.mox.StubOutWithMock(driver, 'list_instance_ids')
self.mox.StubOutWithMock(driver, '_list_instance_ids')
conn.lookupByID = self.mox.CreateMockAnything()
driver.list_instance_ids().AndReturn([1])
driver._list_instance_ids().AndReturn([1])
conn.lookupByID(1).AndReturn(DiagFakeDomain())
self.mox.ReplayAll()
@ -6629,7 +6635,7 @@ class LibvirtConnTestCase(test.TestCase):
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
conn.set_cache_mode(fake_conf)
conn._set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'directsync')
def test_set_cache_mode_invalid_mode(self):
@ -6638,7 +6644,7 @@ class LibvirtConnTestCase(test.TestCase):
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
conn.set_cache_mode(fake_conf)
conn._set_cache_mode(fake_conf)
self.assertIsNone(fake_conf.driver_cache)
def test_set_cache_mode_invalid_object(self):
@ -6647,7 +6653,7 @@ class LibvirtConnTestCase(test.TestCase):
fake_conf = FakeConfigGuest()
fake_conf.driver_cache = 'fake'
conn.set_cache_mode(fake_conf)
conn._set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'fake')
def _test_shared_storage_detection(self, is_same):
@ -7140,7 +7146,7 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn,
'to_xml',
'_get_guest_xml',
fake_to_xml)
self.stubs.Set(conn,
'_lookup_by_name',
@ -8793,7 +8799,7 @@ class LibvirtDriverTestCase(test.TestCase):
self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
self.stubs.Set(libvirt_driver.disk, 'can_resize_image',
fake_can_resize_image)
self.stubs.Set(self.libvirtconnection, 'to_xml', fake_to_xml)
self.stubs.Set(self.libvirtconnection, '_get_guest_xml', fake_to_xml)
self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
self.stubs.Set(self.libvirtconnection, '_create_image',
fake_create_image)
@ -8853,7 +8859,7 @@ class LibvirtDriverTestCase(test.TestCase):
block_device_info=None):
return ""
self.stubs.Set(self.libvirtconnection, 'to_xml', fake_to_xml)
self.stubs.Set(self.libvirtconnection, '_get_guest_xml', fake_to_xml)
self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
self.stubs.Set(utils, 'execute', fake_execute)
fw = base_firewall.NoopFirewallDriver()
@ -8902,7 +8908,8 @@ class LibvirtDriverTestCase(test.TestCase):
self.mox.StubOutWithMock(utils, 'execute')
self.stubs.Set(blockinfo, 'get_disk_info', lambda *a: None)
self.stubs.Set(self.libvirtconnection, 'to_xml', lambda *a, **k: None)
self.stubs.Set(self.libvirtconnection, '_get_guest_xml',
lambda *a, **k: None)
self.stubs.Set(self.libvirtconnection, '_create_domain_and_network',
lambda *a: None)
self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
@ -9573,14 +9580,14 @@ class LibvirtVolumeSnapshotTestCase(test.TestCase):
domain.XMLDesc(0).AndReturn(self.dom_xml)
self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
self.mox.StubOutWithMock(self.conn, 'has_min_version')
self.mox.StubOutWithMock(self.conn, '_has_min_version')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.mox.StubOutWithMock(domain, 'blockJobInfo')
self.conn._lookup_by_name('instance-%s' % instance['id']).\
AndReturn(domain)
self.conn.has_min_version(mox.IgnoreArg()).AndReturn(True)
self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
domain.blockRebase('vda', 'snap.img', 0, 0)
@ -9605,14 +9612,14 @@ class LibvirtVolumeSnapshotTestCase(test.TestCase):
domain.XMLDesc(0).AndReturn(self.dom_xml)
self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
self.mox.StubOutWithMock(self.conn, 'has_min_version')
self.mox.StubOutWithMock(self.conn, '_has_min_version')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.mox.StubOutWithMock(domain, 'blockJobInfo')
self.conn._lookup_by_name('instance-%s' % instance['id']).\
AndReturn(domain)
self.conn.has_min_version(mox.IgnoreArg()).AndReturn(True)
self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
domain.blockCommit('vda', 'other-snap.img', 'snap.img', 0, 0)
@ -9696,9 +9703,9 @@ class LibvirtVolumeSnapshotTestCase(test.TestCase):
self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
self.mox.StubOutWithMock(self.conn, '_volume_api')
self.mox.StubOutWithMock(self.conn, 'has_min_version')
self.mox.StubOutWithMock(self.conn, '_has_min_version')
self.conn.has_min_version(mox.IgnoreArg()).AndReturn(True)
self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
self.conn._volume_api.update_snapshot_status(
self.c, self.snapshot_id, 'error_deleting')


@ -57,7 +57,7 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
def _get_hypervisor_type(self):
return self.hyperv
def get_all_block_devices(self):
def _get_all_block_devices(self):
return []
self.fake_conn = FakeLibvirtDriver(fake.FakeVirtAPI())
@ -281,7 +281,7 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
devs = ['/dev/disk/by-path/ip-%s-iscsi-%s-lun-2' % (self.location,
self.iqn)]
self.stubs.Set(self.fake_conn, 'get_all_block_devices', lambda: devs)
self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
vol = {'id': 1, 'name': self.name}
connection_info = self.iscsi_connection(vol, self.location, self.iqn)
conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
@ -311,7 +311,7 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
self.iqn)]
with contextlib.nested(
mock.patch.object(os.path, 'exists', return_value=True),
mock.patch.object(self.fake_conn, 'get_all_block_devices',
mock.patch.object(self.fake_conn, '_get_all_block_devices',
return_value=devs),
mock.patch.object(libvirt_driver, '_rescan_multipath'),
mock.patch.object(libvirt_driver, '_run_multipath'),
@ -512,7 +512,7 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
self.flags(iscsi_use_multipath=True, group='libvirt')
self.stubs.Set(os.path, 'exists', lambda x: True)
devs = ['/dev/mapper/sda', '/dev/mapper/sdb']
self.stubs.Set(self.fake_conn, 'get_all_block_devices', lambda: devs)
self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
connection_info = self.iscsi_connection(self.vol, self.location,
self.iqn)
@ -549,7 +549,7 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
lambda x: _get_multipath_device_name(x)
block_devs = ['/dev/disks/by-path/%s-iscsi-%s-lun-2' % (location, iqn)]
self.stubs.Set(self.fake_conn, 'get_all_block_devices',
self.stubs.Set(self.fake_conn, '_get_all_block_devices',
lambda: block_devs)
vol = {'id': 1, 'name': name}
@ -589,7 +589,7 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
dev = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (self.location,
self.iqn)
devs = [dev0, dev]
self.stubs.Set(self.fake_conn, 'get_all_block_devices', lambda: devs)
self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
connection_info = self.iscsi_connection(self.vol, self.location,
self.iqn)
mpdev_filepath = '/dev/mapper/foo'
@ -607,7 +607,7 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
self.stubs.Set(os.path, 'exists', lambda x: True)
self.stubs.Set(time, 'sleep', lambda x: None)
devs = ['/dev/mapper/sda', '/dev/mapper/sdb']
self.stubs.Set(self.fake_conn, 'get_all_block_devices', lambda: devs)
self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
libvirt_driver = volume.LibvirtISERVolumeDriver(self.fake_conn)
name = 'volume-00000001'
location = '10.0.2.15:3260'
@ -648,7 +648,7 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
vol = {'id': 1, 'name': name}
dev = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
devs = [dev0, dev]
self.stubs.Set(self.fake_conn, 'get_all_block_devices', lambda: devs)
self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
self.stubs.Set(libvirt_driver, '_get_iscsi_devices', lambda: [])
connection_info = self.iser_connection(vol, location, iqn)
mpdev_filepath = '/dev/mapper/foo'


@ -288,7 +288,7 @@ class LibvirtVolumeDriver(VolumeDriver):
LOG.warn(_('detach volume could not find tid for %s'), iqn,
instance=instance)
def get_all_block_devices(self):
def _get_all_block_devices(self):
"""Return all block devices in use on this node."""
return _list_backingstore_path()


@ -395,7 +395,7 @@ class LibvirtDriver(driver.ComputeDriver):
self._host_state = HostState(self)
return self._host_state
def set_cache_mode(self, conf):
def _set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
@ -408,7 +408,7 @@ class LibvirtDriver(driver.ComputeDriver):
conf.driver_cache = cache_mode
@staticmethod
def _has_min_version(conn, lv_ver=None, hv_ver=None, hv_type=None):
def _conn_has_min_version(conn, lv_ver=None, hv_ver=None, hv_type=None):
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
@ -429,8 +429,8 @@ class LibvirtDriver(driver.ComputeDriver):
except Exception:
return False
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._has_min_version(self._conn, lv_ver, hv_ver, hv_type)
def _has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._conn_has_min_version(self._conn, lv_ver, hv_ver, hv_type)
def _native_thread(self):
"""Receives async events coming in from libvirtd.
@ -618,7 +618,7 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt.virEventRegisterDefaultImpl()
self._do_quality_warnings()
if not self.has_min_version(MIN_LIBVIRT_VERSION):
if not self._has_min_version(MIN_LIBVIRT_VERSION):
major = MIN_LIBVIRT_VERSION[0]
minor = MIN_LIBVIRT_VERSION[1]
micro = MIN_LIBVIRT_VERSION[2]
@ -768,14 +768,14 @@ class LibvirtDriver(driver.ComputeDriver):
return False
# TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed.
def list_instance_ids(self):
def _list_instance_ids(self):
if self._conn.numOfDomains() == 0:
return []
return self._conn.listDomainsID()
def list_instances(self):
names = []
for domain_id in self.list_instance_ids():
for domain_id in self._list_instance_ids():
try:
# We skip domains with ID 0 (hypervisors).
if domain_id != 0:
@ -793,7 +793,7 @@ class LibvirtDriver(driver.ComputeDriver):
def list_instance_uuids(self):
uuids = set()
for domain_id in self.list_instance_ids():
for domain_id in self._list_instance_ids():
try:
# We skip domains with ID 0 (hypervisors).
if domain_id != 0:
@ -1189,7 +1189,7 @@ class LibvirtDriver(driver.ComputeDriver):
"block size") % CONF.libvirt.virt_type
raise exception.InvalidHypervisorType(msg)
if not self.has_min_version(MIN_LIBVIRT_BLOCKIO_VERSION):
if not self._has_min_version(MIN_LIBVIRT_BLOCKIO_VERSION):
ver = ".".join([str(x) for x in MIN_LIBVIRT_BLOCKIO_VERSION])
msg = _("Volume sets block size, but libvirt '%s' or later is "
"required.") % ver
@ -1199,7 +1199,7 @@ class LibvirtDriver(driver.ComputeDriver):
conf = self.volume_driver_method('connect_volume',
connection_info,
disk_info)
self.set_cache_mode(conf)
self._set_cache_mode(conf)
try:
# NOTE(vish): We can always affect config because our
@ -1319,9 +1319,9 @@ class LibvirtDriver(driver.ComputeDriver):
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info)
xml = self.to_xml(nova_context.get_admin_context(),
instance, network_info, disk_info,
block_device_info=block_device_info)
xml = self._get_guest_xml(nova_context.get_admin_context(),
instance, network_info, disk_info,
block_device_info=block_device_info)
return xml
def detach_volume(self, connection_info, instance, mountpoint,
@ -1478,9 +1478,9 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0.
# These restrictions can be relaxed as other configurations
# can be validated.
if self.has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,
MIN_QEMU_LIVESNAPSHOT_VERSION,
REQ_HYPERVISOR_LIVESNAPSHOT) \
if self._has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,
MIN_QEMU_LIVESNAPSHOT_VERSION,
REQ_HYPERVISOR_LIVESNAPSHOT) \
and not source_format == "lvm" and not source_format == 'rbd':
live_snapshot = True
# Abort is an idempotent operation, so make sure any block
@ -1848,7 +1848,7 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt 1.0.5.5 vs. 1.0.5.6 here.)
"""
if not self.has_min_version(MIN_LIBVIRT_BLOCKJOBINFO_VERSION):
if not self._has_min_version(MIN_LIBVIRT_BLOCKJOBINFO_VERSION):
ver = '.'.join([str(x) for x in MIN_LIBVIRT_BLOCKJOBINFO_VERSION])
msg = _("Libvirt '%s' or later is required for online deletion "
"of volume snapshots.") % ver
@ -2064,9 +2064,9 @@ class LibvirtDriver(driver.ComputeDriver):
# regenerate raw backend images, however, so when it
# does we need to (re)generate the xml after the images
# are in place.
xml = self.to_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
# NOTE (rmk): Re-populate any missing backing files.
disk_info_json = self.get_instance_disk_info(instance['name'], xml,
@ -2196,9 +2196,9 @@ class LibvirtDriver(driver.ComputeDriver):
'.rescue', rescue_images,
network_info=network_info,
admin_pass=rescue_password)
xml = self.to_xml(context, instance, network_info, disk_info,
image_meta, rescue=rescue_images,
write_to_disk=True)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta, rescue=rescue_images,
write_to_disk=True)
self._destroy(instance)
self._create_domain(xml)
@ -2220,7 +2220,7 @@ class LibvirtDriver(driver.ComputeDriver):
pass
def _enable_hairpin(self, xml):
interfaces = self.get_interfaces(xml)
interfaces = self._get_interfaces(xml)
for interface in interfaces:
utils.execute('tee',
'/sys/class/net/%s/brport/hairpin_mode' % interface,
@ -2242,10 +2242,10 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
xml = self.to_xml(context, instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
write_to_disk=True)
xml = self._get_guest_xml(context, instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info)
@ -2741,7 +2741,7 @@ class LibvirtDriver(driver.ComputeDriver):
# for libvirt version < 1.1.1, this is race condition
# so forbid detach if not had this version
if not self.has_min_version(MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
if not self._has_min_version(MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
if pci_devs:
reason = (_("Detaching PCI devices with libvirt < %(ver)s"
" is not permitted") %
@ -2929,7 +2929,7 @@ class LibvirtDriver(driver.ComputeDriver):
# TODO(berrange): in the future, when MIN_LIBVIRT_VERSION is
# updated to be at least this new, we can kill off the elif
# blocks here
if self.has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION):
if self._has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.mode = mode
cpu.model = model
@ -3045,7 +3045,7 @@ class LibvirtDriver(driver.ComputeDriver):
devices.append(diskconfig)
for d in devices:
self.set_cache_mode(d)
self._set_cache_mode(d)
if (image_meta and
image_meta.get('properties', {}).get('hw_scsi_model')):
@ -3423,9 +3423,9 @@ class LibvirtDriver(driver.ComputeDriver):
return guest
def to_xml(self, context, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
def _get_guest_xml(self, context, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
# We should get image metadata every time for generating xml
if image_meta is None:
image_ref = instance['image_ref']
@ -3435,7 +3435,7 @@ class LibvirtDriver(driver.ComputeDriver):
# this ahead of time so that we don't acquire it while also
# holding the logging lock.
network_info_str = str(network_info)
msg = ('Start to_xml '
msg = ('Start _get_guest_xml '
'network_info=%(network_info)s '
'disk_info=%(disk_info)s '
'image_meta=%(image_meta)s rescue=%(rescue)s '
@ -3455,7 +3455,8 @@ class LibvirtDriver(driver.ComputeDriver):
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug('End to_xml xml=%(xml)s', {'xml': xml}, instance=instance)
LOG.debug('End _get_guest_xml xml=%(xml)s',
{'xml': xml}, instance=instance)
return xml
def _lookup_by_id(self, instance_id):
@ -3683,10 +3684,10 @@ class LibvirtDriver(driver.ComputeDriver):
domain.resume()
return domain
def get_all_block_devices(self):
def _get_all_block_devices(self):
"""Return all block devices in use on this node."""
devices = []
for dom_id in self.list_instance_ids():
for dom_id in self._list_instance_ids():
try:
domain = self._lookup_by_id(dom_id)
doc = etree.fromstring(domain.XMLDesc(0))
@ -3705,7 +3706,7 @@ class LibvirtDriver(driver.ComputeDriver):
devices.append(child.get('dev'))
return devices
def get_interfaces(self, xml):
def _get_interfaces(self, xml):
"""Note that this function takes a domain xml.
Returns a list of all network interfaces for this instance.
@ -3803,7 +3804,7 @@ class LibvirtDriver(driver.ComputeDriver):
if CONF.libvirt.virt_type == 'lxc':
return total + 1
dom_ids = self.list_instance_ids()
dom_ids = self._list_instance_ids()
for dom_id in dom_ids:
try:
dom = self._lookup_by_id(dom_id)
@ -3842,7 +3843,7 @@ class LibvirtDriver(driver.ComputeDriver):
idx3 = m.index('Cached:')
if CONF.libvirt.virt_type == 'xen':
used = 0
for domain_id in self.list_instance_ids():
for domain_id in self._list_instance_ids():
try:
dom_mem = int(self._lookup_by_id(domain_id).info()[2])
except exception.InstanceNotFound:
@ -4691,9 +4692,10 @@ class LibvirtDriver(driver.ComputeDriver):
# libvirt.xml
disk_info = blockinfo.get_disk_info(
CONF.libvirt.virt_type, instance, block_device_info)
xml = self.to_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
xml = self._get_guest_xml(context, instance,
network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
self._conn.defineXML(xml)
def get_instance_disk_info(self, instance_name, xml=None,
@ -5068,9 +5070,9 @@ class LibvirtDriver(driver.ComputeDriver):
disk_mapping=disk_info['mapping'],
network_info=network_info,
block_device_info=None, inject_files=False)
xml = self.to_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info, power_on)
if power_on:
@ -5108,8 +5110,8 @@ class LibvirtDriver(driver.ComputeDriver):
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info)
xml = self.to_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info)
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info, power_on)


@ -342,7 +342,7 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
device_prefix = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-" %
(iscsi_properties['target_portal'],
iscsi_properties['target_iqn']))
devices = self.connection.get_all_block_devices()
devices = self.connection._get_all_block_devices()
devices = [dev for dev in devices if dev.startswith(device_prefix)]
if not devices:
self._disconnect_from_iscsi_portal(iscsi_properties)
@ -378,7 +378,7 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
multipath_device):
self._rescan_iscsi()
self._rescan_multipath()
block_devices = self.connection.get_all_block_devices()
block_devices = self.connection._get_all_block_devices()
devices = []
for dev in block_devices:
if "/mapper/" in dev: