nova-net: Remove dependency on nova-net from fake cache

There's more to be done here to make the cached objects look like
something that neutron would actually generate, but this is good enough
for now.

Change-Id: I08aa4d581720b0f6cd1dabccc98dac210d1a1663
Signed-off-by: Stephen Finucane <sfinucan@redhat.com>
Author: Stephen Finucane <sfinucan@redhat.com>
Date: 2019-11-28 16:43:29 +00:00
Parent: 5957ca666b
Commit: 828f3f2691
7 changed files with 162 additions and 218 deletions
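For test authors, the practical effect is that the fake network-info helpers no longer take nova-net-style tuning arguments. A minimal before/after sketch of the call sites, based on the signature changes in the diff below (the test class name is illustrative, not part of this change):

from nova import test
from nova.tests.unit import fake_network


class ExampleTestCase(test.TestCase):  # illustrative only
    def test_uses_fake_network_info(self):
        # Before this change the nova-net fake was tuned per call, e.g.
        # fake_get_instance_nw_info(self, 1, 2) for one network with two
        # IPs per VIF. After this change only the test case is required;
        # num_networks remains available as an optional keyword argument.
        nw_info = fake_network.fake_get_instance_nw_info(self)
        self.assertEqual(1, len(nw_info))

        # The stubbing helper is simplified in the same way and now only
        # accepts an optional replacement function.
        fake_network.stub_out_nw_api_get_instance_nw_info(self)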

View File

@ -251,7 +251,7 @@ class ExtendedFloatingIpTestV21(test.TestCase):
super(ExtendedFloatingIpTestV21, self).tearDown()
def test_extended_floating_ip_associate_fixed(self):
fixed_address = '192.168.1.101'
fixed_address = '192.168.1.100'
def fake_associate_floating_ip(*args, **kwargs):
self.assertEqual(fixed_address, kwargs['fixed_address'])

View File

@ -42,7 +42,6 @@ from nova import objects
from nova.objects import base
from nova import quota
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_network
from nova.tests.unit.objects import test_keypair
from nova import utils
@ -179,10 +178,6 @@ class stub_out_compute_api_backup(object):
return dict(id='123', status='ACTIVE', name=name, properties=props)
def stub_out_nw_api_get_instance_nw_info(test, num_networks=1, func=None):
fake_network.stub_out_nw_api_get_instance_nw_info(test)
def stub_out_nw_api(test, cls=None, private=None, publics=None):
if not private:
private = '192.168.0.3'

View File

@ -276,39 +276,7 @@ def next_floating_ip(fixed_ip_id):
'auto_assigned': False}
def ipv4_like(ip, match_string):
ip = ip.split('.')
match_octets = match_string.split('.')
for i, octet in enumerate(match_octets):
if octet == '*':
continue
if octet != ip[i]:
return False
return True
def fake_get_instance_nw_info(test, num_networks=1, ips_per_vif=2,
floating_ips_per_fixed_ip=0):
# test is an instance of nova.test.TestCase
# ips_per_vif is the number of ips each vif will have
# num_floating_ips is number of float ips for each fixed ip
network = network_manager.FlatManager(host=HOST)
network.db = db
# reset the fixed and floating ip generators
global floating_ip_id, fixed_ip_id, fixed_ips
floating_ip_id = floating_ip_ids()
fixed_ip_id = fixed_ip_ids()
fixed_ips = []
def fixed_ips_fake(*args, **kwargs):
global fixed_ips
ips = [next_fixed_ip(i, floating_ips_per_fixed_ip)
for i in range(1, num_networks + 1)
for j in range(ips_per_vif)]
fixed_ips = ips
return ips
def fake_get_instance_nw_info(test, num_networks=1):
def update_cache_fake(*args, **kwargs):
fake_info_cache = {
@ -321,28 +289,101 @@ def fake_get_instance_nw_info(test, num_networks=1, ips_per_vif=2,
}
return fake_info_cache
test.stub_out('nova.db.api.fixed_ip_get_by_instance', fixed_ips_fake)
test.stub_out('nova.db.api.instance_info_cache_update', update_cache_fake)
class FakeContext(nova.context.RequestContext):
def is_admin(self):
return True
# TODO(stephenfin): This doesn't match the kind of object we would receive
# from '_build_vif_model' and callers of same. We should fix that.
nw_model = network_model.NetworkInfo()
for network_id in range(1, num_networks + 1):
network = network_model.Network(
id=getattr(uuids, 'network%i' % network_id),
bridge='fake_br%d' % network_id,
label='test%d' % network_id,
subnets=[
network_model.Subnet(
cidr='192.168.%d.0/24' % network_id,
dns=[
network_model.IP(
address='192.168.%d.3' % network_id,
type='dns',
version=4,
meta={},
),
network_model.IP(
address='192.168.%d.4' % network_id,
type='dns',
version=4,
meta={},
),
],
gateway=network_model.IP(
address='192.168.%d.1' % network_id,
type='gateway',
version=4,
meta={},
),
ips=[
network_model.FixedIP(
address='192.168.%d.100' % network_id,
version=4,
meta={},
),
],
routes=[],
version=4,
meta={},
),
network_model.Subnet(
cidr='2001:db8:0:%x::/64' % network_id,
dns=[],
gateway=network_model.IP(
address='2001:db8:0:%x::1' % network_id,
type='gateway',
version=6,
meta={},
),
ips=[
network_model.FixedIP(
address='2001:db8:0:%x:dcad:beff:feef:1' % (
network_id),
version=6,
meta={},
),
],
routes=[],
version=6,
meta={}
),
],
meta={
"tenant_id": "806e1f03-b36f-4fc6-be29-11a366f150eb"
},
)
vif = network_model.VIF(
id=getattr(uuids, 'vif%i' % network_id),
address='DE:AD:BE:EF:00:%02x' % network_id,
network=network,
type='bridge',
details={},
devname=None,
ovs_interfaceid=None,
qbh_params=None,
qbg_params=None,
active=False,
vnic_type='normal',
profile=None,
preserve_on_delete=False,
meta={'rxtx_cap': 30},
)
nw_model.append(vif)
nw_model = network.get_instance_nw_info(
FakeContext('fakeuser', 'fake_project'),
0, 3, None)
return nw_model
def stub_out_nw_api_get_instance_nw_info(test, func=None,
num_networks=1,
ips_per_vif=1,
floating_ips_per_fixed_ip=0):
def stub_out_nw_api_get_instance_nw_info(test, func=None):
def get_instance_nw_info(self, context, instance, conductor_api=None):
return fake_get_instance_nw_info(test, num_networks=num_networks,
ips_per_vif=ips_per_vif,
floating_ips_per_fixed_ip=floating_ips_per_fixed_ip)
return fake_get_instance_nw_info(test)
if func is None:
func = get_instance_nw_info

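For reference, a minimal sketch (not part of this change) of the shape the rewritten helper now returns, asserting against the values hard-coded above for the default single network; the test class name is illustrative:

from nova import test
from nova.tests.unit import fake_network


class FakeNetworkInfoShapeTest(test.TestCase):  # illustrative only
    def test_default_network_info_shape(self):
        nw_info = fake_network.fake_get_instance_nw_info(self, num_networks=1)
        vif = nw_info[0]

        # One VIF per network, with the MAC and bridge derived from the
        # network index.
        self.assertEqual('DE:AD:BE:EF:00:01', vif['address'])
        self.assertEqual('fake_br1', vif['network']['bridge'])

        # Each network carries an IPv4 and an IPv6 subnet.
        subnet_v4, subnet_v6 = vif['network']['subnets']
        self.assertEqual('192.168.1.0/24', subnet_v4['cidr'])
        self.assertEqual('192.168.1.100', subnet_v4['ips'][0]['address'])
        self.assertEqual('2001:db8:0:1::/64', subnet_v6['cidr'])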
View File

@ -38,7 +38,6 @@ from nova import ipv6
from nova.network import floating_ips
from nova.network import linux_net
from nova.network import manager as network_manager
from nova.network import model as net_model
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import network as network_obj
@ -48,12 +47,10 @@ from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_ldap
from nova.tests.unit import fake_network
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_fixed_ip
from nova.tests.unit.objects import test_floating_ip
from nova.tests.unit.objects import test_network
from nova.tests.unit.objects import test_service
from nova.tests.unit import utils as test_utils
from nova import utils
@ -197,91 +194,6 @@ class FlatNetworkTestCase(test.TestCase):
fakes.FAKE_PROJECT_ID,
is_admin=False)
@testtools.skipIf(test_utils.is_osx(),
'IPv6 pretty-printing broken on OSX, see bug 1409135')
def test_get_instance_nw_info_fake(self):
fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
nw_info = fake_get_instance_nw_info(self, 0, 2)
self.assertFalse(nw_info)
nw_info = fake_get_instance_nw_info(self, 1, 2)
for i, vif in enumerate(nw_info):
nid = i + 1
check = {'bridge': 'fake_br%d' % nid,
'cidr': '192.168.%s.0/24' % nid,
'cidr_v6': '2001:db8:0:%x::/64' % nid,
'id': getattr(uuids, 'vif%i' % nid),
'multi_host': False,
'injected': False,
'bridge_interface': None,
'vlan': None,
'broadcast': '192.168.%d.255' % nid,
'dhcp_server': '192.168.1.1',
'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid],
'gateway': '192.168.%d.1' % nid,
'gateway_v6': '2001:db8:0:1::1',
'label': 'test%d' % nid,
'mac': 'DE:AD:BE:EF:00:%02x' % nid,
'rxtx_cap': 30,
'vif_type': net_model.VIF_TYPE_BRIDGE,
'vif_devname': None,
'vif_uuid': getattr(uuids, 'vif%i' % nid),
'ovs_interfaceid': None,
'qbh_params': None,
'qbg_params': None,
'should_create_vlan': False,
'should_create_bridge': False,
'ip': '192.168.%d.%03d' % (nid, nid + 99),
'ip_v6': '2001:db8:0:1:dcad:beff:feef:%x' % nid,
'netmask': '255.255.255.0',
'netmask_v6': 64,
'physical_network': None,
}
network = vif['network']
net_v4 = vif['network']['subnets'][0]
net_v6 = vif['network']['subnets'][1]
vif_dict = dict(bridge=network['bridge'],
cidr=net_v4['cidr'],
cidr_v6=net_v6['cidr'],
id=vif['id'],
multi_host=network.get_meta('multi_host', False),
injected=network.get_meta('injected', False),
bridge_interface=
network.get_meta('bridge_interface'),
vlan=network.get_meta('vlan'),
broadcast=str(net_v4.as_netaddr().broadcast),
dhcp_server=network.get_meta('dhcp_server',
net_v4['gateway']['address']),
dns=[ip['address'] for ip in net_v4['dns']],
gateway=net_v4['gateway']['address'],
gateway_v6=net_v6['gateway']['address'],
label=network['label'],
mac=vif['address'],
rxtx_cap=vif.get_meta('rxtx_cap'),
vif_type=vif['type'],
vif_devname=vif.get('devname'),
vif_uuid=vif['id'],
ovs_interfaceid=vif.get('ovs_interfaceid'),
qbh_params=vif.get('qbh_params'),
qbg_params=vif.get('qbg_params'),
should_create_vlan=
network.get_meta('should_create_vlan', False),
should_create_bridge=
network.get_meta('should_create_bridge',
False),
ip=net_v4['ips'][i]['address'],
ip_v6=net_v6['ips'][i]['address'],
netmask=str(net_v4.as_netaddr().netmask),
netmask_v6=net_v6.as_netaddr()._prefixlen,
physical_network=
network.get_meta('physical_network', None))
self.assertThat(vif_dict, matchers.DictMatches(check))
def test_validate_networks(self):
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')

View File

@ -45,7 +45,7 @@ class NotificationsTestCase(test.TestCase):
super(NotificationsTestCase, self).setUp()
self.fixture = self.useFixture(o_fixture.ClearRequestContext())
self.net_info = fake_network.fake_get_instance_nw_info(self, 1, 1)
self.net_info = fake_network.fake_get_instance_nw_info(self)
fake_notifier.stub_notifier(self)
self.addCleanup(fake_notifier.reset)
@ -213,7 +213,7 @@ class NotificationsTestCase(test.TestCase):
@mock.patch.object(objects.BandwidthUsageList, 'get_by_uuids')
def test_vm_update_with_states(self, mock_bandwidth_list):
mock_bandwidth_list.return_value = [self.get_fake_bandwidth()]
fake_net_info = fake_network.fake_get_instance_nw_info(self, 1, 1)
fake_net_info = fake_network.fake_get_instance_nw_info(self)
self.instance.info_cache.network_info = fake_net_info
notifications.send_update_with_states(self.context, self.instance,
@ -290,7 +290,7 @@ class NotificationsTestCase(test.TestCase):
self.flags(notify_on_state_change="vm_and_task_state",
group='notifications')
mock_bandwidth_list.return_value = [self.get_fake_bandwidth()]
fake_net_info = fake_network.fake_get_instance_nw_info(self, 1, 1)
fake_net_info = fake_network.fake_get_instance_nw_info(self)
self.instance.info_cache.network_info = fake_net_info
notifications.send_update_with_states(self.context, self.instance,

View File

@ -2167,7 +2167,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
vifs = [vif, vif1, vif2, vif3, vif4]
network_info = _fake_network_info(self, 4)
network_info = _fake_network_info(self)
network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT_PHYSICAL
network_info[0]['address'] = "51:5a:2c:a4:5e:1b"
network_info[0]['details'] = dict(vlan='2145')
@ -2471,7 +2471,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info,
context=ctxt)
@ -2565,7 +2565,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
num_ports = 0
@ -2599,7 +2599,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
num_ports = 0
@ -2640,7 +2640,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info,
context=ctxt)
self.assertEqual("N/A",
@ -2654,7 +2654,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, {'mapping': {}})
self.assertEqual(instance_ref["uuid"], cfg.uuid)
self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
@ -2681,7 +2681,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, {'mapping': {}})
self.assertEqual(instance_ref["uuid"], cfg.uuid)
self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
@ -3125,7 +3125,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta)
return drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info,
context=ctxt)
@ -4404,7 +4404,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertIsInstance(cfg.clock,
@ -4443,7 +4443,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertIsInstance(cfg.clock,
@ -6950,7 +6950,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertEqual(cfg.os_cmdline, "fake_os_command_line")
@ -6970,7 +6970,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertIsNone(cfg.os_cmdline)
@ -6995,7 +6995,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# console=hvc0' set by default, so testing an empty string and None
# value in the os_command_line image property must pass
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertNotEqual(cfg.os_cmdline, "")
@ -7030,7 +7030,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertEqual(cfg.os_mach_type, "virt")
@ -7071,7 +7071,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertTrue(mock_path_exists.called)
mock_path_exists.assert_any_call(
@ -7148,7 +7148,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta)
return drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
def test_get_guest_config_machine_type_through_image_meta(self):
@ -7260,7 +7260,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertEqual(cfg.os_mach_type, "fake_machine_type")
@ -7330,7 +7330,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
@ -7353,7 +7353,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
@ -7376,7 +7376,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertIsNone(conf.cpu)
@ -7393,7 +7393,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertIsNone(conf.cpu)
@ -7407,7 +7407,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
@ -7443,7 +7443,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
@ -7459,7 +7459,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
@ -7481,7 +7481,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
@ -7506,7 +7506,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
@ -7533,7 +7533,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
@ -7562,7 +7562,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
features = [feature.name for feature in conf.cpu.features]
self.assertIsInstance(conf.cpu,
@ -7588,7 +7588,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cpu_models=["PENRYN", "IVYBRIDGE"],
group="libvirt")
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertEqual(conf.cpu.mode, "custom")
self.assertEqual(conf.cpu.model, "Penryn")
@ -7608,7 +7608,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cpu_models=["SandyBridge", "IvyBridge"],
group="libvirt")
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertEqual(conf.cpu.mode, "custom")
self.assertEqual(conf.cpu.model, "SandyBridge")
@ -7639,7 +7639,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cpu_models=["qemu64", "Broadwell-noTSX"],
group="libvirt")
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertEqual(conf.cpu.mode, "custom")
self.assertEqual(conf.cpu.model, "Broadwell-noTSX")
@ -7671,7 +7671,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cpu_models=["qemu64", "SandyBridge", "Broadwell-noTSX"],
group="libvirt")
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertEqual(conf.cpu.mode, "custom")
self.assertEqual(conf.cpu.model, "SandyBridge")
@ -7705,7 +7705,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.InvalidCPUInfo,
drvr._get_guest_config,
instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta,
disk_info)
@ -7750,7 +7750,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cpu_models=["qemu64", "SandyBridge", "Broadwell-noTSX"],
group="libvirt")
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertEqual(conf.cpu.mode, "custom")
self.assertEqual(conf.cpu.model, "Broadwell-noTSX")
@ -7772,7 +7772,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
features = [feature.name for feature in conf.cpu.features]
self.assertIsInstance(conf.cpu,
@ -7800,7 +7800,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
features = [feature.name for feature in conf.cpu.features]
self.assertIsInstance(conf.cpu,
@ -7826,7 +7826,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
@ -9516,7 +9516,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(drvr._uri(), 'lxc:///')
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
@ -9575,7 +9575,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.flags(disk_prefix=prefix, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
@ -9613,7 +9613,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
@ -9645,7 +9645,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def _check_xml_and_disk_bus(self, image_meta,
block_device_info, wantConfig):
instance_ref = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
@ -9676,7 +9676,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def _check_xml_and_uuid(self, image_meta):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
@ -9836,7 +9836,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(drvr._uri(), expected_uri)
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta,
@ -13858,7 +13858,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# _fake_network_info must be called before create_fake_libvirt_mock(),
# as _fake_network_info calls importutils.import_class() and
# create_fake_libvirt_mock() mocks importutils.import_class().
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
getCapabilities=fake_getCapabilities,
getCPUModelNames=fake_getCPUModelNames,
@ -15235,7 +15235,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_get_domain.return_value = mock_virDomain
instance = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
drvr.resume_state_on_host_boot(self.context, instance, network_info,
block_device_info=None)
@ -15292,7 +15292,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_info):
self.context.auth_token = True # any non-None value will suffice
instance = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
block_device_info = None
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
@ -15516,7 +15516,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_image_metadata, mock_ID, mock_attachDevice):
instance = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
guest = libvirt_guest.Guest(FakeVirtDomain())
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@ -15535,7 +15535,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_image_metadata, mock_ID, mock_attachDevice):
instance = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT_PHYSICAL
guest = libvirt_guest.Guest(FakeVirtDomain())
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@ -15554,7 +15554,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_image_metadata, mock_ID, mock_attachDevice):
instance = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
instance.info_cache = objects.InstanceInfoCache(
network_info=network_info)
@ -15574,7 +15574,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance = objects.Instance(**self.test_instance)
expeted_pci_slot = "0000:00:00.0"
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
# some more adjustments for the fake network_info so that
# the correct get_config function will be executed (vif's
@ -15680,7 +15680,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"<target dev='vdb' bus='virtio'/></disk>"
"</devices></domain>")
instance = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
block_device_info = None
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
guest = libvirt_guest.Guest('fake_dom')
@ -17985,7 +17985,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
lambda self, instance: FakeVirtDomain())
instance = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_image_meta = objects.ImageMeta.from_dict(
@ -19035,7 +19035,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info)
self.assertEqual("parallels", cfg.virt_type)
self.assertEqual(instance_ref["uuid"], cfg.uuid)
@ -19078,7 +19078,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
rescue_data = None
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, {'mapping': {'disk': {}}},
rescue_data)
self.assertEqual("parallels", cfg.virt_type)
@ -19143,7 +19143,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
driver_block_device.DriverVolumeBlockDevice, 'save'
) as mock_save:
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, disk_info, None, info)
mock_save.assert_called_once_with()
@ -19295,7 +19295,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, {'mapping': {}},
mdevs=mdevs)
# Loop over all devices to make sure we have at least one mediated one.
@ -19333,7 +19333,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_vpmems_label.return_value = ['4GB', '16GB']
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
_fake_network_info(self),
image_meta, {'mapping': {}})
vpmem_amount = 0
for device in cfg.devices:
@ -21162,9 +21162,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
ins_ref = self._create_instance()
self.drvr.confirm_migration(self.context, "migration_ref", ins_ref,
_fake_network_info(self, 1))
_fake_network_info(self))
mock_cleanup.assert_called_once_with(
self.context, ins_ref, _fake_network_info(self, 1))
self.context, ins_ref, _fake_network_info(self))
@mock.patch('time.sleep', new=mock.Mock())
def test_cleanup_resize_same_host(self):
@ -21184,7 +21184,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_get_path.return_value = '/fake/inst'
drvr._cleanup_resize(
self.context, ins_ref, _fake_network_info(self, 1))
self.context, ins_ref, _fake_network_info(self))
mock_get_path.assert_called_once_with(ins_ref)
self.assertEqual(5, mock_rmtree.call_count)
@ -21193,7 +21193,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
CONF.set_override('policy_dirs', [], group='oslo_policy')
host = 'not' + CONF.host
ins_ref = self._create_instance({'host': host})
fake_net = _fake_network_info(self, 1)
fake_net = _fake_network_info(self)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@ -21227,7 +21227,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
CONF.set_override('policy_dirs', [], group='oslo_policy')
host = 'not' + CONF.host
ins_ref = self._create_instance({'host': host})
fake_net = _fake_network_info(self, 1)
fake_net = _fake_network_info(self)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.image_backend = mock.Mock()
@ -21272,7 +21272,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_get_path.return_value = '/fake/inst'
drvr._cleanup_resize(
self.context, ins_ref, _fake_network_info(self, 1))
self.context, ins_ref, _fake_network_info(self))
mock_get_path.assert_called_once_with(ins_ref)
mock_remove.assert_called_once_with(
libvirt_utils.RESIZE_SNAPSHOT_NAME)
@ -21300,7 +21300,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_get_path.return_value = '/fake/inst'
drvr._cleanup_resize(
self.context, ins_ref, _fake_network_info(self, 1))
self.context, ins_ref, _fake_network_info(self))
mock_get_path.assert_called_once_with(ins_ref)
self.assertFalse(mock_remove.called)
self.assertEqual(5, mock_rmtree.call_count)
@ -21577,7 +21577,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
def test_attach_interface_build_metadata_fails(self):
instance = self._create_instance()
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
domain = FakeVirtDomain(fake_xml="""
<domain type='kvm'>
<devices>
@ -21637,7 +21637,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_get_domain, mock_attach,
mock_info, mock_build, mock_save):
instance = self._create_instance()
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
domain = FakeVirtDomain(fake_xml="""
<domain type='kvm'>
<devices>
@ -21699,7 +21699,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
device_not_found=False):
# setup some mocks
instance = self._create_instance()
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
domain = FakeVirtDomain(fake_xml="""
<domain type='kvm'>
<devices>
@ -21798,7 +21798,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
# Asserts that we don't log an error when the interface device is not
# found on the guest after a libvirt error during detach.
instance = self._create_instance()
vif = _fake_network_info(self, 1)[0]
vif = _fake_network_info(self)[0]
guest = mock.Mock(spec=libvirt_guest.Guest)
guest.get_power_state = mock.Mock()
self.drvr._host.get_guest = mock.Mock(return_value=guest)
@ -21821,7 +21821,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
def test_detach_interface_device_with_same_mac_address(
self, mock_get_domain, mock_detach, mock_info):
instance = self._create_instance()
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
domain = FakeVirtDomain(fake_xml="""
<domain type='kvm'>
<devices>
@ -21903,7 +21903,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
image_meta_dict = {'id': uuids.image_id, 'name': 'fake'}
image_meta = objects.ImageMeta.from_dict(image_meta_dict)
network_info = _fake_network_info(self, 1)
network_info = _fake_network_info(self)
rescue_password = 'fake_password'
domain_xml = [None]

View File

@ -720,10 +720,6 @@ class XenAPIVMTestCase(stubs.XenAPITestBase,
'ips': [{'enabled': '1',
'ip': '192.168.1.100',
'netmask': '255.255.255.0',
'gateway': '192.168.1.1'},
{'enabled': '1',
'ip': '192.168.1.101',
'netmask': '255.255.255.0',
'gateway': '192.168.1.1'}],
'label': 'test1',
'mac': 'DE:AD:BE:EF:00:01'}, tcpip_data)