introduce "{tx|rx}_queue_size" options
The libvirt/{tx|rx}_queue_size options will be required by NFV deployments in order to increase network performance per vCPU and avoid packet drops. Note that the options are available starting with Rocky. Since QEMU 2.7.0 and libvirt 2.3.0 to configure RX queue size. Since QEMU 2.10.0 and libvirt 3.7.0 to configure TX queue size. Closes-Bug: 1804170 Change-Id: Iceacb42aae248fb36e9eecdc992c4a982f4e32b4 Signed-off-by: Sahid Orentino Ferdjaoui <sahid.ferdjaoui@canonical.com>
This commit is contained in:
parent
9ff97e429d
commit
27e71b5e41
18
config.yaml
18
config.yaml
|
@ -187,6 +187,24 @@ options:
|
|||
Sets vcpu_pin_set option in nova.conf which defines which pcpus that
|
||||
instance vcpus can or cannot use. For example '^0,^2' to reserve two
|
||||
cpus for the host.
|
||||
virtio-net-tx-queue-size:
|
||||
type: int
|
||||
default:
|
||||
description: |
|
||||
Sets libvirt/tx_queue_size option in nova.conf. Larger queue sizes for
|
||||
virtio-net devices increase networking performance by amortizing vCPU
|
||||
preemption and avoiding packet drops. Only works with Rocky and later,
|
||||
since QEMU 2.10.0 and libvirt 3.7.0. The default value is 256. Authorized
|
||||
values [256, 512, 1024].
|
||||
virtio-net-rx-queue-size:
|
||||
type: int
|
||||
default:
|
||||
description: |
|
||||
Sets libvirt/rx_queue_size option in nova.conf. Larger queue sizes for
|
||||
virtio-net devices increase networking performance by amortizing vCPU
|
||||
preemption and avoiding packet drops. Only works with Rocky and later,
|
||||
since QEMU 2.7.0 and libvirt 2.3.0. The default value is 256. Authorized
|
||||
values [256, 512, 1024].
|
||||
worker-multiplier:
|
||||
type: float
|
||||
default:
|
||||
|
|
|
@ -247,6 +247,13 @@ class NovaComputeLibvirtContext(context.OSContextGenerator):
|
|||
if config('vcpu-pin-set'):
|
||||
ctxt['vcpu_pin_set'] = config('vcpu-pin-set')
|
||||
|
||||
if config('virtio-net-tx-queue-size'):
|
||||
ctxt['virtio_net_tx_queue_size'] = (
|
||||
config('virtio-net-tx-queue-size'))
|
||||
if config('virtio-net-rx-queue-size'):
|
||||
ctxt['virtio_net_rx_queue_size'] = (
|
||||
config('virtio-net-rx-queue-size'))
|
||||
|
||||
ctxt['reserved_host_memory'] = config('reserved-host-memory')
|
||||
|
||||
db = kv()
|
||||
|
|
|
@ -0,0 +1,266 @@
|
|||
# rocky
|
||||
###############################################################################
|
||||
# [ WARNING ]
|
||||
# Configuration file maintained by Juju. Local changes may be overwritten.
|
||||
{% if restart_trigger -%}
|
||||
# restart trigger: {{ restart_trigger }}
|
||||
{% endif -%}
|
||||
###############################################################################
|
||||
[DEFAULT]
|
||||
verbose={{ verbose }}
|
||||
debug={{ debug }}
|
||||
dhcpbridge_flagfile=/etc/nova/nova.conf
|
||||
dhcpbridge=/usr/bin/nova-dhcpbridge
|
||||
logdir=/var/log/nova
|
||||
state_path=/var/lib/nova
|
||||
force_dhcp_release=True
|
||||
use_syslog = {{ use_syslog }}
|
||||
ec2_private_dns_show_ip=True
|
||||
api_paste_config=/etc/nova/api-paste.ini
|
||||
enabled_apis=osapi_compute,metadata
|
||||
auth_strategy=keystone
|
||||
my_ip = {{ host_ip }}
|
||||
force_raw_images = {{ force_raw_images }}
|
||||
|
||||
{% if arch == 'aarch64' -%}
|
||||
libvirt_use_virtio_for_bridges=False
|
||||
libvirt_disk_prefix=vd
|
||||
{% endif -%}
|
||||
|
||||
{% if console_vnc_type -%}
|
||||
vnc_enabled = True
|
||||
novnc_enabled = True
|
||||
vnc_keymap = {{ console_keymap }}
|
||||
vncserver_listen = 0.0.0.0
|
||||
vncserver_proxyclient_address = {{ console_listen_addr }}
|
||||
{% if console_access_protocol == 'novnc' or console_access_protocol == 'vnc' -%}
|
||||
novncproxy_base_url = {{ novnc_proxy_address }}
|
||||
{% endif -%}
|
||||
{% if console_access_protocol == 'xvpvnc' or console_access_protocol == 'vnc' -%}
|
||||
xvpvncproxy_port = {{ xvpvnc_proxy_port }}
|
||||
xvpvncproxy_host = {{ xvpvnc_proxy_host }}
|
||||
xvpvncproxy_base_url = {{ xvpvnc_proxy_address }}
|
||||
{% endif -%}
|
||||
{% else -%}
|
||||
vnc_enabled = False
|
||||
novnc_enabled = False
|
||||
{% endif -%}
|
||||
|
||||
{% if neutron_plugin and neutron_plugin in ('ovs', 'midonet') -%}
|
||||
libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver
|
||||
{% if neutron_security_groups -%}
|
||||
security_group_api = neutron
|
||||
firewall_driver = nova.virt.firewall.NoopFirewallDriver
|
||||
{% endif -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if neutron_plugin and neutron_plugin == 'vsp' -%}
|
||||
network_api_class=nova.network.neutronv2.api.API
|
||||
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
|
||||
neutron_ovs_bridge=alubr0
|
||||
security_group_api=neutron
|
||||
firewall_driver = nova.virt.firewall.NoopFirewallDriver
|
||||
{% endif -%}
|
||||
|
||||
{% if neutron_plugin and (neutron_plugin == 'nvp' or neutron_plugin == 'nsx') -%}
|
||||
libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtOpenVswitchVirtualPortDriver
|
||||
security_group_api = neutron
|
||||
firewall_driver = nova.virt.firewall.NoopFirewallDriver
|
||||
{% endif -%}
|
||||
|
||||
{% if neutron_plugin and neutron_plugin == 'Calico' -%}
|
||||
security_group_api = neutron
|
||||
firewall_driver = nova.virt.firewall.NoopFirewallDriver
|
||||
{% endif -%}
|
||||
|
||||
{% if neutron_plugin and neutron_plugin == 'plumgrid' -%}
|
||||
security_group_api=neutron
|
||||
firewall_driver = nova.virt.firewall.NoopFirewallDriver
|
||||
{% endif -%}
|
||||
|
||||
{% if network_manager != 'neutron' and network_manager_config -%}
|
||||
{% for key, value in network_manager_config.items() -%}
|
||||
{{ key }} = {{ value }}
|
||||
{% endfor -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if network_manager == 'neutron' -%}
|
||||
network_api_class = nova.network.neutronv2.api.API
|
||||
use_neutron = True
|
||||
{% else -%}
|
||||
network_manager = nova.network.manager.FlatDHCPManager
|
||||
{% endif -%}
|
||||
|
||||
{% if network_device_mtu -%}
|
||||
network_device_mtu = {{ network_device_mtu }}
|
||||
{% endif -%}
|
||||
|
||||
{% if volume_service -%}
|
||||
volume_api_class = nova.volume.cinder.API
|
||||
{% endif -%}
|
||||
|
||||
{% if user_config_flags -%}
|
||||
{% for key, value in user_config_flags.items() -%}
|
||||
{{ key }} = {{ value }}
|
||||
{% endfor -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if instances_path -%}
|
||||
instances_path = {{ instances_path }}
|
||||
{% endif -%}
|
||||
|
||||
{% if sections and 'DEFAULT' in sections -%}
|
||||
{% for key, value in sections['DEFAULT'] -%}
|
||||
{{ key }} = {{ value }}
|
||||
{% endfor -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if vcpu_pin_set -%}
|
||||
vcpu_pin_set = {{ vcpu_pin_set }}
|
||||
{% endif -%}
|
||||
reserved_host_memory_mb = {{ reserved_host_memory }}
|
||||
|
||||
{% include "section-zeromq" %}
|
||||
|
||||
{% if default_availability_zone -%}
|
||||
default_availability_zone = {{ default_availability_zone }}
|
||||
{% endif -%}
|
||||
|
||||
{% if resume_guests_state_on_host_boot -%}
|
||||
resume_guests_state_on_host_boot = {{ resume_guests_state_on_host_boot }}
|
||||
{% endif -%}
|
||||
|
||||
metadata_workers = {{ workers }}
|
||||
|
||||
[pci]
|
||||
{% if pci_passthrough_whitelist -%}
|
||||
passthrough_whitelist = {{ pci_passthrough_whitelist }}
|
||||
{% endif -%}
|
||||
{% if pci_alias %}
|
||||
alias = {{ pci_alias }}
|
||||
{% endif %}
|
||||
|
||||
{% if network_manager == 'neutron' and network_manager_config -%}
|
||||
[neutron]
|
||||
url = {{ network_manager_config.neutron_url }}
|
||||
{% if network_manager_config.keystone_host or auth_host -%}
|
||||
{% if neutron_plugin and neutron_plugin == 'vsp' -%}
|
||||
ovs_bridge = alubr0
|
||||
{% endif -%}
|
||||
{% if auth_host -%}
|
||||
auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
|
||||
auth_type = password
|
||||
{% if admin_domain_name -%}
|
||||
project_domain_name = {{ admin_domain_name }}
|
||||
user_domain_name = {{ admin_domain_name }}
|
||||
{% else -%}
|
||||
project_domain_name = default
|
||||
user_domain_name = default
|
||||
{% endif -%}
|
||||
project_name = {{ admin_tenant_name }}
|
||||
username = {{ admin_user }}
|
||||
password = {{ admin_password }}
|
||||
signing_dir = {{ signing_dir }}
|
||||
{% endif -%}
|
||||
{% if metadata_shared_secret -%}
|
||||
metadata_proxy_shared_secret = {{ metadata_shared_secret }}
|
||||
service_metadata_proxy=True
|
||||
{% endif -%}
|
||||
{% endif -%}
|
||||
{% endif -%}
|
||||
|
||||
{% include "section-keystone-authtoken-mitaka" %}
|
||||
|
||||
{% if glance_api_servers -%}
|
||||
[glance]
|
||||
api_servers = {{ glance_api_servers }}
|
||||
{% endif -%}
|
||||
|
||||
{% if console_access_protocol == 'spice' -%}
|
||||
[spice]
|
||||
agent_enabled = True
|
||||
enabled = True
|
||||
html5proxy_base_url = {{ spice_proxy_address }}
|
||||
keymap = {{ console_keymap }}
|
||||
server_listen = 0.0.0.0
|
||||
server_proxyclient_address = {{ console_listen_addr }}
|
||||
{% endif -%}
|
||||
|
||||
[libvirt]
|
||||
{% if cpu_mode -%}
|
||||
cpu_mode = {{ cpu_mode }}
|
||||
{% endif -%}
|
||||
{% if cpu_model -%}
|
||||
cpu_model = {{ cpu_model }}
|
||||
{% endif -%}
|
||||
{% if cpu_model_extra_flags %}
|
||||
cpu_model_extra_flags = {{ cpu_model_extra_flags }}
|
||||
{% endif %}
|
||||
{% if libvirt_images_type -%}
|
||||
images_type = {{ libvirt_images_type }}
|
||||
{% endif -%}
|
||||
{% if libvirt_images_type and rbd_pool -%}
|
||||
images_rbd_pool = {{ rbd_pool }}
|
||||
images_rbd_ceph_conf = {{ libvirt_rbd_images_ceph_conf }}
|
||||
inject_password = false
|
||||
inject_key = false
|
||||
inject_partition = -2
|
||||
{% endif -%}
|
||||
rbd_user = {{ rbd_user }}
|
||||
rbd_secret_uuid = {{ rbd_secret_uuid }}
|
||||
{% if live_migration_uri -%}
|
||||
live_migration_uri = {{ live_migration_uri }}
|
||||
{% endif -%}
|
||||
{% if disk_cachemodes -%}
|
||||
disk_cachemodes = {{ disk_cachemodes }}
|
||||
{% endif %}
|
||||
# Disable tunnelled migration so that selective
|
||||
# live block migration can be supported.
|
||||
live_migration_tunnelled = False
|
||||
{% if use_multipath -%}
|
||||
volume_use_multipath = {{ use_multipath }}
|
||||
{% endif %}
|
||||
hw_disk_discard = unmap
|
||||
{% if virtio_net_tx_queue_size -%}
|
||||
tx_queue_size = {{ virtio_net_tx_queue_size }}
|
||||
{% endif %}
|
||||
{% if virtio_net_rx_queue_size -%}
|
||||
rx_queue_size = {{ virtio_net_rx_queue_size }}
|
||||
{% endif %}
|
||||
|
||||
{% if virt_type == 'lxd' -%}
|
||||
[lxd]
|
||||
{% if enable_live_migration -%}
|
||||
allow_live_migration = True
|
||||
{% endif -%}
|
||||
{% if storage_pool -%}
|
||||
pool = {{ storage_pool }}
|
||||
{% endif -%}
|
||||
{% endif -%}
|
||||
|
||||
{% include "parts/section-database" %}
|
||||
|
||||
{% include "section-rabbitmq-oslo" %}
|
||||
|
||||
[notifications]
|
||||
# Starting in the Pike release, the notification_format includes both the
|
||||
# versioned and unversioned message notifications. Ceilometer does not yet
|
||||
# consume the versioned message notifications, so intentionally make the
|
||||
# notification format unversioned until this is implemented.
|
||||
notification_format = unversioned
|
||||
|
||||
{% include "section-oslo-notifications" %}
|
||||
|
||||
{% include "parts/section-cinder" %}
|
||||
|
||||
[oslo_concurrency]
|
||||
lock_path=/var/lock/nova
|
||||
|
||||
[workarounds]
|
||||
disable_libvirt_livesnapshot = False
|
||||
|
||||
{% include "parts/section-ephemeral" %}
|
||||
|
||||
{% include "parts/section-serial-console" %}
|
||||
|
||||
{% include "parts/section-placement" %}
|
|
@ -396,6 +396,8 @@ class NovaComputeContextTests(CharmTestCase):
|
|||
self.test_config.set('reserved-host-memory', 1024)
|
||||
self.test_config.set('vcpu-pin-set', '^0^2')
|
||||
self.test_config.set('pci-passthrough-whitelist', 'mypcidevices')
|
||||
self.test_config.set('virtio-net-tx-queue-size', 512)
|
||||
self.test_config.set('virtio-net-rx-queue-size', 1024)
|
||||
libvirt = context.NovaComputeLibvirtContext()
|
||||
|
||||
self.assertEqual(
|
||||
|
@ -410,7 +412,9 @@ class NovaComputeContextTests(CharmTestCase):
|
|||
'reserved_host_memory': 1024,
|
||||
'vcpu_pin_set': '^0^2',
|
||||
'force_raw_images': True,
|
||||
'pci_passthrough_whitelist': 'mypcidevices'}, libvirt())
|
||||
'pci_passthrough_whitelist': 'mypcidevices',
|
||||
'virtio_net_tx_queue_size': 512,
|
||||
'virtio_net_rx_queue_size': 1024}, libvirt())
|
||||
|
||||
def test_ksm_configs(self):
|
||||
self.lsb_release.return_value = {'DISTRIB_CODENAME': 'lucid'}
|
||||
|
|
Loading…
Reference in New Issue