Initial NFV test cases

Includes tests for CPU pinning, NUMA topology, and hugepages.

Change-Id: I2124bdf308b24f749eaea17b20a7e7fabbb2840a
Przemyslaw Czesnowicz 2015-08-31 16:45:25 +01:00 committed by Waldemar Znoinski
parent 34832f591a
commit 0b3f5d9885
13 changed files with 685 additions and 50 deletions


@@ -109,3 +109,4 @@ Tips
To run a subset of tests::
$ python -m unittest tests.test_intel_nfv_ci_tests


@@ -4,6 +4,6 @@ History
-------
0.1.0 (2015-06-15)
---------------------
------------------
* First release on PyPI.


@@ -1,6 +1,6 @@
===============================
==================
intel-nfv-ci-tests
===============================
==================
.. image:: https://img.shields.io/travis/stackforge/intel-nfv-ci-tests.svg
:target: https://travis-ci.org/stackforge/intel-nfv-ci-tests


@@ -20,7 +20,7 @@ import sys
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
@@ -36,7 +36,7 @@ import intel_nfv_ci_tests
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
@@ -49,7 +49,7 @@ templates_path = ['_templates']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
@@ -69,13 +69,13 @@ release = intel_nfv_ci_tests.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
@@ -83,28 +83,28 @@ exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# keep_warnings = False
# -- Options for HTML output -------------------------------------------
@@ -116,27 +116,27 @@ html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
@@ -146,46 +146,46 @@ html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'intel-nfv-ci-testsdoc'
@@ -195,13 +195,13 @@ htmlhelp_basename = 'intel-nfv-ci-testsdoc'
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
@@ -215,23 +215,23 @@ latex_documents = [
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# latex_domain_indices = True
# -- Options for manual page output ------------------------------------
@@ -245,7 +245,7 @@ man_pages = [
]
# If true, show URL addresses after external links.
#man_show_urls = False
# man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
@@ -263,13 +263,13 @@ texinfo_documents = [
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# texinfo_no_detailmenu = False


@@ -4,7 +4,7 @@
contain the root `toctree` directive.
Welcome to intel-nfv-ci-tests's documentation!
======================================
==============================================
Contents:


@@ -10,3 +10,4 @@ Or, if you have virtualenvwrapper installed::
$ mkvirtualenv intel-nfv-ci-tests
$ pip install intel-nfv-ci-tests


@@ -1,7 +1,8 @@
========
=====
Usage
========
=====
To use intel_nfv_ci_tests in a project::
import intel_nfv_ci_tests


@@ -0,0 +1,267 @@
# Copyright 2015 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import libvirt
import multiprocessing
from tempest_lib.common.utils import data_utils
from tempest_lib import decorators
from tempest_lib import exceptions as lib_exc
import testtools
import xml.etree.ElementTree as ET
from tempest.api.compute import base
from tempest.common import waiters
from tempest import config
from tempest import test
CONF = config.CONF
def get_core_mappings():
"""Return core mapping for a dual-socket, HT-enabled board.
Generate mappings for CPU. Has following structure:
{numa_node_a: ([core_1_thread_a, core_2_thread_a, ...],
[core_1_thread_b, core_2_thread_b, ...]),
...}
The physical cores are assigned indexes first (0-based) and start
at node 0. The virtual cores are then listed.
>>> get_core_mappings(2)
{0: ([0, 1], [4, 5]), 1: ([2, 3], [6, 7])}
"""
# get number of real CPUs per socket, assuming a dual-socket,
# HT-enabled board (2 * 2)
cpu_per_soc = multiprocessing.cpu_count() / (2 * 2)
# calculate mappings
core_mappings = {
soc: (range(soc * cpu_per_soc, (soc + 1) * cpu_per_soc),
range((soc + 2) * cpu_per_soc, (soc + 3) * cpu_per_soc))
for soc in range(0, 2)
}
return core_mappings
class FlavorsAdminTestJSON(base.BaseV2ComputeAdminTest):
"""
Tests Flavors API Create and Delete that require admin privileges
"""
@classmethod
def skip_checks(cls):
super(FlavorsAdminTestJSON, cls).skip_checks()
if not test.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
msg = "OS-FLV-EXT-DATA extension not enabled."
raise cls.skipException(msg)
@classmethod
def setup_clients(cls):
super(FlavorsAdminTestJSON, cls).setup_clients()
cls.flavors_client = cls.os_adm.flavors_client
cls.servers_client = cls.os_adm.servers_client
@classmethod
def resource_setup(cls):
super(FlavorsAdminTestJSON, cls).resource_setup()
cls.flavor_name_prefix = 'test_hw_'
cls.ram = 512
cls.vcpus = 4
cls.disk = 0
cls.ephemeral = 0
cls.swap = 0
cls.rxtx_factor = 2
def flavor_clean_up(self, flavor_id):
self.flavors_client.delete_flavor(flavor_id)
self.flavors_client.wait_for_resource_deletion(flavor_id)
def server_clean_up(self, server_id):
self.servers_client.delete_server(server_id)
waiters.wait_for_server_termination(self.servers_client, server_id)
def _create_flavor(self, cpu_policy='shared',
cpu_threads_policy=None):
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
flavor_id = data_utils.rand_int_id(start=1000)
flavor = self.flavors_client.create_flavor(
name=flavor_name, ram=self.ram, vcpus=self.vcpus,
disk=self.disk, id=flavor_id,
swap=self.swap,
rxtx_factor=self.rxtx_factor)['flavor']
self.addCleanup(self.flavor_clean_up, flavor['id'])
specs = {'hw:cpu_policy': cpu_policy}
if cpu_policy == 'dedicated':
specs['hw:cpu_threads_policy'] = cpu_threads_policy
self.flavors_client.set_flavor_extra_spec(flavor['id'], **specs)
return flavor
def _create_server(self, flavor):
server = self.create_test_server(
flavor=flavor['id'], wait_until='ACTIVE')
self.addCleanup(self.server_clean_up, server['id'])
# get more information
server = self.servers_client.show_server(server['id'])['server']
return server
def _resize_server(self, server, flavor):
self.servers_client.resize(server['id'], flavor['id'])
# get more information
server = self.servers_client.show_server(server['id'])['server']
return server
def _reboot_server(self, server, reboot_type):
self.servers_client.reboot_server(server['id'], reboot_type)
# get more information
server = self.servers_client.show_server(server['id'])['server']
return server
def _get_cpu_pinning(self, server):
instance_name = server['OS-EXT-SRV-ATTR:instance_name']
conn = libvirt.openReadOnly('qemu:///system')
dom0 = conn.lookupByName(instance_name)
root = ET.fromstring(dom0.XMLDesc())
vcpupin_nodes = root.findall('./cputune/vcpupin')
cpu_pinnings = {int(x.get('vcpu')): int(x.get('cpuset'))
for x in vcpupin_nodes if x is not None}
return cpu_pinnings
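For reference, a minimal sketch of the libvirt domain XML this helper parses (hypothetical values; it assumes a pinned guest, where each vcpupin cpuset resolves to a single host CPU):

import xml.etree.ElementTree as ET

sample = '''
<domain>
  <cputune>
    <vcpupin vcpu="0" cpuset="2"/>
    <vcpupin vcpu="1" cpuset="6"/>
  </cputune>
</domain>
'''
root = ET.fromstring(sample)
pinnings = {int(x.get('vcpu')): int(x.get('cpuset'))
            for x in root.findall('./cputune/vcpupin')}
assert pinnings == {0: 2, 1: 6}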
def test_cpu_shared(self):
flavor = self._create_flavor(cpu_policy='shared')
self._create_server(flavor)
@decorators.skip_because(bug='0')
def test_cpu_dedicated_threads_separate(self):
"""Ensure vCPUs *are not* placed on thread siblings."""
flavor = self._create_flavor(
cpu_policy='dedicated', cpu_threads_policy='separate')
server = self._create_server(flavor)
cpu_pinnings = self._get_cpu_pinning(server)
core_mappings = get_core_mappings()
self.assertEqual(len(cpu_pinnings), self.vcpus)
        # with the 'separate' policy, when one thread of a core is used,
        # its sibling thread should never be used.
for vcore in set(cpu_pinnings):
pcpu = cpu_pinnings[vcore]
if pcpu in core_mappings[0][0]:
index = core_mappings[0][0].index(pcpu)
self.assertNotIn(core_mappings[0][1][index],
cpu_pinnings.values())
else:
index = core_mappings[0][1].index(pcpu)
self.assertNotIn(core_mappings[0][0][index],
cpu_pinnings.values())
def test_cpu_dedicated_threads_prefer(self):
"""Ensure vCPUs *are* placed on thread siblings."""
flavor = self._create_flavor(
cpu_policy='dedicated', cpu_threads_policy='prefer')
server = self._create_server(flavor)
cpu_pinnings = self._get_cpu_pinning(server)
core_mappings = get_core_mappings()
self.assertEqual(len(cpu_pinnings), self.vcpus)
# if the 'prefer' policy is used, then when one thread is used
# the other should also be used.
for vcore in set(cpu_pinnings):
pcpu = cpu_pinnings[vcore]
if pcpu in core_mappings[0][0]:
index = core_mappings[0][0].index(pcpu)
self.assertIn(core_mappings[0][1][index],
cpu_pinnings.values())
else:
index = core_mappings[0][1].index(pcpu)
self.assertIn(core_mappings[0][0][index],
cpu_pinnings.values())
@decorators.skip_because(bug='0')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_pinned_server_to_unpinned(self):
flavor_a = self._create_flavor(
cpu_policy='dedicated', cpu_threads_policy='prefer')
server = self._create_server(flavor_a)
cpu_pinnings = self._get_cpu_pinning(server)
self.assertEqual(len(cpu_pinnings), self.vcpus)
flavor_b = self._create_flavor(cpu_policy='shared')
server = self._resize_server(server, flavor_b)
cpu_pinnings = self._get_cpu_pinning(server)
self.assertEqual(len(cpu_pinnings), 0)
@decorators.skip_because(bug='0')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_unpinned_server_to_pinned(self):
flavor_a = self._create_flavor(cpu_policy='shared')
server = self._create_server(flavor_a)
cpu_pinnings = self._get_cpu_pinning(server)
self.assertEqual(len(cpu_pinnings), 0)
flavor_b = self._create_flavor(
cpu_policy='dedicated', cpu_threads_policy='prefer')
server = self._resize_server(server, flavor_b)
cpu_pinnings = self._get_cpu_pinning(server)
self.assertEqual(len(cpu_pinnings), self.vcpus)
def test_reboot_pinned_server(self):
flavor_a = self._create_flavor(
cpu_policy='dedicated', cpu_threads_policy='prefer')
server = self._create_server(flavor_a)
cpu_pinnings = self._get_cpu_pinning(server)
self.assertEqual(len(cpu_pinnings), self.vcpus)
server = self._reboot_server(server, 'HARD')
cpu_pinnings = self._get_cpu_pinning(server)
self.assertEqual(len(cpu_pinnings), self.vcpus)
def test_oversubscribed_server(self):
flavor = self._create_flavor(
cpu_policy='dedicated', cpu_threads_policy='prefer')
        # TODO(sfinucan) - this relies on the default CPU quota of 20,
        # which isn't truly representative. Find out how to change the
        # quotas programmatically.
for _ in xrange(0, 5):
self._create_server(flavor)
self.assertRaises(lib_exc.Forbidden, self._create_server, flavor)
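The arithmetic behind the loop above, spelled out (a sketch, assuming the default cores quota of 20 noted in the TODO and the 4-vCPU flavor used by these tests):

vcpus_per_server = 4
cores_quota = 20
assert vcpus_per_server * 5 == cores_quota   # so the sixth boot exceeds the quota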


@@ -0,0 +1,122 @@
import subprocess
from tempest import clients
from tempest.common import cred_provider
from tempest.common import waiters
from tempest.scenario import manager
from tempest_lib.common.utils import data_utils
# Using 2M hugepages
HUGEPAGE_SIZE = 2048
def command(args, args2=None):
    '''Return the output of the given command(s).

    Input: up to 2 commands
    Output: string containing the output of these commands

    Note: using shell=False means that the following are unsupported:
    - Pipes: separate your commands,
      e.g. "cat /dev/null | grep anything" ->
      ["cat", "/dev/null"], ["grep", "anything"]
    - Wildcards: use glob to expand wildcards in dir listings,
      e.g. ["cat", "/proc/*info"] ->
      ["cat"] + glob.glob("/proc/*info")
    - Strings in commands: split manually,
      e.g. awk {'print $2'} -> str.split()[1]
    '''
if args2:
process1 = subprocess.Popen(args, stdout=subprocess.PIPE,
shell=False)
process2 = subprocess.Popen(args2, stdin=process1.stdout,
stdout=subprocess.PIPE, shell=False)
        # Allow process1 to receive a SIGPIPE if process2 exits early.
process1.stdout.close()
return process2.communicate()[0]
else:
return subprocess.Popen(args,
stdout=subprocess.PIPE,
shell=False).communicate()[0]
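For example, the pipe-splitting convention from the docstring looks like this in practice (a sketch; the path assumes a Linux host):

# equivalent to "cat /proc/meminfo | grep HugePages"
hugepage_counters = command(["cat", "/proc/meminfo"], ["grep", "HugePages"])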
def _get_number_free_hugepages(pagesize=HUGEPAGE_SIZE):
    # original command:
    # "cat /sys/kernel/mm/hugepages/hugepages-${size}kB/free_hugepages"
return command(["cat",
"/sys/kernel/mm/hugepages/hugepages-{}kB/free_hugepages"
.format(pagesize)])
class TestHugepages(manager.ScenarioTest):
run_ssh = True
disk_config = 'AUTO'
@classmethod
def setup_credentials(cls):
super(TestHugepages, cls).setup_credentials()
cls.manager = clients.Manager(
credentials=cred_provider.get_configured_credentials(
'identity_admin', fill_in=False))
def setUp(self):
super(TestHugepages, self).setUp()
self.meta = {'hello': 'world'}
self.accessIPv4 = '1.1.1.1'
self.name = data_utils.rand_name('server')
self.client = self.servers_client
cli_resp = self.create_server(
name=self.name,
flavor=self.create_flavor_with_extra_specs(),
)
self.server_initial = cli_resp
waiters.wait_for_server_status(self.client, self.server_initial['id'],
'ACTIVE')
self.server = self.client.show_server(self.server_initial['id'])
def create_flavor_with_extra_specs(self, name='hugepages_flavor', count=1):
flavor_with_hugepages_name = data_utils.rand_name(name)
flavor_with_hugepages_id = data_utils.rand_int_id(start=1000)
ram = 64
vcpus = 1
disk = 0
# set numa pagesize
extra_specs = {"hw:mem_page_size": str(HUGEPAGE_SIZE)}
# Create a flavor with extra specs
resp = (self.flavors_client.
create_flavor(name=flavor_with_hugepages_name,
ram=ram, vcpus=vcpus, disk=disk,
id=flavor_with_hugepages_id))
self.flavors_client.set_flavor_extra_spec(flavor_with_hugepages_id,
**extra_specs)
self.addCleanup(self.flavor_clean_up, flavor_with_hugepages_id)
self.assertEqual(200, resp.response.status)
return flavor_with_hugepages_id
def flavor_clean_up(self, flavor_id):
resp = self.flavors_client.delete_flavor(flavor_id)
self.assertEqual(resp.response.status, 202)
self.flavors_client.wait_for_resource_deletion(flavor_id)
def test_hugepage_backed_instance(self):
# Check system hugepages
hugepages_init = int(_get_number_free_hugepages())
# Calc expected hugepages
# flavor memory/hugepage_size, rounded up
# create instance with hugepages flavor
flavor_id = self.create_flavor_with_extra_specs("hugepages_flavor")
self.create_server(wait_on_boot=True, flavor=flavor_id)
required_hugepages = 64 / (HUGEPAGE_SIZE / 1024.) # ram/hugepages_size
expected_hugepages = int(hugepages_init - required_hugepages)
actual_hugepages = int(_get_number_free_hugepages(HUGEPAGE_SIZE))
self.assertEqual(required_hugepages, 32)
self.assertEqual(expected_hugepages, actual_hugepages)
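The expected-page arithmetic used above, spelled out (using the 64 MB flavor RAM and 2048 kB page size defined in this module):

hugepage_size_kb = 2048                  # HUGEPAGE_SIZE
flavor_ram_mb = 64                       # RAM of the test flavor
required = flavor_ram_mb / (hugepage_size_kb / 1024.)   # 64 MB / 2 MB pages
assert required == 32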


@@ -0,0 +1,226 @@
# Copyright 2015 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import multiprocessing
from oslo_concurrency import processutils
from oslo_log import log as logging
from tempest import config
from tempest.scenario import manager
from tempest.scenario import utils as test_utils
from tempest import test
from tempest_lib.common.utils import data_utils
CONF = config.CONF
LOG = logging.getLogger(__name__)
load_tests = test_utils.load_tests_input_scenario_utils
class TestServerNumaBase(manager.NetworkScenarioTest):
credentials = ['admin']
@classmethod
def setup_clients(cls):
cls.manager = cls.admin_manager
super(manager.NetworkScenarioTest, cls).setup_clients()
# Use admin client by default
def setUp(self):
super(TestServerNumaBase, self).setUp()
        # Set up the image and flavor for the test instance.
        # Support both configured and injected values.
if not hasattr(self, 'image_ref'):
self.image_ref = CONF.compute.image_ref
if not hasattr(self, 'flavor_ref'):
self.flavor_ref = CONF.compute.flavor_ref
self.image_utils = test_utils.ImageUtils(self.manager)
if not self.image_utils.is_flavor_enough(self.flavor_ref,
self.image_ref):
raise self.skipException(
'{image} does not fit in {flavor}'.format(
image=self.image_ref, flavor=self.flavor_ref
)
)
self.run_ssh = True
self.keypair = self.create_keypair()
self.ssh_user = self.image_utils.ssh_user(self.image_ref)
LOG.debug('Starting test for i:{image}, f:{flavor}. '
'Run ssh: {ssh}, user: {ssh_user}'.format(
image=self.image_ref, flavor=self.flavor_ref,
ssh=self.run_ssh, ssh_user=self.ssh_user))
def create_flavor_with_numa(self,
numa_nodes=2,
ram=2048,
vcpus=4,
disk=0,
                                specs=None):
flavor_with_numa = data_utils.rand_name('numa_flavor')
flavor_with_numa_id = data_utils.rand_int_id(start=1000)
        # copy so we never mutate a caller-supplied (or shared default) dict
        extra_specs = dict(specs) if specs else {}
extra_specs["hw:numa_nodes"] = str(numa_nodes)
# Create a flavor with extra specs
resp = (self.flavors_client.create_flavor(name=flavor_with_numa,
ram=ram, vcpus=vcpus,
disk=disk,
id=flavor_with_numa_id))
self.flavors_client.set_flavor_extra_spec(flavor_with_numa_id,
**extra_specs)
self.addCleanup(self.flavor_clean_up, flavor_with_numa_id)
self.assertEqual(200, resp.response.status)
return flavor_with_numa_id
def flavor_clean_up(self, flavor_id):
resp = self.flavors_client.delete_flavor(flavor_id)
self.assertEqual(resp.response.status, 202)
self.flavors_client.wait_for_resource_deletion(flavor_id)
def boot_instance(self, flavor=None):
# Create server with image and flavor from input scenario
security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'key_name': self.keypair['name'],
'security_groups': security_groups
}
if flavor is None:
flavor = self.create_flavor_with_numa()
self.instance = self.create_server(
image=self.image_ref,
flavor=flavor,
create_kwargs=create_kwargs)
def verify_ssh(self):
# Obtain a floating IP
floating_ip = self.floating_ips_client.create_floating_ip()[
'floating_ip']
self.addCleanup(self.delete_wrapper,
self.floating_ips_client.delete_floating_ip,
floating_ip['id'])
# Attach a floating IP
self.floating_ips_client.associate_floating_ip_to_server(
floating_ip['ip'], self.instance['id'])
# Check ssh
return self.get_remote_client(
server_or_ip=floating_ip['ip'],
username='cirros',
private_key=self.keypair['private_key'])
def get_placement(self, vcpu):
out, _ = processutils.execute('pgrep --full %s' % self.instance['id'],
shell=True)
if not out:
return
cgroup, _ = processutils.execute('grep name /proc/%s/cgroup'
% out.strip(), shell=True)
cgroup = cgroup.split(":")[-1].strip()
placement = []
for i in range(vcpu):
cpus, _ = processutils.execute('cgget -n -v -r cpuset.cpus %s'
% (cgroup.replace('\\', '\\\\') +
'/vcpu' + str(i)), shell=True)
placement.append(cpus.strip())
return placement
def get_numa_nodes(self):
cpu_per_soc = self._get_cores()
        ret = [str(x * cpu_per_soc) + '-' +
               str(x * cpu_per_soc + cpu_per_soc - 1) + ',' +
               str(x * cpu_per_soc + 2 * cpu_per_soc) + '-' +
               str(x * cpu_per_soc + 3 * cpu_per_soc - 1)
               for x in range(2)]
        return ret
def _get_cores(self):
# get number of real CPUs per socket, assuming a dual-socket,
# HT-enabled board (2 * 2)
cores = multiprocessing.cpu_count() / (2 * 2)
return cores
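To make the placement bookkeeping concrete, a small sketch (hypothetical values, assuming an 8-CPU dual-socket HT host) of what these helpers return and how the tests relate them:

# get_numa_nodes(): host CPU list per NUMA node (cores, then HT siblings)
host_nodes = ['0-1,4-5', '2-3,6-7']
# get_placement(4): cpuset.cpus of each vCPU cgroup; with a two-node
# guest topology, vCPU pairs land on the same host node
placement = ['0-1,4-5', '0-1,4-5', '2-3,6-7', '2-3,6-7']
guest_numa = [host_nodes.index(x) for x in placement]
assert guest_numa == [0, 0, 1, 1]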
class TestServerNumaTopo(TestServerNumaBase):
"""
    This smoke test case follows this basic set of operations:

    * Create a keypair for use in launching an instance
    * Create a security group to control network access to the instance
    * Add simple permissive rules to the security group
    * Launch an instance with a NUMA topology defined
    * SSH into the instance
    * Get the NUMA topology from the VM and check it is correct
    * Get the NUMA placement info for the VM from the host
    * Check that the placement is correct
    * Terminate the instance
"""
def get_numa_topology(self, rmt):
topo = {'nodes': []}
nodes = int(rmt.exec_command("ls /sys/devices/system/node"
" | grep node | wc -l"))
for i in range(nodes):
node = {}
node['cpu'] = rmt.exec_command("cat /sys/devices/system/node/"
"node%s/cpulist" % i)
node['mem'] = rmt.exec_command("cat /sys/devices/system/node/"
"node%s/meminfo" % i)
topo["nodes"].append(node)
return topo
@test.services('compute', 'network')
def test_server_numa(self):
self.security_group = self._create_security_group(
tenant_id=self.tenant_id)
self.boot_instance()
rmt_client = self.verify_ssh()
topo = self.get_numa_topology(rmt_client)
self.assertEqual(2, len(topo['nodes']))
self.assertNotEqual(None, rmt_client)
placement = self.get_placement(4)
self.assertEqual(placement[0], placement[1])
self.assertNotEqual(placement[1], placement[2])
self.assertEqual(placement[2], placement[3])
self.servers_client.delete_server(self.instance['id'])
class TestServerNumaPCI(TestServerNumaBase):
"""
    Tests in this class check that a PCI device assigned to the VM
    is affinitized to the same NUMA node as the VM itself.
"""
def get_pci_numa_node(self, addr):
out, _ = processutils.execute('cat /sys/bus/pci/devices/%s/numa_node'
% addr, shell=True)
return out.strip()
@test.services('compute', 'network')
def test_server_numa_pci(self):
self.security_group = self._create_security_group(
tenant_id=self.tenant_id)
self.boot_instance(self.create_flavor_with_numa(1, 1024, 2, 0,
{"pci_passthrough:alias":
"niantic_vf:1"}))
placement = self.get_placement(1)
host_nodes = self.get_numa_nodes()
guest_numa = [host_nodes.index(x) for x in placement]
pci_node = 1
self.assertIn(int(pci_node), guest_numa)
self.servers_client.delete_server(self.instance['id'])


@@ -1 +1,6 @@
wheel==0.23.0
-e git+https://github.com/openstack/neutron.git@master#egg=neutron
tempest-lib>=0.7.0
oslo.concurrency>=2.3.0 # Apache-2.0
oslo.log>=1.6.0 # Apache-2.0

tox.ini

@@ -39,3 +39,15 @@ ignore = E123,E125,E126,E128,E129,E265,H301,H305,H307,H402,H404,H405,H904,H803
show-source = True
builtins = _
exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build
[testenv:full]
sitepackages = False
setenv = VIRTUAL_ENV={envdir}
OS_TEST_PATH=./intel_nfv_ci_tests/integration
deps = setuptools
-r{toxinidir}/requirements.txt
commands =
find . -type f -name "*.pyc" -delete
bash tools/pretty_tox.sh '(^intel_nfv_ci_tests/integration) {posargs}'
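With this environment in place, the integration tests can then be run with, for example:

$ tox -e full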