Merge "Test plan v0.1"

This commit is contained in:
Jenkins 2016-09-23 11:37:54 +00:00 committed by Gerrit Code Review
commit 00f57db10d
10 changed files with 2206 additions and 0 deletions

2
doc/requirements.txt Normal file
View File

@ -0,0 +1,2 @@
docutils==0.9.1
sphinx>=1.1.2,!=1.2.0,<1.3

177
doc/test/Makefile Normal file
View File

@ -0,0 +1,177 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line,
# e.g. `make html BUILDDIR=out SPHINXOPTS=-W`.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = _build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

# Every target here is a command, not a file: declare all of them phony so a
# stray file with the same name cannot shadow a target.  (latexpdfja, texinfo,
# info, xml and pseudoxml were missing from the original list.)
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp \
	epub latex latexpdf latexpdfja text man texinfo info gettext changes \
	xml pseudoxml linkcheck doctest

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  xml        to make Docutils-native XML files"
	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
	rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/FuelNSXplugin.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/FuelNSXplugin.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/FuelNSXplugin"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/FuelNSXplugin"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

# Use $(MAKE), not bare `make`, so -j/-n and the jobserver propagate to the
# sub-make run inside the texinfo build directory.
info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	$(MAKE) -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

254
doc/test/conf.py Normal file
View File

@ -0,0 +1,254 @@
"""Copyright 2016 Mirantis, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
# Sphinx build configuration for the Fuel NSX-T plugin test documentation.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Fuel NSX-T plugin'
copyright = u'2016, Mirantis Inc.'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0-1.0.0-1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
#exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'FuelNSXplugindoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # Produce a one-sided document and allow chapters to start on any page.
    'classoptions': ',openany,oneside',
    'babel': '\\usepackage[english]{babel}',
    # Raw strings are used here: the original '\setcounter' relied on Python
    # keeping the unknown escape '\s' literal, which raises a
    # DeprecationWarning (an error in future Python versions). The runtime
    # value is unchanged.
    'preamble': r'\setcounter{tocdepth}{3} '
                r'\setcounter{secnumdepth}{0}',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'nsx-test-plan-' + version + '.tex',
     u'Fuel NSX-T plugin testing documentation',
     u'Mirantis Inc.', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'fuelnsxplugin', u'Fuel NSX-T plugin testing documentation',
     [u'Mirantis Inc.'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'FuelNSXplugin', u'Fuel NSX-T plugin testing documentation',
     u'Mirantis Inc.', 'FuelNSXplugin', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

# Insert footnotes where they are defined instead of at the end (rst2pdf).
pdf_inline_footnotes = True

11
doc/test/index.rst Normal file
View File

@ -0,0 +1,11 @@
Fuel NSX-T plugin's testing documentation
=========================================
Testing documents
-----------------
.. toctree::
:glob:
:maxdepth: 3
source/nsx-t_test_plan

View File

@ -0,0 +1,256 @@
=================================
Test Plan for NSX-T plugin v1.0.0
=================================
************
Introduction
************
Purpose
=======
The main purpose of this document is to describe the Quality Assurance
activities required to ensure that the Fuel plugin for the VMware NSX driver
is ready for production. The project will be able to offer VMware NSX
integration functionality with MOS. The scope of this plan defines the
following objectives:
* Identify testing activities;
* Outline testing approach, test types, test cycle that will be used;
* List of metrics and deliverable elements;
* List of items for testing and out of testing scope;
* Detect exit criteria in testing purposes;
* Describe test environment.
Scope
=====
Fuel NSX-T plugin includes the NSX-T plugin for Neutron, which is developed by
a third party. This test plan covers the full functionality of the Fuel NSX-T
plugin, including basic scenarios related to the NSX Neutron plugin.
Following test types should be provided:
* Smoke/BVT tests
* Integration tests
* System tests
* Destructive tests
* GUI tests
Performance testing will be executed on the scale lab and a custom set of
Rally scenarios must be run with an NSX environment. Configuration, environment
and scenarios for performance/scale testing should be determined separately.
Intended Audience
=================
This document is intended for project team staff (QA and Dev engineers and
managers) and all other persons who are interested in testing results.
Limitation
==========
Plugin (or its components) has the following limitations:
* VMware NSX-T plugin can be enabled only with Neutron tunnel segmentation.
* NSX Transformers Manager 1.0.0 and 1.0.1 are supported.
Product compatibility matrix
============================
.. list-table:: product compatibility matrix
:widths: 15 10 30
:header-rows: 1
* - Requirement
- Version
- Comment
* - MOS
- 9.0
-
* - OpenStack release
- Mitaka with Ubuntu 14.04
-
* - vSphere
- 6.0
-
* - VMware NSX Transformers
- 1.0.0, 1.0.1
-
**************************************
Evaluation Mission and Test Motivation
**************************************
Project main goal is to build a MOS plugin that integrates a Neutron VMware
NSX-T plugin. This plugin gives opportunity to utilize KVM and VMware compute
cluster. The plugin must be compatible with the version 9.0 of Mirantis
OpenStack and should be tested with software/hardware described in
`product compatibility matrix`_.
See the VMware NSX-T plugin specification for more details.
Evaluation mission
==================
* Find important problems with integration of Neutron VMware NSX-T plugin.
* Verify a specification.
* Provide tests for maintenance update.
* Lab environment deployment.
* Deploy MOS with developed plugin installed.
* Create and run specific tests for plugin/deployment.
* Documentation.
*****************
Target Test Items
*****************
* Install/uninstall Fuel NSX-T plugin
* Deploy Cluster with Fuel NSX-T plugin by Fuel
* Roles of nodes
* controller
* mongo
* compute
* compute-vmware
* cinder-vmware
* Hypervisors:
* Qemu+Vcenter
* KVM
* Storage:
* Ceph
* Cinder
* VMWare vCenter/ESXi datastore for images
* Network
* Neutron with NSX-T plugin
* Additional components
* Ceilometer
* Health Check
* Upgrade master node
* MOS and VMware-NSX-T plugin
* Computes(Nova)
* Launch and manage instances
* Launch instances in batch
* Networks (Neutron)
* Create and manage public and private networks.
* Create and manage routers.
* Port binding / disabling
* Security groups
* Assign vNIC to a VM
* Connection between instances
* Horizon
* Create and manage projects
* Glance
* Create and manage images
* GUI
* Fuel UI
* CLI
* Fuel CLI
*************
Test approach
*************
The project test approach consists of Smoke, Integration, System, Regression
Failover and Acceptance test levels.
**Smoke testing**
The goal of smoke testing is to ensure that the most critical features of Fuel
VMware NSX-T plugin work after new build delivery. Smoke tests will be used by
QA to accept software builds from Development team.
**Integration and System testing**
The goal of integration and system testing is to ensure that new or modified
components of Fuel and MOS work effectively with Fuel VMware NSX-T plugin
without gaps in data flow.
**Regression testing**
The goal of regression testing is to verify that key features of Fuel VMware
NSX-T plugin are not affected by any changes performed during preparation to
release (includes defects fixing, new features introduction and possible
updates).
**Failover testing**
Failover and recovery testing ensures that the target-of-test can successfully
failover and recover from a variety of hardware, software, or network
malfunctions with undue loss of data or data integrity.
**Acceptance testing**
The goal of acceptance testing is to ensure that Fuel VMware NSX-T plugin has
reached a level of stability that meets requirements and acceptance criteria.
***********************
Entry and exit criteria
***********************
Criteria for test process starting
==================================
Before test process can be started it is needed to make some preparation
actions - to execute important preconditions. The following steps must be
executed successfully for starting test phase:
* all project requirements are reviewed and confirmed;
* implementation of testing features has finished (a new build is ready for testing);
* implementation code is stored in GIT;
* test environment is prepared with correct configuration, installed all needed software, hardware;
* test environment contains the last delivered build for testing;
* test plan is ready and confirmed internally;
* implementation of manual tests and autotests (if any) has finished.
Feature exit criteria
=====================
Testing of a feature can be finished when:
* All planned tests (prepared before) for the feature are executed; no defects are found during this run;
* All planned tests for the feature are executed; defects found during this run are verified or confirmed to be acceptable (known issues);
* The time for testing of that feature according to the project plan has run out and Project Manager confirms that no changes to the schedule are possible.
Suspension and resumption criteria
==================================
Testing of a particular feature is suspended if there is a blocking issue
which prevents tests execution. Blocking issue can be one of the following:
* Testing environment for the feature is not ready
* Testing environment is unavailable due to failure
* Feature has a blocking defect, which prevents further usage of this feature and there is no workaround available
* CI tests fail
************
Deliverables
************
List of deliverables
====================
Project testing activities are to be resulted in the following reporting documents:
* Test plan
* Test report
* Automated test cases
Acceptance criteria
===================
* All acceptance criteria for user stories are met.
* All test cases are executed. BVT tests are passed
* Critical and high issues are fixed
* All required documents are delivered
* Release notes including a report on the known errors of that release
**********
Test cases
**********
.. include:: test_suite_smoke.rst
.. include:: test_suite_integration.rst
.. include:: test_suite_scale.rst
.. include:: test_suite_system.rst
.. include:: test_suite_failover.rst

View File

@ -0,0 +1,129 @@
Failover
========
Verify deleting of Fuel NSX-T plugin is impossible if it's used by created cluster.
-----------------------------------------------------------------------------------
ID
##
nsxt_uninstall_negative
Description
###########
It is impossible to remove plugin while at least one environment exists.
Complexity
##########
smoke
Steps
#####
1. Install NSX-T plugin on master node.
2. Create a new environment with enabled NSX-T plugin.
3. Try to delete plugin via cli from master node::
fuel plugins --remove nsxt==1.0.0
Expected result
###############
Alert: "400 Client Error: Bad Request (Can't delete plugin which is enabled for some environment.)" should be displayed.
Check plugin functionality after shutdown primary controller.
-------------------------------------------------------------
ID
##
nsxt_shutdown_controller
Description
###########
Check plugin functionality after shutdown primary controller.
Complexity
##########
core
Steps
#####
1. Log in to the Fuel with preinstalled plugin and deployed environment with 3 controllers.
2. Log in to Horizon.
3. Create vcenter VM and check connectivity to outside world from VM.
4. Shutdown primary controller.
5. Ensure that VIPs are moved to other controller.
6. Ensure that there is connectivity to outside world from created VM.
7. Create a new network and attach it to default router.
8. Create a vcenter VM with new network and check network connectivity via ICMP.
Expected result
###############
Networking works correctly after failure of primary controller.
Check cluster functionality after reboot vcenter.
-------------------------------------------------
ID
##
nsxt_reboot_vcenter
Description
###########
Test verifies that system functionality is ok when vcenter has been rebooted.
Complexity
##########
core
Steps
#####
1. Log in to the Fuel with preinstalled plugin and deployed environment.
2. Log in to Horizon.
3. Launch vcenter instance VM_1 with image TestVM-VMDK and flavor m1.tiny.
4. Launch vcenter instance VM_2 with image TestVM-VMDK and flavor m1.tiny.
5. Check connection between VMs, send ping from VM_1 to VM_2 and vice versa.
6. Reboot vcenter::
vmrun -T ws-shared -h https://localhost:443/sdk -u vmware -p pass
reset "[standard] vcenter/vcenter.vmx"
7. Check that controller lost connection with vCenter.
8. Wait for vCenter is online.
9. Ensure that all instances from vCenter are displayed in dashboard.
10. Ensure there is connectivity between vcenter1's and vcenter2's VMs.
11. Run OSTF.
Expected result
###############
Cluster should be deployed and all OSTF test cases should be passed. Ping should get response.

View File

@ -0,0 +1,54 @@
Integration
===========
Deploy cluster with NSX-T plugin and ceilometer.
------------------------------------------------
ID
##
nsxt_ceilometer
Description
###########
Check deployment of environment with Fuel NSX-T plugin and Ceilometer.
Complexity
##########
core
Steps
#####
1. Log in to the Fuel UI with preinstalled NSX-T plugin.
2. Create a new environment with following parameters:
* Compute: KVM/QEMU with vCenter
* Networking: Neutron with NSX-T plugin
* Storage: default
* Additional services: Ceilometer
3. Add nodes with following roles:
* Controller + Mongo
* Controller + Mongo
* Controller + Mongo
* Compute-vmware
* Compute
4. Configure interfaces on nodes.
5. Configure network settings.
6. Enable and configure NSX-T plugin.
7. Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on controllers and compute-vmware.
8. Verify networks.
9. Deploy cluster.
10. Run OSTF.
Expected result
###############
Cluster should be deployed and all OSTF test cases should be passed.

View File

@ -0,0 +1,180 @@
Scale
=====
Check scale actions for controller nodes.
-----------------------------------------
ID
##
nsxt_add_delete_controller
Description
###########
Verifies that system functionality is ok when controller has been removed.
Complexity
##########
core
Steps
#####
1. Log in to the Fuel with preinstalled NSX-T plugin.
2. Create a new environment with following parameters:
* Compute: KVM/QEMU with vCenter
* Networking: Neutron with NSX-T plugin
* Storage: default
3. Add nodes with following roles:
* Controller
* Controller
* Controller
* Controller
* Cinder-vmware
* Compute-vmware
4. Configure interfaces on nodes.
5. Configure network settings.
6. Enable and configure NSX-T plugin.
7. Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on controllers and compute-vmware.
8. Deploy cluster.
9. Run OSTF.
10. Launch 1 KVM and 1 vcenter VMs.
11. Remove node with controller role.
12. Redeploy cluster.
13. Check that all instances are in place.
14. Run OSTF.
15. Add controller.
16. Redeploy cluster.
17. Check that all instances are in place.
18. Run OSTF.
Expected result
###############
Cluster should be deployed and all OSTF test cases should be passed.
Check scale actions for compute nodes.
--------------------------------------
ID
##
nsxt_add_delete_compute_node
Description
###########
Verify that system functionality is ok after redeploy.
Complexity
##########
core
Steps
#####
1. Connect to the Fuel web UI with preinstalled NSX-T plugin.
2. Create a new environment with following parameters:
* Compute: KVM/QEMU
* Networking: Neutron with NSX-T plugin
* Storage: default
* Additional services: default
3. Add nodes with following roles:
* Controller
* Controller
* Controller
* Compute
4. Configure interfaces on nodes.
5. Configure network settings.
6. Enable and configure NSX-T plugin.
7. Deploy cluster.
8. Run OSTF.
9. Launch KVM vm.
10. Add node with compute role.
11. Redeploy cluster.
12. Check that all instances are in place.
13. Run OSTF.
14. Remove node with compute role from base installation.
15. Redeploy cluster.
16. Check that all instances are in place.
17. Run OSTF.
Expected result
###############
Changing of cluster configuration was successful. Cluster should be deployed and all OSTF test cases should be passed.
Check scale actions for compute-vmware nodes.
---------------------------------------------
ID
##
nsxt_add_delete_compute_vmware_node
Description
###########
Verify that system functionality is ok after redeploy.
Complexity
##########
core
Steps
#####
1. Connect to the Fuel web UI with preinstalled NSX-T plugin.
2. Create a new environment with following parameters:
* Compute: KVM/QEMU with vCenter
* Networking: Neutron with NSX-T plugin
* Storage: default
* Additional services: default
3. Add nodes with following roles:
* Controller
* Controller
* Controller
* Compute-vmware
4. Configure interfaces on nodes.
5. Configure network settings.
6. Enable and configure NSX-T plugin.
7. Configure VMware vCenter Settings. Add 1 vSphere cluster and configure Nova Compute instance on compute-vmware.
8. Deploy cluster.
9. Run OSTF.
10. Launch vcenter vm.
11. Remove node with compute-vmware role.
12. Reconfigure vcenter compute clusters.
13. Redeploy cluster.
14. Check vm instance has been removed.
15. Run OSTF.
16. Add node with compute-vmware role.
17. Reconfigure vcenter compute clusters.
18. Redeploy cluster.
19. Run OSTF.
Expected result
###############
Changing of cluster configuration was successful. Cluster should be deployed and all OSTF test cases should be passed.

View File

@ -0,0 +1,363 @@
Smoke
=====
Install Fuel VMware NSX-T plugin.
---------------------------------
ID
##
nsxt_install
Description
###########
Check that plugin can be installed.
Complexity
##########
smoke
Steps
#####
1. Connect to the Fuel master node via ssh.
2. Upload NSX-T plugin.
3. Install NSX-T plugin.
4. Run command 'fuel plugins'.
5. Check name, version and package version of plugin.
Expected result
###############
Output::
[root@nailgun ~]# fuel plugins --install nsx-t-1.0-1.0.0-1.noarch.rpm
Loaded plugins: fastestmirror, priorities
Examining nsx-t-1.0-1.0.0-1.noarch.rpm: nsx-t-1.0-1.0.0-1.noarch
Marking nsx-t-1.0-1.0.0-1.noarch.rpm to be installed
Resolving Dependencies
--> Running transaction check
---> Package nsx-t-1.0.noarch 0:1.0.0-1 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
Package Arch Version Repository Size
Installing:
nsx-t-1.0 noarch 1.0.0-1 /nsx-t-1.0-1.0.0-1.noarch 20 M
Transaction Summary
Install 1 Package
Total size: 20 M
Installed size: 20 M
Downloading packages:
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : nsx-t-1.0-1.0.0-1.noarch 1/1
Verifying : nsx-t-1.0-1.0.0-1.noarch 1/1
Installed:
nsx-t-1.0.noarch 0:1.0.0-1
Complete!
Plugin nsx-t-1.0-1.0.0-1.noarch.rpm was successfully installed.
Plugin was installed successfully using cli.
Uninstall Fuel VMware NSX-T plugin.
-----------------------------------
ID
##
nsxt_uninstall
Description
###########
Check that plugin can be removed.
Complexity
##########
smoke
Steps
#####
1. Connect to fuel node with preinstalled NSX-T plugin via ssh.
2. Remove NSX-T plugin.
3. Run command 'fuel plugins' to ensure the NSX-T plugin has been removed.
Expected result
###############
Output::
[root@nailgun ~]# fuel plugins --remove nsx-t==1.0.0
Loaded plugins: fastestmirror, priorities
Resolving Dependencies
--> Running transaction check
---> Package nsx-t-1.0.noarch 0:1.0.0-1 will be erased
--> Finished Dependency Resolution
Dependencies Resolved
Package Arch Version Repository Size
Removing:
nsx-t-1.0 noarch 1.0.0-1 @/nsx-t-1.0-1.0.0-1.noarch 20 M
Transaction Summary
Remove 1 Package
Installed size: 20 M
Downloading packages:
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Erasing : nsx-t-1.0-1.0.0-1.noarch 1/1
Verifying : nsx-t-1.0-1.0.0-1.noarch 1/1
Removed:
nsx-t-1.0.noarch 0:1.0.0-1
Complete!
Plugin nsx-t==1.0.0 was successfully removed.
Plugin was removed.
Verify that all UI elements of NSX-T plugin section meet the requirements.
---------------------------------------------------------------------------
ID
##
nsxt_gui
Description
###########
Verify that all UI elements of NSX-T plugin section meet the requirements.
Complexity
##########
smoke
Steps
#####
1. Login to the Fuel web UI.
2. Click on the Networks tab.
3. Verify that section of NSX-T plugin is present under the Other menu option.
4. Verify that check box 'NSX-T plugin' is enabled by default.
5. Verify that all labels of 'NSX-T plugin' section have the same font style and colour.
6. Verify that all elements of NSX-T plugin section are vertical aligned.
Expected result
###############
All elements of NSX-T plugin section are regimented.
Deployment with plugin, controller and vmware datastore backend.
----------------------------------------------------------------
ID
##
nsxt_smoke
Description
###########
Check deployment of environment with NSX-T plugin and one controller.
Complexity
##########
smoke
Steps
#####
1. Log in to the Fuel with preinstalled NSX-T plugin.
2. Create a new environment with following parameters:
* Compute: KVM, QEMU with vCenter
* Networking: Neutron with NSX-T plugin
* Storage: default
* Additional services: default
3. Add nodes with following roles:
* Controller
4. Configure interfaces on nodes.
5. Configure network settings.
6. Enable and configure NSX-T plugin.
7. Configure VMware vCenter Settings. Add 1 vSphere cluster and configure Nova Compute instances on controllers.
8. Deploy cluster.
9. Run OSTF.
Expected result
###############
Cluster should be deployed successfully and all OSTF tests should be passed.
Deploy HA cluster with NSX-T plugin.
------------------------------------
ID
##
nsxt_bvt
Description
###########
Check deployment of environment with NSX-T plugin, 3 Controllers, 1 Compute, 3 CephOSD, cinder-vmware + compute-vmware roles.
Complexity
##########
smoke
Steps
#####
1. Connect to the Fuel web UI with preinstalled NSX-T plugin.
2. Create a new environment with following parameters:
* Compute: KVM, QEMU with vCenter
* Networking: Neutron with NSX-T plugin
* Storage: Ceph RBD for images (Glance)
* Additional services: default
3. Add nodes with following roles:
* Controller
* Controller
* Controller
* CephOSD
* CephOSD
* CephOSD
* Compute-vmware, cinder-vmware
* Compute
4. Configure interfaces on nodes.
5. Configure network settings.
6. Enable and configure NSX-T plugin.
7. Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on controllers and compute-vmware.
8. Verify networks.
9. Deploy cluster.
10. Run OSTF.
Expected result
###############
Cluster should be deployed and all OSTF tests should be passed.
Check option 'Bypass NSX Manager certificate verification' works correctly
--------------------------------------------------------------------------
ID
##
nsxt_insecure_false
Description
###########
Check that insecure checkbox functions properly.
Complexity
##########
core
Steps
#####
1. Provide CA certificate via web UI or through system storage.
2. Install NSX-T plugin.
3. Enable plugin on tab Networks -> NSX-T plugin.
4. Fill the form with corresponding values.
5. Uncheck checkbox 'Bypass NSX Manager certificate verification'.
6. Deploy cluster with one controller.
7. Run OSTF.
Expected result
###############
Cluster should be deployed and all OSTF tests should be passed.
Verify that nsxt driver is configured properly after enabling NSX-T plugin
--------------------------------------------------------------------------
ID
##
nsxt_config_ok
Description
###########
Check that all parameters of nsxt driver config files have been filled up with the values entered from the GUI. Applicable values that are typically used are described in plugin docs. Root and intermediate certificates are signed (see attachment).
Complexity
##########
advanced
Steps
#####
1. Install NSX-T plugin.
2. Enable plugin on tab Networks -> NSX-T plugin.
3. Fill the form with corresponding values.
4. Do all things that are necessary to provide interoperability of NSX-T plugin and NSX Manager with certificate.
5. Check Additional settings. Fill the form with corresponding values. Save settings by pressing the button.
Expected result
###############
Check that nsx.ini on controller nodes is properly configured.

View File

@ -0,0 +1,780 @@
System
======
Setup for system tests
----------------------
ID
##
nsxt_ha_mode
Description
###########
Deploy environment with 3 controllers and 1 Compute node. Nova Compute instances are running on controller nodes. It is a config for all system tests.
Complexity
##########
core
Steps
#####
1. Log in to the Fuel web UI with preinstalled NSX-T plugin.
2. Create a new environment with following parameters:
* Compute: KVM, QEMU with vCenter
* Networking: Neutron with NSX-T plugin
* Storage: default
* Additional services: default
3. Add nodes with following roles:
* Controller
* Controller
* Controller
* Compute
4. Configure interfaces on nodes.
5. Configure network settings.
6. Enable and configure NSX-T plugin.
7. Configure VMware vCenter Settings. Add 1 vSphere cluster and configure Nova Compute instance on controllers.
8. Verify networks.
9. Deploy cluster.
10. Run OSTF.
Expected result
###############
Cluster should be deployed and all OSTF test cases should be passed.
Check abilities to create and terminate networks on NSX.
--------------------------------------------------------
ID
##
nsxt_create_terminate_networks
Description
###########
Verifies that creation of network is translated to vcenter.
Complexity
##########
core
Steps
#####
1. Setup for system tests.
2. Log in to Horizon Dashboard.
3. Add private networks net_01 and net_02.
4. Check that networks are present in the vcenter.
5. Remove private network net_01.
6. Check that network net_01 has been removed from the vcenter.
7. Add private network net_01.
Expected result
###############
No errors.
Check abilities to bind port on NSX to VM, disable and enable this port.
------------------------------------------------------------------------
ID
##
nsxt_ability_to_bind_port
Description
###########
Verifies that the system cannot manipulate the port (plugin limitation).
Complexity
##########
core
Steps
#####
1. Log in to Horizon Dashboard.
2. Navigate to Project -> Compute -> Instances
3. Launch instance VM_1 with image TestVM-VMDK and flavor m1.tiny in vcenter az.
4. Launch instance VM_2 with image TestVM and flavor m1.tiny in nova az.
5. Verify that VMs should communicate between each other. Send icmp ping from VM_1 to VM_2 and vice versa.
6. Disable NSX_port of VM_1.
7. Verify that VMs should communicate between each other. Send icmp ping from VM_2 to VM_1 and vice versa.
8. Enable NSX_port of VM_1.
9. Verify that VMs should communicate between each other. Send icmp ping from VM_1 to VM_2 and vice versa.
Expected result
###############
Pings should get a response.
Check abilities to assign multiple vNIC to a single VM.
-------------------------------------------------------
ID
##
nsxt_multi_vnic
Description
###########
Check abilities to assign multiple vNICs to a single VM.
Complexity
##########
core
Steps
#####
1. Setup for system tests.
2. Log in to Horizon Dashboard.
3. Add two private networks (net01 and net02).
4. Add one subnet (net01_subnet01: 192.168.101.0/24, net02_subnet01: 192.168.101.0/24) to each network.
NOTE: We have a constraint about network interfaces. One of subnets should have gateway and another should not. So disable gateway on that subnet.
5. Launch instance VM_1 with image TestVM-VMDK and flavor m1.tiny in vcenter az.
6. Launch instance VM_2 with image TestVM and flavor m1.tiny in nova az.
7. Check abilities to assign multiple vNIC net01 and net02 to VM_1.
8. Check abilities to assign multiple vNIC net01 and net02 to VM_2.
9. Send icmp ping from VM_1 to VM_2 and vice versa.
Expected result
###############
VM_1 and VM_2 should be attached to multiple vNIC net01 and net02. Pings should get a response.
Check connectivity between VMs attached to different networks with a router between them.
-----------------------------------------------------------------------------------------
ID
##
nsxt_connectivity_diff_networks
Description
###########
Test verifies that there is a connection between networks connected through the router.
Complexity
##########
core
Steps
#####
1. Setup for system tests.
2. Log in to Horizon Dashboard.
3. Add two private networks (net01 and net02).
4. Add one subnet (net01_subnet01: 192.168.101.0/24, net02_subnet01: 192.168.101.0/24) to each network. Disable gateway for all subnets.
5. Navigate to Project -> Compute -> Instances
6. Launch instances VM_1 and VM_2 in the network 192.168.101.0/24 with image TestVM-VMDK and flavor m1.tiny in vcenter az. Attach default private net as a NIC 1.
7. Launch instances VM_3 and VM_4 in the network 192.168.101.0/24 with image TestVM and flavor m1.tiny in nova az. Attach default private net as a NIC 1.
8. Verify that VMs of same networks should communicate
between each other. Send icmp ping from VM_1 to VM_2, VM_3 to VM_4 and vice versa.
9. Verify that VMs of different networks should not communicate
between each other. Send icmp ping from VM_1 to VM_3, VM_4 to VM_2 and vice versa.
10. Create Router_01, set gateway and add interface to external network.
11. Enable gateway on subnets. Attach private networks to router.
12. Verify that VMs of different networks should communicate between each other. Send icmp ping from VM_1 to VM_3, VM_4 to VM_2 and vice versa.
13. Add new Router_02, set gateway and add interface to external network.
14. Detach net_02 from Router_01 and attach to Router_02
15. Assign floating IPs for all created VMs.
16. Verify that VMs of different networks should communicate between each other by FIPs. Send icmp ping from VM_1 to VM_3, VM_4 to VM_2 and vice versa.
Expected result
###############
Pings should get a response.
Check isolation between VMs in different tenants.
-------------------------------------------------
ID
##
nsxt_different_tenants
Description
###########
Verifies isolation in different tenants.
Complexity
##########
core
Steps
#####
1. Setup for system tests.
2. Log in to Horizon Dashboard.
3. Create non-admin tenant test_tenant.
4. Navigate to Identity -> Projects.
5. Click on Create Project.
6. Type name test_tenant.
7. On tab Project Members add admin with admin and member.
Activate test_tenant project by selecting at the top panel.
8. Navigate to Project -> Network -> Networks
9. Create network with 2 subnet.
Create Router, set gateway and add interface.
10. Navigate to Project -> Compute -> Instances
11. Launch instance VM_1
12. Activate default tenant.
13. Navigate to Project -> Network -> Networks
14. Create network with subnet.
Create Router, set gateway and add interface.
15. Navigate to Project -> Compute -> Instances
16. Launch instance VM_2.
17. Verify that VMs on different tenants should not communicate between each other. Send icmp ping from VM_1 of admin tenant to VM_2 of test_tenant and vice versa.
Expected result
###############
Pings should not get a response.
Check connectivity between VMs with same ip in different tenants.
-----------------------------------------------------------------
ID
##
nsxt_same_ip_different_tenants
Description
###########
Verifies connectivity with same IP in different tenants.
Complexity
##########
advanced
Steps
#####
1. Setup for system tests.
2. Log in to Horizon Dashboard.
3. Create 2 non-admin tenants 'test_1' and 'test_2'.
4. Navigate to Identity -> Projects.
5. Click on Create Project.
6. Type name 'test_1' of tenant.
7. Click on Create Project.
8. Type name 'test_2' of tenant.
9. On tab Project Members add admin with admin and member.
10. In tenant 'test_1' create net1 and subnet1 with CIDR 10.0.0.0/24
11. In tenant 'test_1' create security group 'SG_1' and add rule that allows ingress icmp traffic
12. In tenant 'test_2' create net2 and subnet2 with CIDR 10.0.0.0/24
13. In tenant 'test_2' create security group 'SG_2'
14. In tenant 'test_1' add VM_1 of vcenter in net1 with ip 10.0.0.4 and 'SG_1' as security group.
15. In tenant 'test_1' add VM_2 of nova in net1 with ip 10.0.0.5 and 'SG_1' as security group.
16. In tenant 'test_2' create net1 and subnet1 with CIDR 10.0.0.0/24
17. In tenant 'test_2' create security group 'SG_1' and add rule that allows ingress icmp traffic
18. In tenant 'test_2' add VM_3 of vcenter in net1 with ip 10.0.0.4 and 'SG_1' as security group.
19. In tenant 'test_2' add VM_4 of nova in net1 with ip 10.0.0.5 and 'SG_1' as security group.
20. Assign floating IPs for all created VMs.
21. Verify that VMs with same ip on different tenants should communicate between each other by FIPs. Send icmp ping from VM_1 to VM_3, VM_2 to VM_4 and vice versa.
Expected result
###############
Pings should get a response.
Check connectivity of VMs to public network.
---------------------------------------------
ID
##
nsxt_public_network_availability
Description
###########
Verifies that public network is available.
Complexity
##########
core
Steps
#####
1. Setup for system tests.
2. Log in to Horizon Dashboard.
3. Create net01: net01_subnet, 192.168.111.0/24 and attach it to the router04
4. Launch instance VM_1 of vcenter az with image TestVM-VMDK and flavor m1.tiny in the net_04.
5. Launch instance VM_2 of nova az with image TestVM and flavor m1.tiny in the net_01.
6. Send ping from instances VM_1 and VM_2 to 8.8.8.8.
Expected result
###############
Pings should get a response.
Check connectivity VMs to public network with floating ip.
----------------------------------------------------------
ID
##
nsxt_floating_ip_to_public
Description
###########
Verifies that public network is available via floating ip.
Complexity
##########
core
Steps
#####
1. Setup for system tests.
2. Log in to Horizon Dashboard
3. Create net01: net01_subnet, 192.168.111.0/24 and attach it to the router04
4. Launch instance VM_1 of vcenter az with image TestVM-VMDK and flavor m1.tiny in the net_04. Associate floating ip.
5. Launch instance VM_2 of nova az with image TestVM and flavor m1.tiny in the net_01. Associate floating ip.
6. Send ping from instances VM_1 and VM_2 to 8.8.8.8.
Expected result
###############
Pings should get a response
Check abilities to create and delete security group.
----------------------------------------------------
ID
##
nsxt_create_and_delete_secgroups
Description
###########
Verifies that creation and removing security group works fine.
Complexity
##########
advanced
Steps
#####
1. Setup for system tests.
2. Log in to Horizon Dashboard.
3. Launch instance VM_1 in the tenant network net_02 with image TestVM-VMDK and flavor m1.tiny in vcenter az.
4. Launch instance VM_2 in the tenant network net_02 with image TestVM and flavor m1.tiny in nova az.
5. Create security groups SG_1 to allow ICMP traffic.
6. Add Ingress rule for ICMP protocol to SG_1
7. Attach SG_1 to VMs
8. Check ping between VM_1 and VM_2 and vice versa
9. Create security groups SG_2 to allow TCP traffic 22 port.
Add Ingress rule for TCP protocol to SG_2
10. Attach SG_2 to VMs.
11. ssh from VM_1 to VM_2 and vice versa.
12. Delete custom rules from SG_1 and SG_2.
13. Check ping and ssh aren't available from VM_1 to VM_2 and vice versa.
14. Add Ingress rule for ICMP protocol to SG_1.
15. Add Ingress rule for SSH protocol to SG_2.
16. Check ping between VM_1 and VM_2 and vice versa.
17. Check ssh from VM_1 to VM_2 and vice versa.
18. Attach VMs to default security group.
19. Delete security groups.
20. Check ping between VM_1 and VM_2 and vice versa.
21. Check SSH from VM_1 to VM_2 and vice versa.
Expected result
###############
We should be able to send ICMP and TCP traffic between VMs in different tenants.
Verify that only the associated MAC and IP addresses can communicate on the logical port.
-----------------------------------------------------------------------------------------
ID
##
nsxt_associated_addresses_communication_on_port
Description
###########
Verify that only the associated MAC and IP addresses can communicate on the logical port.
Complexity
##########
core
Steps
#####
1. Setup for system tests.
2. Log in to Horizon Dashboard.
3. Launch 2 instances in each az.
4. Verify that traffic can be successfully sent from and received on the MAC and IP address associated with the logical port.
5. Configure a new IP address from the subnet not like original one on the instance associated with the logical port.
* ifconfig eth0 down
* ifconfig eth0 192.168.99.14 netmask 255.255.255.0
* ifconfig eth0 up
6. Confirm that the instance cannot communicate with that IP address.
7. Revert IP address. Configure a new MAC address on the instance associated with the logical port.
* ifconfig eth0 down
* ifconfig eth0 hw ether 00:80:48:BA:d1:30
* ifconfig eth0 up
8. Confirm that the instance cannot communicate with that MAC address and the original IP address.
Expected result
###############
Instance should not communicate with new ip and mac addresses but it should communicate with old IP.
Check creation instance in the one group simultaneously.
--------------------------------------------------------
ID
##
nsxt_create_and_delete_vms
Description
###########
Verifies that system could create and delete several instances simultaneously.
Complexity
##########
core
Steps
#####
1. Setup for system tests.
2. Navigate to Project -> Compute -> Instances
3. Launch 5 instance VM_1 simultaneously with image TestVM-VMDK and flavor m1.tiny in vcenter az in default net_04.
4. All instances should be created without any error.
5. Launch 5 instance VM_2 simultaneously with image TestVM and flavor m1.tiny in nova az in default net_04.
6. All instances should be created without any error.
7. Check connection between VMs (ping, ssh)
8. Delete all VMs from horizon simultaneously.
Expected result
###############
All instances should be created and deleted without any error.
Verify that instances could be launched on enabled compute host
---------------------------------------------------------------
ID
##
nsxt_disable_hosts
Description
###########
Check instance creation on enabled cluster.
Complexity
##########
core
Steps
#####
1. Setup cluster with 3 controllers, 2 Compute nodes and cinder-vmware +
compute-vmware role.
2. Assign instances in each az.
3. Disable one of compute host with vCenter cluster
(Admin -> Hypervisors).
4. Create several instances in vcenter az.
5. Check that instances were created on enabled compute host
(vcenter cluster).
6. Disable second compute host with vCenter cluster and enable
first one.
7. Create several instances in vcenter az.
8. Check that instances were created on enabled compute host
(vcenter cluster).
9. Create several instances in nova az.
10. Check that instances were created on enabled compute host
(nova cluster).
Expected result
###############
All instances work fine.
Check that settings about new cluster are placed in neutron config
------------------------------------------------------------------
ID
##
nsxt_smoke_add_compute
Description
###########
Adding a node with compute-vmware role and redeploying the cluster with NSX-T plugin is reflected in neutron configs.
Complexity
##########
core
Steps
#####
1. Upload the NSX-T plugin to master node.
2. Create cluster and configure NSX-T for that cluster.
3. Provision three controller node.
4. Deploy cluster.
5. Get configured clusters' moref ids (Managed Object Reference) from neutron config.
6. Add node with compute-vmware role.
7. Redeploy cluster with new node.
8. Get new configured clusters' moref ids from neutron config.
9. Check new cluster added in neutron config.
Expected result
###############
Clusters are reconfigured after compute-vmware has been added.
Fuel create mirror and update core repos on cluster with NSX-T plugin
---------------------------------------------------------------------
ID
##
nsxt_update_core_repos
Description
###########
Fuel create mirror and update core repos in cluster with NSX-T plugin
Complexity
##########
core
Steps
#####
1. Setup for system tests
2. Log into controller node via Fuel CLI and get PIDs of services which were launched by plugin and store them:
`ps ax | grep neutron-server`
3. Launch the following command on the Fuel Master node:
`fuel-mirror create -P ubuntu -G mos ubuntu`
4. Run the command below on the Fuel Master node:
`fuel-mirror apply -P ubuntu -G mos ubuntu --env <env_id> --replace`
5. Run the command below on the Fuel Master node:
`fuel --env <env_id> node --node-id <node_ids_separated_by_comma> --tasks setup_repositories`
And wait until task is done.
6. Log into controller node and check plugins services are alive and their PID are not changed.
7. Check all nodes remain in ready status.
8. Rerun OSTF.
Expected result
###############
Cluster (nodes) should remain in ready state.
OSTF tests should be passed on rerun.
Configuration with multiple NSX managers
----------------------------------------
ID
##
nsxt_multiple_nsx_managers
Description
###########
NSX-T plugin can configure several NSX managers at once.
Complexity
##########
core
Steps
#####
1. Create cluster.
Prepare 2 NSX managers.
2. Configure plugin.
3. Set comma-separated list of NSX managers.
nsx_api_managers = 1.2.3.4,1.2.3.5
4. Deploy cluster.
5. Run OSTF.
6. Power off the first NSX manager.
7. Run OSTF.
8. Power off the second NSX manager.
Power on the first NSX manager.
9. Run OSTF.
Expected result
###############
OSTF tests should be passed.
Deploy HOT
----------
ID
##
nsxt_hot
Description
###########
Template creates flavor, net, security group, instance.
Complexity
##########
smoke
Steps
#####
1. Deploy cluster with NSX.
2. Copy nsxt_stack.yaml to controller on which heat will be run.
3. On controller node run command::
. ./openrc
heat stack-create -f nsxt_stack.yaml teststack
Wait for status COMPLETE.
4. Run OSTF.
Expected result
###############
All OSTF are passed.