Test Plan for VMware DVS plugin v.2.0.0

Change-Id: I9fa6ce0c4b47b115a516071db5b1b1a2f5fd9d53
Ilya Bumarskov 2015-12-01 22:09:21 +03:00 committed by otsvigun
parent 7bc1e3f4de
commit 8c155410cb
8 changed files with 2081 additions and 0 deletions

docs/test_plan/Makefile

@@ -0,0 +1,177 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = _build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html        to make standalone HTML files"
	@echo "  dirhtml     to make HTML files named index.html in directories"
	@echo "  singlehtml  to make a single large HTML file"
	@echo "  pickle      to make pickle files"
	@echo "  json        to make JSON files"
	@echo "  htmlhelp    to make HTML files and a HTML help project"
	@echo "  qthelp      to make HTML files and a qthelp project"
	@echo "  devhelp     to make HTML files and a Devhelp project"
	@echo "  epub        to make an epub"
	@echo "  latex       to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf    to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja  to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text        to make text files"
	@echo "  man         to make manual pages"
	@echo "  texinfo     to make Texinfo files"
	@echo "  info        to make Texinfo files and run them through makeinfo"
	@echo "  gettext     to make PO message catalogs"
	@echo "  changes     to make an overview of all changed/added/deprecated items"
	@echo "  xml         to make Docutils-native XML files"
	@echo "  pseudoxml   to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck   to check all external links for integrity"
	@echo "  doctest     to run all doctests embedded in the documentation (if enabled)"

clean:
	rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/FuelVMwareDVSplugin.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/FuelVMwareDVSplugin.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/FuelVMwareDVSplugin"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/FuelVMwareDVSplugin"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

docs/test_plan/conf.py

@@ -0,0 +1,253 @@
# -*- coding: utf-8 -*-
#
# Fuel VMware DVS plugin documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 14 12:14:29 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [ ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Fuel VMware DVS plugin'
copyright = u'2015, Mirantis Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0.0'
# The full version, including alpha/beta/rc tags.
release = '2.0-2.0.0-1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
#exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FuelVMwareDVSplugindoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    'classoptions': ',openany,oneside',
    'babel': '\\usepackage[english]{babel}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'FuelVMwareDVSplugin.tex',
     u'Fuel VMware DVS plugin testing documentation',
     u'Mirantis Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'fuelvmwaredvsplugin',
     u'Fuel VMware DVS plugin testing documentation',
     [u'Mirantis Inc.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'FuelVMwareDVSplugin',
     u'Fuel VMware DVS plugin testing documentation',
     u'Mirantis Inc.', 'FuelVMwareDVSplugin',
     'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Insert footnotes where they are defined instead of at the end.
pdf_inline_footnotes = True

docs/test_plan/index.rst

@@ -0,0 +1,15 @@
Fuel VMware DVS plugin's testing documentation
==============================================
Testing documents
-----------------
.. toctree::
   :glob:
   :maxdepth: 1

   source/vmware_dvs_test_plan
   source/test_suite_smoke
   source/test_suite_system
   source/test_suite_failover
   source/test_suite_upgrade

docs/test_plan/source/test_suite_failover.rst

@@ -0,0 +1,283 @@
========
Failover
========
Verify that it is not possible to uninstall the Fuel DVS plugin while an environment uses it.
--------------------------------------------------------------------------------------------------
ID
##
dvs_uninstall_negative
Description
###########
Verify that it is not possible to uninstall the Fuel DVS plugin while an environment uses it.
Complexity
##########
core
Steps
#####
1. Install DVS plugin on master node.
2. Create a new environment with enabled plugin.
3. Try to delete the plugin via the CLI (remove the plugin from the master node).
Expected result
###############
Alert: "400 Client Error: Bad Request (Can't delete plugin which is enabled for some environment.)" should be displayed.
Verify that vCenter vmclusters migrate after a controller shutdown.
----------------------------------------------------------------------
ID
##
dvs_shutdown_controller
Description
###########
Verify that vcenter-vmcluster services migrate after a controller shutdown.
Complexity
##########
core
Steps
#####
1. Install DVS plugin on master node.
2. Create a new environment with following parameters:
* Compute: KVM/QEMU with vCenter
* Networking: Neutron with VLAN segmentation
* Storage: default
* Additional services: default
3. Add nodes with following roles:
* Controller
* Controller
* Controller
* Compute
* Compute
4. Configure interfaces on nodes.
5. Configure network settings.
6. Enable and configure DVS plugin.
7. Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on controllers.
8. Verify networks.
9. Deploy cluster.
10. Run OSTF
11. Launch instances in nova and vcenter az.
12. Verify connectivity between VMs: send a ping and check that it gets a reply.
13. Shut down the controller that hosts the vmcluster services.
14. Check that vcenter-vmcluster migrates to another controller.
15. Verify connectivity between VMs: send a ping and check that it gets a reply.
Expected result
###############
Vcenter-vmcluster migrates to another controller; ping is available between instances.
Check cluster functionality after vCenter reboot (Nova Compute on controllers).
-------------------------------------------------------------------------------------
ID
##
dvs_vcenter_reboot_vcenter
Description
###########
Check cluster functionality after vCenter reboot. Nova Compute instances are running on controller nodes.
Complexity
##########
core
Steps
#####
1. Install DVS plugin on master node.
2. Create a new environment with following parameters:
* Compute: KVM/QEMU with vCenter
* Networking: Neutron with VLAN segmentation
* Storage: default
* Additional services: default
3. Add nodes with following roles:
* Controller
* Compute
* Cinder
* CinderVMware
4. Configure interfaces on nodes.
5. Configure network settings.
6. Enable and configure DVS plugin.
7. Enable VMWare vCenter/ESXi datastore for images (Glance).
8. Configure VMware vCenter Settings. Add 1 vSphere cluster and configure Nova Compute instances on controllers.
9. Configure Glance credentials on VMware tab.
10. Verify networks.
11. Deploy cluster.
12. Run OSTF.
13. Launch instance VM_1 with image TestVM, availability zone nova and flavor m1.micro.
14. Launch instance VM_2 with image TestVM-VMDK, availability zone vcenter and flavor m1.micro.
15. Check connection between VMs: send ping from VM_1 to VM_2 and vice versa.
16. Reboot vCenter.
17. Check that the controller has lost its connection to vCenter.
18. Wait for vCenter to come back online.
19. Ensure that all instances from vCenter are displayed in the dashboard.
20. Ensure connectivity between the Nova and VMware VMs.
21. Run OSTF.
Expected result
###############
Cluster should be deployed and all OSTF test cases should be passed. Ping should get response.
Check cluster functionality after vCenter reboot (Nova Compute on compute-vmware).
------------------------------------------------------------------------------------------
ID
##
dvs_vcenter_reboot_vcenter_2
Description
###########
Check cluster functionality after vCenter reboot. Nova Compute instances are running on compute-vmware nodes.
Complexity
##########
core
Steps
#####
1. Connect to a Fuel web UI with preinstalled plugin.
2. Create a new environment with following parameters:
* Compute: KVM/QEMU with vCenter
* Networking: Neutron with VLAN segmentation
* Storage: default
* Additional services: default
3. Add nodes with following roles:
* Controller
* Compute
* Cinder
* CinderVMware
* ComputeVMware
4. Configure interfaces on nodes.
5. Configure network settings.
6. Enable and configure DVS plugin.
7. Enable VMWare vCenter/ESXi datastore for images (Glance).
8. Configure VMware vCenter Settings. Add 1 vSphere cluster and configure Nova Compute instances on compute-vmware.
9. Configure Glance credentials on VMware tab.
10. Verify networks.
11. Deploy cluster.
12. Run OSTF.
13. Launch instance VM_1 with image TestVM, AZ nova and flavor m1.micro.
14. Launch instance VM_2 with image TestVM-VMDK, AZ vcenter and flavor m1.micro.
15. Check connection between VMs: send ping from VM_1 to VM_2 and vice versa.
16. Reboot vCenter.
17. Check that the compute-vmware node has lost its connection to vCenter.
18. Wait for vCenter to come back online.
19. Ensure that all instances from vCenter are displayed in the dashboard.
20. Ensure connectivity between the Nova and VMware VMs.
21. Run OSTF.
Expected result
###############
Cluster should be deployed and all OSTF test cases should be passed. Pings should get response.
Verify that vCenter vmclusters migrate after a controller reset.
-------------------------------------------------------------------
ID
##
dvs_reset_controller
Description
###########
Verify that vcenter-vmcluster services migrate after a controller reset.
Complexity
##########
core
Steps
#####
1. Install DVS plugin on master node.
2. Create a new environment with following parameters:
* Compute: KVM/QEMU with vCenter
* Networking: Neutron with VLAN segmentation
* Storage: default
* Additional services: default
3. Add nodes with following roles:
* Controller
* Controller
* Controller
* Compute
* Compute
4. Configure interfaces on nodes.
5. Configure network settings.
6. Enable and configure DVS plugin.
7. Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on controllers.
8. Verify networks.
9. Deploy cluster.
10. Run OSTF
11. Launch instances in nova and vcenter az.
12. Verify connectivity between VMs: send a ping and check that it gets a reply.
13. Reset the controller that hosts the vmcluster services.
14. Check that the vmclusters migrate to another controller.
15. Verify connectivity between VMs: send a ping and check that it gets a reply.
Expected result
###############
Vcenter-vmcluster migrates to another controller; ping is available between instances.

docs/test_plan/source/test_suite_smoke.rst

@@ -0,0 +1,225 @@
=====
Smoke
=====
Install Fuel VMware DVS plugin.
-------------------------------
ID
##
dvs_install
Description
###########
Check that plugin can be installed.
Complexity
##########
smoke
Steps
#####
1. Connect to the Fuel master node via SSH.
2. Upload plugin.
3. Install plugin.
Expected result
###############
Ensure that the plugin is installed successfully using the CLI: run the command 'fuel plugins' and check the plugin's name, version, and package version.
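The CLI check from the expected result can be automated. A minimal sketch, assuming the 'fuel' client is configured on the master node; the plugin name below is a placeholder.

.. code-block:: python

   import subprocess

   def plugin_is_listed(name='fuel-plugin-vmware-dvs'):
       # 'fuel plugins' prints a table: id | name | version | package_version
       out = subprocess.check_output(['fuel', 'plugins']).decode('utf-8')
       return any(name in line for line in out.splitlines())

   assert plugin_is_listed(), 'plugin not found in "fuel plugins" output'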
Uninstall Fuel VMware DVS plugin.
---------------------------------
ID
##
dvs_uninstall
Description
###########
Check that plugin can be removed.
Complexity
##########
smoke
Steps
#####
1. Connect to the Fuel master node with the preinstalled plugin via SSH.
2. Remove plugin.
Expected result
###############
Verify that the plugin is removed by running the command 'fuel plugins'.
Verify that all elements of DVS plugin section meet the requirements.
----------------------------------------------------------------------
ID
##
dvs_gui
Description
###########
Verify that all elements of DVS plugin section meet the requirements.
Complexity
##########
smoke
Steps
#####
1. Connect to a Fuel web UI with preinstalled plugin.
2. Create a new environment with following parameters:
* Compute: KVM/QEMU with vCenter
* Networking: Neutron with VLAN segmentation
* Storage: default
* Additional services: default
3. Click on the Settings tab and check that the DVS plugin section is displayed with all required GUI elements.
4. Verify that the DVS plugin section is present on the Settings tab.
5. Verify that the checkbox "Use Neutron VMware DVS ML2 plugin" is enabled by default.
6. Verify that the user can disable and re-enable the DVS plugin by clicking the checkbox "Use Neutron VMware DVS ML2 plugin".
7. Verify that the checkbox "Use VMware DVS ML2 plugin for networking" is enabled by default.
8. Verify that all labels of the DVS plugin section have the same font style and color.
9. Verify that all elements of the DVS plugin section are vertically aligned.
Expected result
###############
All elements of the DVS plugin section meet the requirements.
Deployment with plugin, controller and vmware datastore backend.
----------------------------------------------------------------
ID
##
dvs_vcenter_smoke
Description
###########
Check deployment with VMware DVS plugin and one controller.
Complexity
##########
smoke
Steps
#####
1. Connect to a Fuel web UI with preinstalled plugin.
2. Create a new environment with following parameters:
* Compute: KVM/QEMU with vCenter
* Networking: Neutron with VLAN segmentation
* Storage: default
* Additional services: default
3. Add nodes with following roles:
* Controller
4. Configure interfaces on nodes.
5. Configure network settings.
6. Enable and configure DVS plugin.
7. Configure settings:
* Enable VMWare vCenter/ESXi datastore for images (Glance).
8. Configure VMware vCenter Settings. Add 1 vSphere cluster and configure Nova Compute instances on controllers.
9. Deploy cluster.
10. Run OSTF.
Expected result
###############
Cluster should be deployed and all OSTF test cases should be passed.
Deploy cluster with plugin and ceph datastore backend.
------------------------------------------------------
ID
##
dvs_vcenter_bvt
Description
###########
Check deployment with VMware DVS plugin, 3 Controllers, Compute, 2 CephOSD, CinderVMware and ComputeVMware roles.
Complexity
##########
smoke
Steps
#####
1. Connect to a Fuel web UI with preinstalled plugin.
2. Create a new environment with following parameters:
* Compute: KVM/QEMU with vCenter
* Networking: Neutron with VLAN segmentation
* Storage: Ceph
* Additional services: default
3. Add nodes with following roles:
* Controller
* Controller
* Controller
* Compute
* CephOSD
* CephOSD
* CinderVMware
* ComputeVMware
4. Configure interfaces on nodes.
5. Configure network settings.
6. Enable and configure DVS plugin.
7. Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on controllers and compute-vmware.
8. Verify networks.
9. Deploy cluster.
10. Run OSTF.
Expected result
###############
Cluster should be deployed and all OSTF test cases should be passed.

docs/test_plan/source/test_suite_system.rst

@@ -0,0 +1,792 @@
======
System
======
Setup for system tests.
-----------------------
ID
##
dvs_setup_system
Description
###########
Deploy an environment in DualHypervisors mode with 1 controller, 2 compute, and 1 compute-vmware nodes. Nova Compute instances are running on controller nodes.
Complexity
##########
core
Steps
#####
1. Install DVS plugin on master node.
2. Create a new environment with following parameters:
* Compute: KVM/QEMU with vCenter
* Networking: Neutron with VLAN segmentation
* Storage: default
* Additional services: default
3. Add nodes with following roles:
* Controller
* Compute
* Compute
* ComputeVMware
4. Configure interfaces on nodes.
5. Configure network settings.
6. Enable and configure DVS plugin.
7. Enable VMWare vCenter/ESXi datastore for images (Glance).
8. Configure VMware vCenter Settings. Add 2 vSphere clusters and configure Nova Compute instances on controllers and compute-vmware.
9. Verify networks.
10. Deploy cluster.
11. Run OSTF.
Expected result
###############
Cluster should be deployed and all OSTF test cases should be passed.
Check abilities to create and terminate networks on DVS.
--------------------------------------------------------
ID
##
dvs_create_terminate_networks
Description
###########
Check abilities to create and terminate networks on DVS.
Complexity
##########
core
Steps
#####
1. Setup for system tests.
2. Log in to Horizon Dashboard.
3. Add private networks net_01 and net_02.
4. Check that the networks are present in vSphere.
5. Remove private network net_01.
6. Check that net_01 is not present in vSphere.
7. Add private network net_01 again.
8. Check that net_01 is present in vSphere.
Expected result
###############
Networks are successfully created and present in both Horizon and vSphere.
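Steps 3-8 map naturally onto python-neutronclient calls. A minimal sketch, assuming admin credentials and a reachable Keystone endpoint; all addresses and names are placeholders.

.. code-block:: python

   from neutronclient.v2_0 import client

   neutron = client.Client(username='admin', password='admin',
                           tenant_name='admin',
                           auth_url='http://10.20.0.2:5000/v2.0')

   # Create net_01 and net_02, then delete and re-create net_01.
   net_01 = neutron.create_network({'network': {'name': 'net_01'}})
   net_02 = neutron.create_network({'network': {'name': 'net_02'}})
   neutron.delete_network(net_01['network']['id'])
   net_01 = neutron.create_network({'network': {'name': 'net_01'}})
   # Presence of the corresponding port groups in vSphere is checked
   # separately, e.g. with pyVmomi against the vCenter API.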
Check ability to update the network name.
---------------------------------------------
ID
##
dvs_update_network
Description
###########
Check ability to update the network name.
Complexity
##########
core
Steps
#####
1. Setup for system tests.
2. Log in to Horizon.
3. Create network net_1.
4. Update network name net_1 to net_2.
5. Update name of default network to 'spring'.
Expected result
###############
The network name should be changed successfully.
Check abilities to bind port on DVS to VM, disable and enable this port.
------------------------------------------------------------------------
ID
##
dvs_enable_disbale_port
Description
###########
Check abilities to bind port on DVS to VM, disable and enable this port.
Complexity
##########
core
Steps
#####
1. Setup for system tests.
2. Log in to Horizon Dashboard.
3. Navigate to Project -> Compute -> Instances
4. Launch instance VM_1 with image TestVM, availability zone nova and flavor m1.micro.
5. Launch instance VM_2 with image TestVM-VMDK, availability zone vcenter and flavor m1.micro.
6. Verify that the VMs can communicate with each other: send ICMP ping from VM_1 to VM_2 and vice versa.
7. Disable the interface of VM_1.
8. Verify that the VMs cannot communicate with each other: send ICMP ping from VM_2 to VM_1 and vice versa.
9. Enable the interface of VM_1.
10. Verify that the VMs can communicate with each other: send ICMP ping from VM_1 to VM_2 and vice versa.
Expected result
###############
We can enable/disable interfaces of instances via Horizon.
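Steps 7 and 9 toggle the port's administrative state. With python-neutronclient this is a single update_port call on the instance's port; a sketch, assuming 'neutron' is an authenticated client and the VM id (vm1_id, a placeholder) is known:

.. code-block:: python

   # Look up the Neutron port attached to VM_1 by its Nova instance id.
   port = neutron.list_ports(device_id=vm1_id)['ports'][0]

   # Step 7: disable the interface of VM_1.
   neutron.update_port(port['id'], {'port': {'admin_state_up': False}})

   # Step 9: enable it again.
   neutron.update_port(port['id'], {'port': {'admin_state_up': True}})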
Check abilities to assign multiple vNIC to a single VM.
-------------------------------------------------------
ID
##
dvs_multi_vnic
Description
###########
Check abilities to assign multiple vNIC to a single VM.
Complexity
##########
core
Steps
#####
1. Setup for system tests
2. Log in to Horizon Dashboard.
3. Add two private networks (net01 and net02).
4. Add one subnet (net01_subnet01: 192.168.101.0/24, net02_subnet01: 192.168.102.0/24) to each network.
5. Launch instance VM_1 with image TestVM and flavor m1.micro in the nova az.
6. Launch instance VM_2 with image TestVM-VMDK and flavor m1.micro in the vcenter az.
7. Check abilities to assign multiple vNIC net01 and net02 to VM_1.
8. Check abilities to assign multiple vNIC net01 and net02 to VM_2.
9. Check that both interfaces on each VM got an IP address. To activate the second interface on Cirros, edit /etc/network/interfaces and restart the network: "sudo /etc/init.d/S40network restart".
10. Send icmp ping from VM_1 to VM_2 and vice versa.
Expected result
###############
VM_1 and VM_2 should each be attached to two vNICs, on net01 and net02. Pings should get a response.
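Attaching both networks at boot time (steps 5-8) can be expressed with python-novaclient's nics argument. A sketch with placeholder ids and the classic v2 client signature:

.. code-block:: python

   from novaclient import client

   nova = client.Client('2', 'admin', 'admin', 'admin',
                        'http://10.20.0.2:5000/v2.0')

   # net01_id and net02_id are the Neutron ids of net01/net02 (placeholders).
   vm_1 = nova.servers.create(
       name='VM_1',
       image=nova.images.find(name='TestVM'),
       flavor=nova.flavors.find(name='m1.micro'),
       availability_zone='nova',
       nics=[{'net-id': net01_id}, {'net-id': net02_id}])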
Check connection between VMs in one default tenant.
---------------------------------------------------
ID
##
dvs_connectivity_default_tenant
Description
###########
Check connectivity between VMs in the default tenant that run in different availability zones: on KVM/QEMU and on vCenter.
Complexity
##########
core
Steps
#####
1. Setup for system tests
2. Navigate to Project -> Compute -> Instances
3. Launch instance VM_1 with image TestVM and flavor m1.micro in the nova az.
4. Launch instance VM_2 with image TestVM-VMDK and flavor m1.micro in the vcenter az.
5. Verify that VM_1 and VM_2, which run on different hypervisors, can communicate with each other: send ICMP ping from VM_1 (QEMU/KVM) to VM_2 (vCenter) and vice versa.
Expected result
###############
Pings should get a response.
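The ping check in step 5 is usually driven over SSH to one of the instances. A sketch using paramiko, with placeholder floating IPs and the default Cirros credentials:

.. code-block:: python

   import paramiko

   def ping_from(host, target, user='cirros', password='cubswin:)'):
       ssh = paramiko.SSHClient()
       ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
       ssh.connect(host, username=user, password=password)
       _, stdout, _ = ssh.exec_command('ping -c 3 %s' % target)
       ok = stdout.channel.recv_exit_status() == 0
       ssh.close()
       return ok

   assert ping_from('172.16.0.101', '172.16.0.102')  # VM_1 -> VM_2
   assert ping_from('172.16.0.102', '172.16.0.101')  # VM_2 -> VM_1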
Check connection between VMs in one non-default tenant.
-------------------------------------------------------
ID
##
dvs_connectivity_diff_az_non_default_tenant
Description
###########
Check connection between VMs in one tenant.
Complexity
##########
core
Steps
#####
1. Setup for system tests.
2. Log in to Horizon Dashboard.
3. Create network net_01 with a subnet.
4. Navigate to Project -> Compute -> Instances
5. Launch instance VM_1 with image TestVM and flavor m1.micro in the nova az in net_01.
6. Launch instance VM_2 with image TestVM-VMDK and flavor m1.micro in the vcenter az in net_01.
7. Verify that VMs in the same tenant can communicate with each other: send ICMP ping from VM_1 to VM_2 and vice versa.
Expected result
###############
Pings should get a response.
Check connectivity between VMs attached to different networks with and without a router between them.
-------------------------------------------------------------------------------------------------------
ID
##
dvs_connectivity_diff_networks
Description
###########
Check connectivity between VMs attached to different networks with and without a router between them.
Complexity
##########
core
Steps
#####
1. Setup for system tests.
2. Log in to Horizon Dashboard.
3. Add two private networks (net01 and net02).
4. Add one subnet (net01_subnet01: 192.168.101.0/24, net02_subnet01: 192.168.102.0/24) to each network.
5. Navigate to Project -> Compute -> Instances
6. Launch instances VM_1 and VM_2 in the network 192.168.101.0/24 with image TestVM and flavor m1.micro in the nova az.
7. Launch instances VM_3 and VM_4 in the network 192.168.102.0/24 with image TestVM-VMDK and flavor m1.micro in the vcenter az.
8. Verify that VMs in the same network can communicate with each other: send ICMP ping from VM_1 to VM_2, from VM_3 to VM_4, and vice versa.
9. Verify that VMs in different networks cannot communicate with each other: send ICMP ping from VM_1 to VM_3, from VM_4 to VM_2, and vice versa.
10. Create Router_01, set the gateway, and add an interface to the external network.
11. Attach the private networks to Router_01.
12. Verify that VMs in different networks can now communicate with each other: send ICMP ping from VM_1 to VM_3, from VM_4 to VM_2, and vice versa.
13. Add a new Router_02, set the gateway, and add an interface to the external network.
14. Delete net02 from Router_01 and add it to Router_02.
15. Verify that VMs in different networks cannot communicate with each other: send ICMP ping from VM_1 to VM_3, from VM_4 to VM_2, and vice versa.
Expected result
###############
Network connectivity must conform to each of the scenarios.
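The router plumbing in steps 10-14 looks like this with python-neutronclient (a sketch; 'neutron' is an authenticated client, and ext_net_id, net01_subnet_id, net02_subnet_id are placeholders):

.. code-block:: python

   r1 = neutron.create_router({'router': {'name': 'Router_01'}})['router']
   neutron.add_gateway_router(r1['id'], {'network_id': ext_net_id})
   neutron.add_interface_router(r1['id'], {'subnet_id': net01_subnet_id})
   neutron.add_interface_router(r1['id'], {'subnet_id': net02_subnet_id})

   # Step 14: move net02 from Router_01 to a new Router_02.
   r2 = neutron.create_router({'router': {'name': 'Router_02'}})['router']
   neutron.add_gateway_router(r2['id'], {'network_id': ext_net_id})
   neutron.remove_interface_router(r1['id'], {'subnet_id': net02_subnet_id})
   neutron.add_interface_router(r2['id'], {'subnet_id': net02_subnet_id})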
Check isolation between VMs in different tenants.
-------------------------------------------------
ID
##
dvs_connectivity_diff_tenants
Description
###########
Check isolation between VMs in different tenants.
Complexity
##########
core
Steps
#####
1. Setup for system tests
2. Log in to Horizon Dashboard.
3. Create a non-admin tenant with the name 'test_tenant': Identity -> Projects -> Create Project. On the Project Members tab, add the admin user with the admin and member roles.
4. Navigate to Project -> Network -> Networks
5. Create network with subnet.
6. Navigate to Project -> Compute -> Instances
7. Launch instance VM_1 with image TestVM-VMDK in the vcenter az.
8. Navigate to test_tenant
9. Navigate to Project -> Network -> Networks
10. Create a router, set the gateway, and add an interface.
11. Navigate to Project -> Compute -> Instances
12. Launch instance VM_2 with image TestVM-VMDK in the vcenter az.
13. Verify that VMs in different tenants cannot communicate with each other: send ICMP ping from VM_1 of the admin tenant to VM_2 of test_tenant and vice versa.
Expected result
###############
Pings should not get a response.
Check connectivity of VMs to public network without floating IP.
--------------------------------------------------------------------
ID
##
dvs_connectivity_public_net_without_floating_ip
Description
###########
Check connectivity of VMs to a public network without floating IPs.
Complexity
##########
core
Steps
#####
1. Setup for system tests.
2. Log in to Horizon Dashboard.
3. Create net_01 (net01_subnet, 192.168.112.0/24) and attach it to the default router.
4. Launch instance VM_1 in the nova az with image TestVM and flavor m1.micro in the default internal network.
5. Launch instance VM_2 in the vcenter az with image TestVM-VMDK and flavor m1.micro in net_01.
6. Send ping from instances VM_1 and VM_2 to 8.8.8.8 or another outside IP.
Expected result
###############
Pings should get a response.
Check connectivity of VMs to public network with floating IP.
-----------------------------------------------------------------
ID
##
dvs_connectivity_public_net_with_floating_ip
Description
###########
Check connectivity of VMs to a public network with floating IPs.
Complexity
##########
core
Steps
#####
1. Setup for system tests.
2. Log in to Horizon Dashboard.
3. Create net_01 (net01_subnet, 192.168.112.0/24) and attach it to the default router.
4. Launch instance VM_1 in the nova az with image TestVM and flavor m1.micro in the default internal network. Associate a floating IP.
5. Launch instance VM_2 in the vcenter az with image TestVM-VMDK and flavor m1.micro in net_01. Associate a floating IP.
6. Send ping from instances VM_1 and VM_2 to 8.8.8.8 or another outside IP.
Expected result
###############
Instances should have access to the internet.
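Associating a floating IP (steps 4-5) can be done directly through Neutron by pairing the external network with the instance's port. A sketch with placeholder ids:

.. code-block:: python

   # 'neutron' is an authenticated neutronclient instance;
   # ext_net_id and server_id are placeholders.
   port = neutron.list_ports(device_id=server_id)['ports'][0]
   fip = neutron.create_floatingip(
       {'floatingip': {'floating_network_id': ext_net_id,
                       'port_id': port['id']}})['floatingip']
   print('instance reachable at', fip['floating_ip_address'])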
Check abilities to create and delete security group.
----------------------------------------------------
ID
##
dvs_create_delete_security_group
Description
###########
Check abilities to create and delete security group.
Complexity
##########
core
Steps
#####
1. Setup for system tests
2. Create a non-default network net_01 with a subnet.
3. Launch 2 instances in the vcenter az and 2 instances in the nova az in the tenant network net_01.
4. Launch 2 instances in the vcenter az and 2 instances in the nova az in the internal tenant network.
5. Attach net_01 to the default router.
6. Create security group SG_1 to allow ICMP traffic.
7. Add an ingress rule for ICMP protocol to SG_1.
8. Create security group SG_2 to allow TCP traffic on port 22.
9. Add an ingress rule for TCP protocol to SG_2.
10. Remove the default security group and attach SG_1 and SG_2 to the VMs.
11. Check that ping is available between instances.
12. Check that an SSH connection is available between instances.
13. Delete all rules from SG_1 and SG_2.
14. Check that SSH is no longer available to the instances.
15. Add ingress and egress rules for TCP protocol to SG_2.
16. Check that an SSH connection is available between instances.
17. Check that ping is not available between instances.
18. Add ingress and egress rules for ICMP protocol to SG_1.
19. Check that ping is available between instances.
20. Delete the ingress rule for ICMP protocol from SG_1 (skip this step if the guest OS is Cirros).
21. Add an ingress rule for ICMP IPv6 to SG_1 (skip this step if the guest OS is Cirros).
22. Check that ping6 is available between instances (skip this step if the guest OS is Cirros).
23. Delete security groups SG_1 and SG_2.
24. Attach instances to the default security group.
25. Check that ping is available between instances.
26. Check that SSH is available between instances.
Expected result
###############
ICMP and TCP traffic between the VMs should pass only when the corresponding security group rules allow it.
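Steps 6-9 create the two groups and their rules. With python-neutronclient that looks roughly as follows (a sketch; 'neutron' is an authenticated client):

.. code-block:: python

   # SG_1: allow ingress ICMP.
   sg1 = neutron.create_security_group(
       {'security_group': {'name': 'SG_1'}})['security_group']
   neutron.create_security_group_rule(
       {'security_group_rule': {'security_group_id': sg1['id'],
                                'direction': 'ingress',
                                'protocol': 'icmp'}})

   # SG_2: allow ingress TCP on port 22 (SSH).
   sg2 = neutron.create_security_group(
       {'security_group': {'name': 'SG_2'}})['security_group']
   neutron.create_security_group_rule(
       {'security_group_rule': {'security_group_id': sg2['id'],
                                'direction': 'ingress',
                                'protocol': 'tcp',
                                'port_range_min': 22,
                                'port_range_max': 22}})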
Verify that only the associated MAC and IP addresses can communicate on the logical port.
-----------------------------------------------------------------------------------------
ID
##
dvs_port_security_group
Description
###########
Verify that only the associated MAC and IP addresses can communicate on the logical port.
Complexity
##########
core
Steps
#####
1. Setup for system tests.
2. Log in to Horizon Dashboard.
3. Launch 2 instances on each hypervisor.
4. Verify that traffic can be successfully sent from and received on the MAC and IP address associated with the logical port.
5. Configure a new IP address on the instance associated with the logical port.
6. Confirm that the instance cannot communicate with that IP address.
7. Configure a new MAC address on the instance associated with the logical port.
8. Confirm that the instance cannot communicate with that MAC address and the original IP address.
Expected result
###############
The instance should not be able to communicate using the new IP and MAC addresses, but should still communicate using the original IP.
Check connectivity between VMs with the same IP in different tenants.
-------------------------------------------------------------------------
ID
##
dvs_connectivity_vm_with_same_ip_in_diff_tenants
Description
###########
Check connectivity between VMs with the same IP in different tenants.
Complexity
##########
core
Steps
#####
1. Setup for system tests
2. Log in to Horizon Dashboard.
3. Create 2 non-admin tenants "test_1" and "test_2": Identity -> Projects -> Create Project. On the Project Members tab, add the admin user with the admin and member roles.
4. In tenant "test_1" create net1 and subnet1 with CIDR 10.0.0.0/24.
5. In tenant "test_1" create security group "SG_1" and add rule that allows ingress icmp traffic.
6. In tenant "test_2" create net2 and subnet2 with CIDR 10.0.0.0/24.
7. In tenant "test_2" create security group "SG_2".
8. In tenant "test_1" add VM_1 of vcenter in net1 with ip 10.0.0.4 and "SG_1" as security group.
9. In tenant "test_1" add VM_2 of nova in net1 with ip 10.0.0.5 and "SG_1" as security group.
10. In tenant "test_2" create net1 and subnet1 with CIDR 10.0.0.0/24.
11. In tenant "test_2" create security group "SG_1" and add rule that allows ingress icmp traffic.
12. In tenant "test_2" add VM_3 of nova in net1 with ip 10.0.0.4 and "SG_1" as security group.
13. In tenant "test_2" add VM_4 of vcenter in net1 with ip 10.0.0.5 and "SG_1" as security group.
14. Verify that VMs with same ip on different tenants should communicate between each other. Send icmp ping from VM_1 to VM_3, VM_2 to VM_4 and vice versa.
Expected result
###############
Pings should get a response.
Check simultaneous creation of instances in one group.
----------------------------------------------------------
ID
##
dvs_vcenter_create_batch_instances
Description
###########
Create a batch of instances.
Complexity
##########
core
Steps
#####
1. Setup for system tests.
2. Navigate to Project -> Compute -> Instances
3. Launch several instances of VM_1 simultaneously with image TestVM and flavor m1.micro in the nova availability zone in the default internal network.
4. Launch several instances of VM_2 simultaneously with image TestVM-VMDK and flavor m1.micro in the vcenter availability zone in the default internal network.
5. Check connection between VMs (ping, SSH).
6. Delete all VMs from Horizon simultaneously.
Expected result
###############
All instances should be created and deleted without any error.
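Launching several instances simultaneously (steps 3-4) maps onto novaclient's min_count/max_count arguments, so a single API request boots the whole batch. A sketch with placeholder values ('nova' is an authenticated client):

.. code-block:: python

   nova.servers.create(
       name='VM_1',
       image=nova.images.find(name='TestVM'),
       flavor=nova.flavors.find(name='m1.micro'),
       availability_zone='nova',
       min_count=5, max_count=5)  # boots 5 instances in one request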
Check that we can create volumes in different availability zones, which have different hypervisor types, and attach them to instances.
------------------------------------------------------------------------------------------------------------------------------------------
ID
##
dvs_vcenter_volume
Description
###########
Create volumes in different availability zones and attach them to appropriate instances.
Complexity
##########
core
Steps
#####
1. Install plugin on master node.
2. Create a new environment with following parameters:
* Compute: KVM/QEMU with vCenter
* Networking: Neutron with VLAN segmentation
* Storage: default
* Additional services: default
3. Add nodes with following roles:
* Controller
* Compute
* Cinder
* CinderVMware
4. Configure interfaces on nodes.
5. Configure network settings.
6. Enable and configure DVS plugin.
7. Configure VMware vCenter Settings. Add 1 vSphere cluster and configure Nova Compute instances on controllers.
8. Verify networks.
9. Deploy cluster.
10. Create a VM for each hypervisor type.
11. Create 2 volumes, each in its own availability zone.
12. Attach each volume to its instance.
Expected result
###############
Each volume should be attached to its instance.
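Steps 11-12 in cinderclient/novaclient terms, as a sketch with placeholder ids and endpoints:

.. code-block:: python

   from cinderclient import client as cinder_client

   cinder = cinder_client.Client('2', 'admin', 'admin', 'admin',
                                 'http://10.20.0.2:5000/v2.0')

   # One volume per availability zone.
   vol_nova = cinder.volumes.create(1, availability_zone='nova')
   vol_vc = cinder.volumes.create(1, availability_zone='vcenter')

   # Attach each volume to the instance in the matching zone
   # (vm_nova_id / vm_vcenter_id are placeholders; 'nova' is an
   # authenticated novaclient instance).
   nova.volumes.create_server_volume(vm_nova_id, vol_nova.id, '/dev/vdb')
   nova.volumes.create_server_volume(vm_vcenter_id, vol_vc.id, '/dev/sdb')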
Check ability to create a Heat stack from a template.
---------------------------------------------------------
ID
##
dvs_vcenter_heat
Description
###########
Check ability to create a Heat stack from a template.
Complexity
##########
core
Steps
#####
1. Create a stack from a Heat template.
2. Check that the stack was created.
Expected result
###############
Stack was successfully created.
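Step 1 with python-heatclient, as a sketch; the endpoint, token, and template body are placeholders chosen only to illustrate the call:

.. code-block:: python

   from heatclient.client import Client

   template = '''
   heat_template_version: 2013-05-23
   resources:
     test_net:
       type: OS::Neutron::Net
       properties:
         name: heat_net_01
   '''

   heat = Client('1', endpoint='http://10.20.0.2:8004/v1/TENANT_ID',
                 token='ADMIN_TOKEN')
   stack = heat.stacks.create(stack_name='dvs_test_stack',
                              template=template)
   # Step 2: poll the stack until its status is CREATE_COMPLETE.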
Deploy cluster with DVS plugin, Neutron, Ceph and network template
------------------------------------------------------------------
ID
##
dvs_vcenter_net_template
Description
###########
Deploy cluster with DVS plugin, Neutron, Ceph and network template.
Complexity
##########
core
Steps
#####
1. Upload the plugin to the master node.
2. Install the plugin.
3. Create a cluster with vCenter.
4. Set Ceph as the backend for Glance and Cinder.
5. Add nodes with following roles:
* Controller
* ComputeVMware
* ComputeVMware
* Compute
* CephOSD
* CephOSD
* CephOSD
6. Upload the network template.
7. Check the network configuration.
8. Deploy the cluster.
9. Run OSTF.
Expected result
###############
Cluster should be deployed and all OSTF test cases should be passed.

docs/test_plan/source/test_suite_upgrade.rst

@@ -0,0 +1,61 @@
=======
Upgrade
=======
Deploy cluster with plugin on Fuel 8.0 and upgrade to Fuel 9.0.
---------------------------------------------------------------
ID
##
dvs_vcenter_upgrade
Description
###########
Deploy cluster with plugin on Fuel 8.0 and upgrade to Fuel 9.0.
Complexity
##########
core
Steps
#####
1. Install plugin on master node.
2. Create a new environment with following parameters:
* Compute: KVM/QEMU with vCenter
* Networking: Neutron with VLAN segmentation
* Storage: default
* Additional services: default
3. Add nodes with following roles:
* Controller
* CinderVMware
* Cinder
* ComputeVMware
4. Configure interfaces on nodes.
5. Configure network settings.
6. Enable and configure DVS plugin.
7. Configure VMware vCenter Settings. Add 1 vSphere cluster and configure Nova Compute instances on compute-vmware.
8. Verify Networks.
9. Deploy cluster.
10. Run OSTF.
11. Upgrade the Fuel node:
* Upload the upgrade script to the master node into the /var folder.
* Untar the script and run ./upgrade.sh.
12. Check that all containers and the ISO version were upgraded (docker ps).
13. Check that previously created environment is present.
14. Run OSTF tests again.
Expected result
###############
Cluster should be deployed and all OSTF test cases should be passed.

docs/test_plan/source/vmware_dvs_test_plan.rst

@@ -0,0 +1,275 @@
=============================================
Test Plan for VMware DVS plugin version 2.0.0
=============================================
.. contents:: Table of contents
   :depth: 3
************
Introduction
************
Purpose
=======
The main purpose of this document is to describe the Quality Assurance
activities required to ensure that the Fuel plugin for the Neutron ML2
vmware_dvs driver is ready for production. The project will be able to offer
VMware DVS integration functionality with MOS.
The scope of this plan defines the following objectives:
* Identify testing activities;
* Outline the testing approach, test types, and test cycle that will be used;
* List metrics and deliverable elements;
* List items in and out of testing scope;
* Define exit criteria for testing;
* Describe the test environment.
Scope
=====
The Fuel VMware DVS plugin includes the Neutron ML2 driver for VMware vCenter
DVS, which is developed by a third party. This test plan covers the full
functionality of the Fuel VMware DVS plugin, including basic scenarios related
to the DVS driver for Neutron.
The following test types should be provided:
* Smoke/BVT tests
* Integration tests
* System tests
* Destructive tests
* GUI tests
Performance testing will be executed on the scale lab, and a custom set of
Rally scenarios must be run with a DVS environment. The configuration,
environment, and scenarios for performance/scale testing should be determined
separately.
Intended Audience
=================
This document is intended for project team staff (QA and Dev engineers and
managers) and all other persons who are interested in testing results.
Limitation
==========
The plugin (or its components) has the following limitations:
* The VMware DVS plugin can be enabled only in environments with Neutron as the networking option.
* Only VLANs are supported for tenant network separation.
* Only vSphere 5.5 & 6.0 are supported.
Product compatibility matrix
============================
.. list-table:: product compatibility matrix
   :widths: 15 10 30
   :header-rows: 1

   * - Requirement
     - Version
     - Comment
   * - MOS
     - 8.0 with Liberty
     -
   * - Operating System
     - Ubuntu 14.04
     -
   * - vSphere
     - 5.5, 6.0
     -
Test environment, infrastructure and tools
==========================================
The following configuration should be used in testing:
* 1 physnet to 1 DVS switch (dvSwitch).
Other recommendations can be found in the test cases.
**************************************
Evaluation Mission and Test Motivation
**************************************
The project's main goal is to build a MOS plugin that integrates the Neutron
ML2 driver for VMware vCenter DVS. This will make it possible to use Neutron
for networking in VMware-related environments. The plugin must be compatible
with version 8.0 of Mirantis OpenStack and should be tested with the
software/hardware described in the `product compatibility matrix`_.
See the VMware DVS plugin specification for more details.
Evaluation mission
==================
* Find important problems with the integration of the Neutron ML2 driver for DVS.
* Verify the specification.
* Provide tests for maintenance updates.
* Deploy the lab environment.
* Deploy MOS with the developed plugin installed.
* Create and run specific tests for the plugin/deployment.
* Verify the documentation.
*****************
Target Test Items
*****************
* Install/uninstall Fuel VMware DVS plugin
* Deploy cluster with Fuel VMware DVS plugin by Fuel
* Roles of nodes:

  * controller
  * compute
  * cinder
  * mongo
  * compute-vmware
  * cinder-vmware

* Hypervisors:

  * KVM + vCenter
  * QEMU + vCenter

* Storage:

  * Ceph
  * Cinder
  * VMware vCenter/ESXi datastore for images

* Network:

  * Neutron with VLAN segmentation
  * HA + Neutron with VLAN

* Additional components:

  * Ceilometer
  * Health Check

* Upgrade master node:

  * MOS and VMware DVS plugin

* Computes (Nova):

  * Launch and manage instances
  * Launch instances in batch

* Networks (Neutron):

  * Create and manage public and private networks
  * Create and manage routers
  * Port binding / disabling
  * Port security
  * Security groups
  * Assign vNIC to a VM
  * Connection between instances

* Heat:

  * Create stack from template
  * Delete stack

* Keystone:

  * Create and manage roles

* Horizon:

  * Create and manage projects
  * Create and manage users

* Glance:

  * Create and manage images

* GUI:

  * Fuel UI

* CLI:

  * Fuel CLI
*************
Test approach
*************
The project test approach consists of Smoke, Integration, System, Regression,
Failover, and Acceptance test levels.
**Smoke testing**
The goal of smoke testing is to ensure that the most critical features of the
Fuel VMware DVS plugin work after new build delivery. Smoke tests will be used
by QA to accept software builds from the Development team.
**Integration and System testing**
The goal of integration and system testing is to ensure that new or modified
components of Fuel and MOS work effectively with the Fuel VMware DVS plugin
without gaps in dataflow.
**Regression testing**
The goal of regression testing is to verify that key features of the Fuel
VMware DVS plugin are not affected by any changes performed during preparation
for release (including defect fixing, introduction of new features, and
possible updates).
**Failover testing**
Failover and recovery testing ensures that the target-of-test can successfully
failover and recover from a variety of hardware, software, or network
malfunctions without undue loss of data or data integrity.
**Acceptance testing**
The goal of acceptance testing is to ensure that the Fuel VMware DVS plugin
has reached a level of stability that meets requirements and acceptance
criteria.
***********************
Entry and exit criteria
***********************
Criteria for test process starting
==================================
Before the test process can be started, some preparatory actions must be
taken to satisfy the important preconditions. The following steps must be
executed successfully before the test phase starts:
* all project requirements are reviewed and confirmed;
* implementation of testing features has finished (a new build is ready for testing);
* implementation code is stored in GIT;
* test environment is prepared with the correct configuration and all needed software and hardware installed;
* test environment contains the last delivered build for testing;
* test plan is ready and confirmed internally;
* implementation of manual tests and autotests (if any) has finished.
Feature exit criteria
=====================
Testing of a feature can be finished when:
* All planned tests (prepared before) for the feature are executed; no defects are found during this run;
* All planned tests for the feature are executed; defects found during this run are verified or confirmed to be acceptable (known issues);
* The time allotted for testing of that feature in the project plan has run out and the Project Manager confirms that no changes to the schedule are possible.
Suspension and resumption criteria
==================================
Testing of a particular feature is suspended if there is a blocking issue
which prevents test execution. A blocking issue can be one of the following:
* Testing environment for the feature is not ready
* Testing environment is unavailable due to failure
* Feature has a blocking defect, which prevents further usage of this feature and there is no workaround available
************
Deliverables
************
List of deliverables
====================
Project testing activities result in the following reporting documents:
* Test plan
* Test report
* Automated test cases
Acceptance criteria
===================
* All acceptance criteria for user stories are met.
* All test cases are executed; BVT tests are passed.
* Critical and high issues are fixed.
* All required documents are delivered.
* Release notes include a report on the known errors of that release.
**********
Test cases
**********
.. include:: test_suite_smoke.rst
.. include:: test_suite_system.rst
.. include:: test_suite_failover.rst
.. include:: test_suite_upgrade.rst