From 559ae97a2a26df22c7fd679e119f9e3845a6900c Mon Sep 17 00:00:00 2001
From: Ilya Bumarskov
Date: Sun, 1 Nov 2015 22:21:26 +0300
Subject: [PATCH] Test Plan for NSX-v plugin 1.0.0

Change-Id: I4ed74066a473713e5dc93482438c7cfe3189fe26
---
 .gitignore                                   |   1 +
 .gitreview                                   |   2 +-
 README.md                                    |   6 +-
 doc/{source => }/Makefile                    |   0
 doc/{source => }/conf.py                     |   0
 doc/{source => }/image/floating-ip.png       | Bin
 .../image/neutron-network-settings.png       | Bin
 .../image/nsxv-settings-filled.png           | Bin
 doc/image/public-network-assignment.png      | Bin 0 -> 12581 bytes
 doc/{source => }/image/wizard-step1.png      | Bin
 doc/{source => }/image/wizard-step2.png      | Bin
 doc/{source => }/index.rst                   |  18 +-
 doc/test_plan/nsx-v_test_plan.rst            | 216 ++++
 doc/test_plan/test_suite_destructive.rst     | 470 +++++++++++
 doc/test_plan/test_suite_gui.rst             |  36 +
 doc/test_plan/test_suite_integration.rst     | 281 ++++++
 doc/test_plan/test_suite_smoke.rst           | 156 ++++
 doc/test_plan/test_suite_system.rst          | 664 ++++++++++++++
 18 files changed, 1842 insertions(+), 8 deletions(-)
 rename doc/{source => }/Makefile (100%)
 rename doc/{source => }/conf.py (100%)
 rename doc/{source => }/image/floating-ip.png (100%)
 rename doc/{source => }/image/neutron-network-settings.png (100%)
 rename doc/{source => }/image/nsxv-settings-filled.png (100%)
 create mode 100644 doc/image/public-network-assignment.png
 rename doc/{source => }/image/wizard-step1.png (100%)
 rename doc/{source => }/image/wizard-step2.png (100%)
 rename doc/{source => }/index.rst (83%)
 create mode 100644 doc/test_plan/nsx-v_test_plan.rst
 create mode 100644 doc/test_plan/test_suite_destructive.rst
 create mode 100644 doc/test_plan/test_suite_gui.rst
 create mode 100644 doc/test_plan/test_suite_integration.rst
 create mode 100644 doc/test_plan/test_suite_smoke.rst
 create mode 100644 doc/test_plan/test_suite_system.rst

diff --git a/.gitignore b/.gitignore
index 7844abd..d0eb4b6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,4 @@
 .build
 _build
 *.pyc
+*.rpm
diff --git a/.gitreview b/.gitreview
index 8d2326c..9dd5d65 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,4 @@
 [gerrit]
 host=review.openstack.org
 port=29418
-project=stackforge/fuel-plugin-nsxv.git
+project=openstack/fuel-plugin-nsxv.git
diff --git a/README.md b/README.md
index dad3cdf..4c63a48 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,9 @@ To build HTML variant of documentation you need to install sphinx document
 generator, easiest way to do this is to use doc/requirements.txt.

     $ pip install -r doc/requirements.txt
-    $ cd doc/source
+
+    $ cd doc/
+
     $ make html

-After that you can start exploring documentation in doc/source/_build/html/ directory.
+After that you can start exploring documentation in doc/_build/html/ directory.
diff --git a/doc/source/Makefile b/doc/Makefile
similarity index 100%
rename from doc/source/Makefile
rename to doc/Makefile
diff --git a/doc/source/conf.py b/doc/conf.py
similarity index 100%
rename from doc/source/conf.py
rename to doc/conf.py
diff --git a/doc/source/image/floating-ip.png b/doc/image/floating-ip.png
similarity index 100%
rename from doc/source/image/floating-ip.png
rename to doc/image/floating-ip.png
diff --git a/doc/source/image/neutron-network-settings.png b/doc/image/neutron-network-settings.png
similarity index 100%
rename from doc/source/image/neutron-network-settings.png
rename to doc/image/neutron-network-settings.png
diff --git a/doc/source/image/nsxv-settings-filled.png b/doc/image/nsxv-settings-filled.png
similarity index 100%
rename from doc/source/image/nsxv-settings-filled.png
rename to doc/image/nsxv-settings-filled.png
diff --git a/doc/image/public-network-assignment.png b/doc/image/public-network-assignment.png
new file mode 100644
index 0000000000000000000000000000000000000000..516514f592da0713eacdd1689bd099464a5c423b
GIT binary patch
literal 12581
[binary image data omitted; the wizard-step*.png rename stanzas and the doc/index.rst hunk that followed are garbled beyond recovery in this copy]

diff --git a/doc/test_plan/nsx-v_test_plan.rst b/doc/test_plan/nsx-v_test_plan.rst
new file mode 100644
index 0000000..8e08c22
--- /dev/null
+++ b/doc/test_plan/nsx-v_test_plan.rst
@@ -0,0 +1,216 @@
+==================================
+Test Plan for NSXv plugin v.1.0.0
+==================================
+
+.. contents:: Table of contents
+   :depth: 3
+
+************
+Introduction
+************
+
+Purpose
+=======
+
+The main purpose of this document is to describe the Quality Assurance
+activities required to ensure that the Fuel plugin for the VMware NSXv driver
+is ready for production. The project will be able to offer VMware NSXv
+integration functionality with MOS. The scope of this plan defines the
+following objectives:
+
+* Identify testing activities;
+* Outline testing approach, test types and the test cycle that will be used;
+* List metrics and deliverable elements;
+* List items in and out of testing scope;
+* Define exit criteria for testing purposes;
+* Describe the test environment.
+
+Scope
+=====
+
+Fuel NSXv plugin includes the NSX plugin for Neutron, which is developed by
+a third party.
+This test plan covers the full functionality of the Fuel NSXv plugin,
+including basic scenarios related to the NSXv Neutron plugin.
+
+The following test types should be provided:
+
+* Smoke/BVT tests
+* Integration tests
+* System tests
+* Destructive tests
+* GUI tests
+
+Performance testing will be executed on the scale lab, and a custom set of
+Rally scenarios must be run against an NSXv environment. The configuration,
+environment and scenarios for performance/scale testing should be determined
+separately.
+
+Intended Audience
+=================
+
+This document is intended for project team staff (QA and Dev engineers and
+managers) and all other persons who are interested in testing results.
+
+Limitation
+==========
+
+The plugin (or its components) has the following limitations:
+
+* The VMware NSXv plugin can be enabled only with Neutron tunnel segmentation.
+* An environment with the VMware NSXv plugin enabled cannot contain compute nodes.
+* Only VMware NSX Manager Virtual Appliance 6.1.4 or later is supported.
+
+Product compatibility matrix
+============================
+
+.. list-table:: product compatibility matrix
+   :widths: 15 10 30
+   :header-rows: 1
+
+   * - Requirement
+     - Version
+     - Comment
+   * - MOS
+     - 7.0 with Kilo
+     -
+   * - Operating System
+     - Ubuntu 14.04
+     - Only Ubuntu is supported in MOS 7.0
+   * - vSphere
+     - 5.5 and 6.0
+     -
+   * - NSXv
+     - 6.1.4 and 6.2.0
+     -
+
+**************************************
+Evaluation Mission and Test Motivation
+**************************************
+
+The project's main goal is to build a MOS plugin that integrates the Neutron
+VMware NSX plugin. This will make it possible to use Neutron for networking
+in VMware-related environments. The plugin must be compatible with version
+7.0 of Mirantis OpenStack and should be tested with the software/hardware
+described in the `product compatibility matrix`_.
+
+See the VMware NSX Plugin specification for more details.
+
+Evaluation mission
+==================
+
+* Find important problems with integration of the Neutron VMware NSX plugin.
+* Verify the specification.
+* Provide tests for maintenance updates.
+* Lab environment deployment.
+* Deploy MOS with the developed plugin installed.
+* Create and run specific tests for the plugin/deployment.
+* Documentation.
+
+*************
+Test approach
+*************
+
+The project test approach consists of Smoke, Integration, System, Regression,
+Failover and Acceptance test levels.
+
+**Smoke testing**
+
+The goal of smoke testing is to ensure that the most critical features of the
+Fuel VMware NSXv plugin work after new build delivery. Smoke tests will be
+used by QA to accept software builds from the Development team.
+
+**Integration and System testing**
+
+The goal of integration and system testing is to ensure that new or modified
+components of Fuel and MOS work effectively with the Fuel VMware NSXv plugin
+without gaps in dataflow.
+
+**Regression testing**
+
+The goal of regression testing is to verify that key features of the Fuel
+VMware NSXv plugin are not affected by any changes performed during
+preparation to release (including defect fixing, new feature introduction
+and possible updates).
+
+**Failover testing**
+
+Failover and recovery testing ensures that the target-of-test can
+successfully fail over and recover from a variety of hardware, software, or
+network malfunctions without undue loss of data or data integrity.
+
+**Acceptance testing**
+
+The goal of acceptance testing is to ensure that the Fuel VMware NSXv plugin
+has reached a level of stability that meets requirements and acceptance
+criteria.
+
+
+***********************
+Entry and exit criteria
+***********************
+
+Criteria for test process starting
+==================================
+
+Before the test process can be started, some preparation actions must be
+taken (important preconditions). The following steps must be executed
+successfully before the test phase starts:
+
+* all project requirements are reviewed and confirmed;
+* implementation of the features under test has finished (a new build is ready for testing);
+* implementation code is stored in GIT;
+* the test environment is prepared with the correct configuration and all needed software and hardware installed;
+* the test environment contains the latest delivered build for testing;
+* the test plan is ready and confirmed internally;
+* implementation of manual tests and autotests (if any) has finished.
+
+Feature exit criteria
+=====================
+
+Testing of a feature can be finished when:
+
+* All planned tests (prepared beforehand) for the feature are executed and no defects are found during this run;
+* All planned tests for the feature are executed; defects found during this run are verified or confirmed to be acceptable (known issues);
+* The time for testing of that feature according to the project plan has run out and the Project Manager confirms that no changes to the schedule are possible.
+
+Suspension and resumption criteria
+==================================
+
+Testing of a particular feature is suspended if there is a blocking issue
+which prevents test execution. A blocking issue can be one of the following:
+
+* The test environment for the feature is not ready
+* The test environment is unavailable due to failure
+* The feature has a blocking defect, which prevents further usage of this feature, and there is no workaround available
+* CI tests fail
+
+************
+Deliverables
+************
+
+List of deliverables
+====================
+
+Project testing activities are to result in the following reporting documents:
+
+* Test plan
+* Test report
+* Automated test cases
+
+Acceptance criteria
+===================
+
+* All acceptance criteria for user stories are met.
+* All test cases are executed. BVT tests are passed.
+* Critical and high issues are fixed.
+* All required documents are delivered.
+* Release notes including a report on the known errors of that release.
+
+**********
+Test cases
+**********
+
+.. include:: test_suite_smoke.rst
+.. include:: test_suite_integration.rst
+.. include:: test_suite_system.rst
+.. include:: test_suite_destructive.rst
+.. include:: test_suite_gui.rst
diff --git a/doc/test_plan/test_suite_destructive.rst b/doc/test_plan/test_suite_destructive.rst
new file mode 100644
index 0000000..62deb47
--- /dev/null
+++ b/doc/test_plan/test_suite_destructive.rst
@@ -0,0 +1,470 @@
+Destructive
+===========
+
+TC-101: Check ability to bind a port on NSXv to a VM, and to disable and enable this port.
+-------------------------------------------------------------------------------------------
+
+**ID**
+
+nsxv_ability_to_bind_port
+
+**Description**
+::
+
+    Verifies that the system can manipulate a port.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Log in to Horizon Dashboard.
+    Navigate to Project -> Compute -> Instances
+
+    Launch instance VM_1 with image TestVM-VMDK and flavor m1.tiny.
+
+    Launch instance VM_2 with image TestVM-VMDK and flavor m1.tiny.
+
+    Verify that the VMs can communicate with each other. Send icmp ping from VM_1 to VM_2 and vice versa.
+    Disable the NSXv port of VM_1 (see the CLI sketch below).
+
+    Verify that the VMs cannot communicate with each other. Send icmp ping from VM_2 to VM_1 and vice versa.
+
+    Enable the NSXv port of VM_1.
+
+    Verify that the VMs can communicate with each other. Send icmp ping from VM_1 to VM_2 and vice versa.
+    Pings should get a response.
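+
+One way to toggle the port from a controller CLI (a sketch; it assumes VM_1
+has a single port, and the ID placeholders are illustrative):
+::
+
+    neutron port-list --device-id <VM_1-uuid>               # find the port of VM_1
+    neutron port-update <port-id> --admin-state-up=False    # disable
+    neutron port-update <port-id> --admin-state-up=True     # re-enable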
+
+TC-102: Verify that vmclusters migrate after a controller shutdown.
+--------------------------------------------------------------------
+
+**ID**
+
+nsxv_shutdown_controller
+
+**Description**
+::
+
+    Verify that vmclusters migrate after a controller shutdown.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+No
+
+**Steps**
+::
+
+    Create a new environment using the Fuel UI Wizard:
+        add name of env and select release version with OS
+        as hypervisor type: select vcenter check box and QEMU/KVM radio button
+        network setup: Neutron with tunnel segmentation.
+        storage backends: default
+        additional services: all by default
+
+    In Settings tab:
+        enable NSXv plugin
+
+    Add nodes:
+        3 controllers
+
+    Set up Fuel interfaces on slaves:
+        eth0 - admin (PXE)
+        eth1 - public
+        eth2 - management
+        eth3 - VM (Fixed) ID: 103
+        eth4 - storage
+
+    Networks tab:
+        Public network: start '172.16.0.2' end '172.16.0.126'
+        CIDR '172.16.0.0/24'
+        Gateway 172.16.0.1
+        Floating ip range start '172.16.0.130' end '172.16.0.254'
+        Storage: CIDR '192.168.1.0/24'
+        Vlan tag is not set
+        Management: CIDR '192.168.0.0/24'
+        Vlan tag is not set
+        Neutron L2 configuration by default
+        Neutron L3 configuration by default
+    Click button 'save settings'
+    Click button 'verify networks'
+    Fill vcenter credentials:
+        Availability zone: vcenter
+        vCenter host: '172.16.0.254'
+        vCenter username:
+        vCenter password:
+
+    Add 2 vSphere Clusters:
+        vSphere Cluster: Cluster1
+        Service name: vmcluster1
+        Datastore regex: .*
+        vSphere Cluster: Cluster2
+        Service name: vmcluster2
+        Datastore regex: .*
+
+    Deploy Cluster
+
+    Run OSTF
+
+    Shut down the controller with vmclusters.
+
+    Check that the vmclusters migrate to another controller. Vmclusters should migrate to another controller.
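+
+A sketch of how the migration can be observed from a surviving controller
+(it assumes the vmcluster nova-compute services are registered per
+controller host):
+::
+
+    # run before and after the controller shutdown
+    nova service-list --binary nova-compute
+    # the vmcluster1/vmcluster2 services should come up on a surviving controller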
+
+TC-103: Deploy cluster with plugin, addition and deletion of nodes.
+-------------------------------------------------------------------
+
+**ID**
+
+nsxv_add_delete_nodes
+
+**Description**
+::
+
+    Verify that system functionality is OK after redeploy.
+
+**Complexity**
+
+advanced
+
+**Required to automate**
+
+No
+
+**Steps**
+::
+
+    Create a new environment using the Fuel UI Wizard:
+        add name of env and select release version with OS
+        as hypervisor type: select vcenter check box and QEMU/KVM radio button
+        network setup: Neutron with tunnel segmentation.
+        storage backends: default
+        additional services: all by default
+
+    In Settings tab:
+        enable NSXv plugin
+        select Vmware vcenter esxi datastore for images (glance)
+
+    Add nodes:
+        3 controllers
+        2 compute-vmwares
+        1 cinder-vmdk
+
+    Interfaces on slaves should be set up this way in the Fuel UI:
+        eth0 - admin (PXE)
+        eth1 - public
+        eth2 - management
+        eth3 - VM (Fixed) ID: 103
+        eth4 - storage
+
+    Networks tab:
+        Public network: start '172.16.0.2' end '172.16.0.126'
+        CIDR '172.16.0.0/24'
+        Gateway 172.16.0.1
+        Floating ip range start '172.16.0.130' end '172.16.0.254'
+        Storage: CIDR '192.168.1.0/24'
+        Vlan tag is not set
+        Management: CIDR '192.168.0.0/24'
+        Vlan tag is not set
+        Neutron L2 configuration by default
+        Neutron L3 configuration by default
+
+    Verify networks
+
+    Fill vcenter credentials:
+        Availability zone: vcenter
+        vCenter host: '172.16.0.254'
+        vCenter username:
+        vCenter password:
+
+    Add 2 vSphere Clusters:
+        vSphere Cluster: Cluster1
+        Service name: vmcluster1
+        Datastore regex: .*
+        vSphere Cluster: Cluster2
+        Service name: vmcluster2
+        Datastore regex: .*
+
+    Run OSTF
+
+    Remove node with cinder-vmdk role.
+
+    Add node with cinder role
+
+    Redeploy cluster.
+
+    Run OSTF
+
+    Remove node with compute-vmware role
+    Add node with cinder-vmware role
+
+    Redeploy cluster.
+
+    Run OSTF. Cluster should be deployed and all OSTF test cases should be passed.
+
+TC-104: Deploy cluster with plugin and delete one node with controller role.
+-----------------------------------------------------------------------------
+
+**ID**
+
+nsxv_add_delete_controller
+
+**Description**
+::
+
+    Verifies that system functionality is OK after a controller has been removed.
+
+**Complexity**
+
+advanced
+
+**Required to automate**
+
+No
+
+**Steps**
+::
+
+    Create a new environment using the Fuel UI Wizard:
+        add name of env and select release version with OS
+        as hypervisor type: select vcenter check box and QEMU/KVM radio button
+        network setup: Neutron with tunnel segmentation.
+        storage backends: default
+        additional services: all by default
+
+    In Settings tab:
+        enable NSXv plugin
+        select Vmware vcenter esxi datastore for images (glance)
+
+    Add nodes:
+        4 controllers
+        1 compute-vmware
+        1 cinder-vmdk
+
+    Interfaces on slaves should be set up this way in the Fuel UI:
+        eth0 - admin (PXE)
+        eth1 - public
+        eth2 - management
+        eth3 - VM (Fixed) ID: 103
+        eth4 - storage
+
+    Networks tab:
+        Public network: start '172.16.0.2' end '172.16.0.126'
+        CIDR '172.16.0.0/24'
+        Gateway 172.16.0.1
+        Floating ip range start '172.16.0.130' end '172.16.0.254'
+        Storage: CIDR '192.168.1.0/24'
+        Vlan tag is not set
+        Management: CIDR '192.168.0.0/24'
+        Vlan tag is not set
+        Neutron L2 configuration by default
+        Neutron L3 configuration by default
+
+    Verify networks
+    Fill vcenter credentials:
+        Availability zone: vcenter
+        vCenter host: '172.16.0.254'
+        vCenter username:
+        vCenter password:
+
+    Add 2 vSphere Clusters:
+        vSphere Cluster: Cluster1
+        Service name: vmcluster1
+        Datastore regex: .*
+        vSphere Cluster: Cluster2
+        Service name: vmcluster2
+        Datastore regex: .*
+
+    Run OSTF
+    Remove node with controller role.
+
+    Redeploy cluster
+
+    Run OSTF
+    Add controller
+    Redeploy cluster
+
+    Run OSTF. Cluster should be deployed and all OSTF test cases should be passed.
+
+TC-105: Verify that it is not possible to uninstall the Fuel NSXv plugin with a deployed environment.
+------------------------------------------------------------------------------------------------------
+
+**ID**
+
+nsxv_plugin
+
+**Description**
+::
+
+    It is not possible to remove the plugin while at least one environment exists.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Copy the plugin to the Fuel master node using scp.
+    Install the plugin:
+        fuel plugins --install plugin-name-1.0-0.0.1-0.noarch.rpm
+
+    Ensure that the plugin is installed successfully using cli, run command 'fuel plugins'.
+    Connect to the Fuel web UI.
+
+    Create a new environment using the Fuel UI Wizard:
+        add name of env and select release version with OS
+        as hypervisor type: select vcenter check box and Qemu radio button
+        network setup: Neutron with tunnel segmentation
+        storage backends: default
+        additional services: all by default
+
+    Click on the Settings tab.
+
+    In Settings tab:
+        enable NSXv plugin
+
+    Add nodes:
+        1 controller
+
+    Interfaces on slaves should be set up this way in the Fuel UI:
+        eth0 - admin (PXE)
+        eth1 - public
+        eth2 - management
+        eth3 - VM (Fixed) ID: 103
+        eth4 - storage
+
+    Networks tab:
+        Public network: start '172.16.0.2' end '172.16.0.126'
+        CIDR '172.16.0.0/24'
+        Gateway 172.16.0.1
+        Floating ip range start '172.16.0.130' end '172.16.0.254'
+        Storage: CIDR '192.168.1.0/24'
+        Vlan tag is not set
+        Management: CIDR '192.168.0.0/24'
+        Vlan tag is not set
+        Neutron L2 configuration by default
+        Neutron L3 configuration by default
+
+    Verify networks.
+
+    Fill vcenter credentials:
+        Availability zone: vcenter
+        vCenter host: '172.16.0.254'
+        vCenter username:
+        vCenter password:
+
+    Add 1 vSphere Cluster:
+        vSphere Cluster: Cluster1
+        Service name: vmcluster1
+        Datastore regex: .*
+
+    Deploy cluster
+    Run OSTF
+    Try to delete the plugin via cli, i.e. remove the plugin from the master node:
+        fuel plugins --remove plugin-name==1.0.0
+    Alert: "400 Client Error: Bad Request (Can't delete plugin which is enabled for some environment.)" should be displayed.
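+
+A console sketch of the expected failure (the plugin name and version are
+taken from the steps above and are illustrative):
+::
+
+    [root@fuel ~]# fuel plugins --remove plugin-name==1.0.0
+    400 Client Error: Bad Request (Can't delete plugin which is enabled for some environment.)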
+
+TC-106: Check cluster functionality after vCenter reboot.
+---------------------------------------------------------
+
+**ID**
+
+nsxv_plugin
+
+**Description**
+::
+
+    Verifies that system functionality is OK after vCenter has been rebooted.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Create a new environment using the Fuel UI Wizard:
+        add name of env and select release version with OS
+        as hypervisor type: select vcenter check box and QEMU/KVM radio button
+        network setup: Neutron with tunnel segmentation.
+        storage backends: default
+        additional services: all by default
+    In Settings tab:
+        enable NSXv plugin
+        select Vmware vcenter esxi datastore for images (glance)
+
+    Add nodes:
+        3 controllers
+        1 compute
+        1 cinder-vmware
+        1 cinder
+
+    Interfaces on slaves should be set up this way in the Fuel UI:
+        eth0 - admin (PXE)
+        eth1 - public
+        eth2 - management
+        eth3 - VM (Fixed) ID: 103
+        eth4 - storage
+
+    Networks tab:
+        Public network: start '172.16.0.2' end '172.16.0.126'
+        CIDR '172.16.0.0/24'
+        Gateway 172.16.0.1
+        Floating ip range start '172.16.0.130' end '172.16.0.254'
+        Storage: CIDR '192.168.1.0/24'
+        Vlan tag is not set
+        Management: CIDR '192.168.0.0/24'
+        Vlan tag is not set
+        Neutron L2 configuration by default
+        Neutron L3 configuration by default
+
+    Verify networks
+
+    Fill vcenter credentials:
+        Availability zone: vcenter
+        vCenter host: '172.16.0.254'
+        vCenter username:
+        vCenter password:
+
+    Add 2 vSphere Clusters:
+        vSphere Cluster: Cluster1
+        Service name: vmcluster1
+        Datastore regex: .*
+        vSphere Cluster: Cluster2
+        Service name: vmcluster2
+        Datastore regex: .*
+
+    Deploy cluster
+
+    Run OSTF
+
+    Launch instance VM_1 with image TestVM-VMDK and flavor m1.tiny.
+
+    Launch instance VM_2 with image TestVM-VMDK and flavor m1.tiny.
+
+    Check connection between the VMs, send ping from VM_1 to VM_2 and vice versa.
+    Reboot vcenter:
+        vmrun -T ws-shared -h https://localhost:443/sdk -u vmware -p VMware01 reset "[standard] vcenter/vcenter.vmx"
+
+    Check that the controller lost connection with vCenter
+
+    Wait for vCenter to come back up
+
+    Ensure that all instances from vCenter are displayed in the dashboard.
+
+    Ensure connectivity between vcenter1's and vcenter2's VM.
+    Run OSTF
+    Cluster should be deployed and all OSTF test cases should be passed. Pings should get a response.
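+
+A sketch of the post-reboot verification from a controller (it assumes an
+admin openrc on the node):
+::
+
+    source /root/openrc
+    nova list --all-tenants    # all vCenter instances should still be listed
+    # then re-check VM_1 <-> VM_2 connectivity, e.g. with ping from the VM consoles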
diff --git a/doc/test_plan/test_suite_gui.rst b/doc/test_plan/test_suite_gui.rst
new file mode 100644
index 0000000..50ea1dc
--- /dev/null
+++ b/doc/test_plan/test_suite_gui.rst
@@ -0,0 +1,36 @@
+GUI Testing
+===========
+
+TC-131: Verify that all elements of the NSXv plugin section meet GUI requirements.
+-----------------------------------------------------------------------------------
+
+**ID**
+
+nsxv_plugin
+
+**Description**
+::
+
+    Verify that all elements of the NSXv plugin section meet GUI requirements.
+
+**Complexity**
+
+smoke
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Log in to the Fuel web UI.
+    Click on the Settings tab.
+
+    Verify that the NSXv plugin section is present on the Settings tab.
+    Verify that the 'NSXv plugin' check box is disabled by default.
+
+    Verify that the user can enable it: enable the NSXv plugin by clicking on the 'NSXv plugin' check box.
+    Verify that all labels of the NSXv plugin section have the same font style and color.
+    Verify that all elements of the NSXv plugin section are vertically aligned. All elements of the NSXv plugin section should meet GUI requirements.
+
diff --git a/doc/test_plan/test_suite_integration.rst b/doc/test_plan/test_suite_integration.rst
new file mode 100644
index 0000000..239788c
--- /dev/null
+++ b/doc/test_plan/test_suite_integration.rst
@@ -0,0 +1,281 @@
+Integration
+===========
+
+TC-031: Deploy HA cluster with Fuel NSXv plugin.
+-------------------------------------------------
+
+**ID**
+
+nsxv_ha_mode
+
+**Description**
+::
+
+    Installation in HA mode with 3 controllers.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+No
+
+**Steps**
+::
+
+    Create a new environment using the Fuel UI Wizard:
+        add name of env and select release version with OS
+        as hypervisor type: select vcenter check box and QEMU/KVM radio button
+        network setup: Neutron with tunnel segmentation.
+        storage backends: default
+        additional services: all by default
+    In Settings tab:
+        enable NSXv plugin
+    Add nodes:
+        3 controllers
+    Interfaces on slaves should be set up this way in the Fuel UI:
+        eth0 - admin (PXE)
+        eth1 - public
+        eth2 - management
+        eth3 - VM (Fixed) ID: 103
+        eth4 - storage
+    Networks tab:
+        Public network: start '172.16.0.2' end '172.16.0.126'
+        CIDR '172.16.0.0/24'
+        Gateway 172.16.0.1
+        Floating ip range start '172.16.0.130' end '172.16.0.254'
+        Storage: CIDR '192.168.1.0/24'
+        Vlan tag is not set
+        Management: CIDR '192.168.0.0/24'
+        Vlan tag is not set
+        Neutron L2 configuration by default
+        Neutron L3 configuration by default
+    Verify networks.
+    Fill vcenter credentials:
+        Availability zone: vcenter
+        vCenter host: '172.16.0.254'
+        vCenter username:
+        vCenter password:
+    Add 2 vSphere Clusters:
+        vSphere Cluster: Cluster1
+        Service name: vmcluster1
+        Datastore regex: .*
+        vSphere Cluster: Cluster2
+        Service name: vmcluster2
+        Datastore regex: .*
+    Deploy cluster
+    Run OSTF. Cluster should be deployed and all OSTF test cases should be passed.
+
+TC-032: Deploy cluster with Fuel NSXv plugin and Ceph for Glance and Cinder.
+-----------------------------------------------------------------------------
+
+**ID**
+
+nsxv_ceph_no_vcenter
+
+**Description**
+::
+
+    Verifies installation of the plugin with Glance and Cinder.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+No
+
+**Steps**
+::
+
+    Create a new environment using the Fuel UI Wizard:
+        add name of env and select release version with OS
+        as hypervisor type: select vcenter check box and QEMU/KVM radio button
+        network setup: Neutron with tunnel segmentation.
+        storage backends: default
+        additional services: all by default
+    In Settings tab:
+        enable NSXv plugin
+        select 'Ceph RBD for volumes' (Cinder) and 'Ceph RBD for images (Glance)'
+    Add nodes:
+        1 controller
+        1 controller + ceph-osd
+        1 controller + cinder-vmware + ceph-osd
+        1 cinder-vmware + ceph-osd
+    Interfaces on slaves should be set up this way in the Fuel UI:
+        eth0 - admin (PXE)
+        eth1 - public
+        eth2 - management
+        eth3 - VM (Fixed) ID: 103
+        eth4 - storage
+    Networks tab:
+        Public network: start '172.16.0.2' end '172.16.0.126'
+        CIDR '172.16.0.0/24'
+        Gateway 172.16.0.1
+        Floating ip range start '172.16.0.130' end '172.16.0.254'
+        Storage: CIDR '192.168.1.0/24'
+        Vlan tag is not set
+        Management: CIDR '192.168.0.0/24'
+        Vlan tag is not set
+        Neutron L2 configuration by default
+        Neutron L3 configuration by default
+    Verify networks.
+    Fill vcenter credentials:
+        Availability zone: vcenter
+        vCenter host: '172.16.0.254'
+        vCenter username:
+        vCenter password:
+
+    Add 2 vSphere Clusters:
+        vSphere Cluster: Cluster1
+        Service name: vmcluster1
+        Datastore regex: .*
+        vSphere Cluster: Cluster2
+        Service name: vmcluster2
+        Datastore regex: .*
+
+    Deploy cluster
+    Run OSTF. Cluster should be deployed and all OSTF test cases should be passed.
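+
+A quick storage sanity check after this deployment (a sketch, run on a
+controller node):
+::
+
+    ceph -s          # overall cluster health should be HEALTH_OK
+    ceph osd tree    # all OSDs should be up and in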
+
+TC-034: Deploy cluster with Fuel VMware NSXv plugin and ceilometer.
+--------------------------------------------------------------------
+
+**ID**
+
+nsxv_ceilometer
+
+**Description**
+::
+
+    Installation of the plugin with ceilometer.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+No
+
+**Steps**
+::
+
+    Create a new environment using the Fuel UI Wizard:
+        add name of env and select release version with OS
+        as hypervisor type: select vcenter check box and QEMU/KVM radio button
+        network setup: Neutron with tunnel segmentation.
+        storage backends: default
+        additional services: install ceilometer
+
+    In Settings tab:
+        enable NSXv plugin
+    Add nodes:
+        3 controllers + mongo
+        1 compute-vmware
+
+    Interfaces on slaves should be set up this way in the Fuel UI:
+        eth0 - admin (PXE)
+        eth1 - public
+        eth2 - management
+        eth3 - VM (Fixed) ID: 103
+        eth4 - storage
+
+    Networks tab:
+        Public network: start '172.16.0.2' end '172.16.0.126'
+        CIDR '172.16.0.0/24'
+        Gateway 172.16.0.1
+        Floating ip range start '172.16.0.130' end '172.16.0.254'
+        Storage: CIDR '192.168.1.0/24'
+        Vlan tag is not set
+        Management: CIDR '192.168.0.0/24'
+        Vlan tag is not set
+        Neutron L2 configuration by default
+        Neutron L3 configuration by default
+
+    Verify networks.
+    Fill vcenter credentials:
+        Availability zone: vcenter
+        vCenter host: '172.16.0.254'
+        vCenter username:
+        vCenter password:
+
+    Add 1 vSphere Cluster:
+        vSphere Cluster: Cluster1
+        Service name: vmcluster1
+        Datastore regex: .*
+
+    Deploy cluster
+    Run OSTF. Cluster should be deployed and all OSTF test cases should be passed.
+
+TC-035: Deploy cluster with Fuel VMware NSXv plugin, Ceph for Cinder and VMware datastore backend for Glance.
+--------------------------------------------------------------------------------------------------------------
+
+**ID**
+
+nsxv_ceph
+
+**Description**
+::
+
+    Verifies installation of the plugin for vcenter with Glance and Cinder.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+No
+
+**Steps**
+::
+
+    Create a new environment using the Fuel UI Wizard:
+        add name of env and select release version with OS
+        as hypervisor type: select vcenter check box and QEMU/KVM radio button
+        network setup: Neutron with tunnel segmentation.
+        storage backends: default
+        additional services: default
+
+    In Settings tab:
+        enable NSXv plugin
+        select 'Ceph RBD for volumes' (Cinder) and 'Vmware Datastore for images (Glance)'
+
+    Add nodes:
+        3 controllers + ceph-osd
+        2 cinder-vmware
+
+    Interfaces on slaves should be set up this way in the Fuel UI:
+        eth0 - admin (PXE)
+        eth1 - public
+        eth2 - management
+        eth3 - VM (Fixed) ID: 103
+        eth4 - storage
+
+    Networks tab:
+        Public network: start '172.16.0.2' end '172.16.0.126'
+        CIDR '172.16.0.0/24'
+        Gateway 172.16.0.1
+        Floating ip range start '172.16.0.130' end '172.16.0.254'
+        Storage: CIDR '192.168.1.0/24'
+        Vlan tag is not set
+        Management: CIDR '192.168.0.0/24'
+        Vlan tag is not set
+        Neutron L2 configuration by default
+        Neutron L3 configuration by default
+
+    Verify networks.
+
+    Fill vcenter credentials:
+        Availability zone: vcenter
+        vCenter host: '172.16.0.254'
+        vCenter username:
+        vCenter password:
+    Add 2 vSphere Clusters:
+        vSphere Cluster: Cluster1
+        Service name: vmcluster1
+        Datastore regex: .*
+        vSphere Cluster: Cluster2
+        Service name: vmcluster2
+        Datastore regex: .*
+    Deploy cluster
+    Run OSTF
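+
+A minimal check that both storage backends work (a sketch; the volume name
+is illustrative):
+::
+
+    glance image-list                          # Glance backed by the VMware datastore
+    cinder create --display-name test_vol 1    # Cinder volume in Ceph RBD
+    cinder list                                # the volume should become 'available'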
diff --git a/doc/test_plan/test_suite_smoke.rst b/doc/test_plan/test_suite_smoke.rst
new file mode 100644
index 0000000..33f06f9
--- /dev/null
+++ b/doc/test_plan/test_suite_smoke.rst
@@ -0,0 +1,156 @@
+Smoke
+=====
+
+TC-001: Verify that Fuel VMware NSXv plugin is installed.
+----------------------------------------------------------
+
+**ID**
+
+nsxv_plugin
+
+**Description**
+::
+
+    Test case verifies plugin installation.
+
+**Complexity**
+
+smoke
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Copy the plugin to the Fuel master node using scp.
+    Install the plugin:
+        fuel plugins --install plugin-name-1.0-0.0.1-0.noarch.rpm
+    Ensure that the plugin is installed successfully using cli, run command 'fuel plugins'.
+    Connect to the Fuel web UI.
+    Create a new environment using the Fuel UI Wizard:
+        add name of env and select release version with OS
+        as hypervisor type: select vcenter check box and Qemu radio button
+        network setup: Neutron with tunnel segmentation
+        storage backends: default
+        additional services: all by default
+    Click on the Settings tab and check that the NSXv plugin section is displayed with all required GUI elements.
+    The NSXv plugin section should be displayed with all required GUI elements.
+
+TC-002: Verify that Fuel VMware NSXv plugin is uninstalled.
+-------------------------------------------------------------
+
+**ID**
+
+nsxv_plugin
+
+**Description**
+::
+
+    Test verifies that the plugin can be uninstalled.
+
+**Complexity**
+
+smoke
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Remove the plugin from the master node:
+        fuel plugins --remove plugin-name==1.0.0
+    Verify that the plugin is removed, run command 'fuel plugins'.
+    Connect to the Fuel web UI.
+    Create a new environment using the Fuel UI Wizard:
+        add name of env and select release version with OS
+        as hypervisor type: select vcenter check box and Qemu radio button
+        network setup: Neutron with tunnel segmentation.
+        storage backends: default
+        additional services: all by default
+
+    Click on the Settings tab and check that the NSXv plugin section is not displayed.
+    The NSXv plugin section should not be displayed.
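+
+The install/remove steps above correspond to this console sketch (the rpm
+name is illustrative):
+::
+
+    [root@fuel ~]# fuel plugins --install plugin-name-1.0-0.0.1-0.noarch.rpm
+    [root@fuel ~]# fuel plugins               # the plugin should be listed
+    [root@fuel ~]# fuel plugins --remove plugin-name==1.0.0
+    [root@fuel ~]# fuel plugins               # the plugin should be gone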
+
+TC-003: Deploy cluster with plugin and vmware datastore backend.
+----------------------------------------------------------------
+
+**ID**
+
+nsxv_smoke
+
+**Description**
+::
+
+    Test verifies installation with base configuration.
+
+**Complexity**
+
+smoke
+
+**Required to automate**
+
+No
+
+**Steps**
+::
+
+    Create a new environment using the Fuel UI Wizard:
+        add name of env and select release version with OS
+        as hypervisor type: select vcenter check box and QEMU/KVM radio button
+        network setup: Neutron with tunnel segmentation.
+        storage backends: default
+        additional services: all by default
+    In Settings tab:
+        enable NSXv plugin
+        select Vmware vcenter esxi datastore for images (glance)
+    Add nodes:
+        1 controller
+        1 compute-vmware
+    Interfaces on slaves should be set up this way in the Fuel UI:
+        eth0 - admin (PXE)
+        eth1 - public
+        eth2 - management
+        eth3 - VM (Fixed) ID: 103
+        eth4 - storage
+    Networks tab:
+        Public network: start '172.16.0.2' end '172.16.0.126'
+        CIDR '172.16.0.0/24'
+        Gateway 172.16.0.1
+        Floating ip range start '172.16.0.130' end '172.16.0.254'
+        Storage: CIDR '192.168.1.0/24'
+        Vlan tag is not set
+        Management: CIDR '192.168.0.0/24'
+        Vlan tag is not set
+        Neutron L2 configuration by default
+        Neutron L3 configuration by default
+
+    Verify networks.
+    Fill vcenter credentials:
+        Availability zone: vcenter
+        vCenter host: '172.16.0.254'
+        vCenter username:
+        vCenter password:
+
+    Add 2 vSphere Clusters:
+        vSphere Cluster: Cluster1
+        Service name: vmcluster1
+        Datastore regex: .*
+        vSphere Cluster: Cluster2
+        Service name: vmcluster2
+        Datastore regex: .*
+
+    Fill Glance credentials:
+        vCenter host: 172.16.0.254
+        vCenter username:
+        vCenter password:
+        Datacenter name: Datacenter
+        Datastore name: nfs
+
+    Deploy cluster
+
+    Run OSTF
+    Cluster should be deployed and all OSTF test cases should be passed.
+
diff --git a/doc/test_plan/test_suite_system.rst b/doc/test_plan/test_suite_system.rst
new file mode 100644
index 0000000..bf6697b
--- /dev/null
+++ b/doc/test_plan/test_suite_system.rst
@@ -0,0 +1,664 @@
+System
+======
+
+Setup for system tests
+----------------------
+
+**ID**
+
+TO DO
+
+**Description**
+::
+
+    This is the common configuration for all system tests.
+
+**Complexity**
+
+advanced
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Install the NSXv plugin on the master node.
+    Launch instances from the tcl.vmdk image, which is included in the plugin package and is available under Horizon.
+    Create a new environment using the Fuel UI Wizard:
+        add name of an env and select release version with OS
+        as hypervisor type: select vcenter check box and QEMU/KVM radio button
+        network setup: Neutron with tunnel segmentation.
+        storage backends: default
+        additional services: all by default
+
+    In Settings tab:
+        enable NSXv plugin
+    Add nodes:
+        3 controllers
+        1 compute-vmware
+
+    Interfaces on slaves should be set up this way in the Fuel UI:
+        eth0 - admin (PXE)
+        eth1 - public
+        eth2 - management
+        eth3 - VM (Fixed) ID: 103
+        eth4 - storage
+
+    Networks tab:
+        Public network: start '172.16.0.2' end '172.16.0.126'
+        CIDR '172.16.0.0/24'
+        Gateway 172.16.0.1
+        Floating ip range start '172.16.0.130' end '172.16.0.254'
+        Storage: CIDR '192.168.1.0/24'
+        Vlan tag is not set
+        Management: CIDR '192.168.0.0/24'
+        Vlan tag is not set
+        Neutron L2 configuration by default
+        Neutron L3 configuration by default
+
+    Verify networks.
+    Add 2 vSphere Clusters:
+        vSphere Cluster: Cluster1
+        Service name: vmcluster1
+        Datastore regex: .*
+        vSphere Cluster: Cluster2
+        Service name: vmcluster2
+        Datastore regex: .*
+
+    Deploy cluster
+
+    Run OSTF
+    Cluster should be deployed and all OSTF test cases should be passed.
+
+TC-061: Check abilities to create and terminate networks on NSX.
+----------------------------------------------------------------
+
+**ID**
+
+nsxv_create_terminate_networks
+
+**Description**
+::
+
+    Verifies that creation of a network is propagated to vcenter.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Log in to Horizon Dashboard.
+
+    Add private networks net_01 and net_02.
+
+    Check that the networks are present in the vSphere.
+
+    Remove private network net_01.
+
+    Check that network net_01 is not present in the vSphere.
+    Add private network net_01.
+
+    Check that the network is present in the vSphere. Networks net_01 and net_02 should be added.
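+
+The same checks can be scripted from a controller (a sketch):
+::
+
+    neutron net-create net_01
+    neutron net-create net_02
+    # verify the corresponding logical switches appear in vSphere, then:
+    neutron net-delete net_01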
+
+TC-062: Check abilities to assign multiple vNICs to a single VM.
+----------------------------------------------------------------
+
+**ID**
+
+nsxv_assign_multiple_vnic
+
+**Description**
+::
+
+    It is possible to assign multiple vNICs.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Log in to Horizon Dashboard.
+    Add two private networks (net01 and net02).
+    Add one subnet (net01_subnet01: 192.168.101.0/24, net02_subnet01: 192.168.102.0/24) to each network.
+    Launch instance VM_1 with image TestVM-TCL and flavor m1.tiny in the vcenter1 az.
+    Launch instance VM_2 with image TestVM-TCL and flavor m1.tiny in the vcenter2 az.
+    Check the ability to assign multiple vNICs (net01 and net02) to VM_1.
+
+    Check the ability to assign multiple vNICs (net01 and net02) to VM_2.
+    Send icmp ping from VM_1 to VM_2 and vice versa.
+    VM_1 and VM_2 should be attached to multiple vNICs (net01 and net02). Pings should get a response.
+
+TC-063: Check connection between VMs in one tenant.
+---------------------------------------------------
+
+**ID**
+
+TO DO
+
+**Description**
+::
+
+    Checks connections between VMs inside a tenant.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Log in to Horizon Dashboard.
+
+    Navigate to Project -> Compute -> Instances
+
+    Launch instance VM_1 with image TestVM-TCL and flavor m1.tiny in the vcenter1 az.
+
+    Launch instance VM_2 with image TestVM-TCL and flavor m1.tiny in the vcenter2 az.
+
+    Verify that VMs in the same tenant can communicate with each other. Send icmp ping from VM_1 to VM_2 and vice versa.
+    Pings should get a response.
+
+TC-064: Check connectivity between VMs attached to different networks with a router between them.
+-------------------------------------------------------------------------------------------------
+
+**ID**
+
+nsxv_connectivity_between_different_networks
+
+**Description**
+::
+
+    Verifies that there is a connection between networks connected through the router.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Log in to Horizon Dashboard.
+
+    Add two private networks (net01 and net02).
+
+    Add one subnet (net01_subnet01: 192.168.101.0/24, net02_subnet01: 192.168.102.0/24) to each network.
+
+    Navigate to Project -> Compute -> Instances
+
+    Launch instances VM_1 and VM_2 in the network 192.168.101.0/24 with image TestVM-TCL and flavor m1.tiny in the vcenter1 az.
+
+    Launch instances VM_3 and VM_4 in the network 192.168.102.0/24 with image TestVM-TCL and flavor m1.tiny in the vcenter2 az.
+
+    Verify that VMs in the same network can communicate
+    with each other. Send icmp ping from VM_1 to VM_2, VM_3 to VM_4 and vice versa.
+    Verify that VMs in different networks cannot communicate
+    with each other. Send icmp ping from VM_1 to VM_3, VM_4 to VM_2 and vice versa.
+    Create Router_01, set gateway and add interface to external network (see the CLI sketch below).
+    Attach private networks to the router.
+
+    Verify that VMs in different networks can now communicate with each other. Send icmp ping from VM_1 to VM_3, VM_4 to VM_2 and vice versa.
+    Add new Router_02, set gateway and add interface to external network.
+    Detach net_02 from Router_01 and attach it to Router_02.
+
+    Verify that VMs in different networks can communicate with each other. Send icmp ping from VM_1 to VM_3, VM_4 to VM_2 and vice versa.
+    Pings should get a response.
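+
+The router manipulations above look like this in the CLI (a sketch; the
+external network name and subnet IDs are illustrative):
+::
+
+    neutron router-create Router_01
+    neutron router-gateway-set Router_01 <external-net>
+    neutron router-interface-add Router_01 <net01_subnet01-id>
+    neutron router-interface-add Router_01 <net02_subnet01-id>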
+
+TC-065: Check connectivity between VMs attached on the same provider network with shared router.
+------------------------------------------------------------------------------------------------
+
+**ID**
+
+nsxv_connectivity_via_shared_router
+
+**Description**
+::
+
+    Checks that it is possible to connect via the shared router type.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Add provider network via cli.
+
+    Log in to Horizon Dashboard.
+    Create a shared router (default type) and use it for routing between instances.
+    Navigate to Project -> Compute -> Instances
+    Launch instance VM_1 in the provider network with image TestVM-TCL and flavor m1.tiny in the vcenter1 az.
+
+    Launch instance VM_2 in the provider network with image TestVM-TCL and flavor m1.tiny in the vcenter2 az.
+
+    Verify that VMs in the same provider network can communicate
+    with each other. Send icmp ping from VM_1 to VM_2 and vice versa.
+    Pings should get a response.
+
+TC-066: Check connectivity between VMs attached on the same provider network with distributed router.
+-----------------------------------------------------------------------------------------------------
+
+**ID**
+
+nsxv_connectivity_via_distributed_router
+
+**Description**
+::
+
+    Verifies that it is possible to connect via the distributed router type.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Add provider network via cli.
+
+    Log in to Horizon Dashboard.
+
+    Create a distributed router and use it for routing between instances. Only available via CLI:
+        neutron router-create rdistributed --distributed True
+
+    Navigate to Project -> Compute -> Instances
+    Launch instance VM_1 in the provider network with image TestVM-TCL and flavor m1.tiny in the vcenter1 az.
+
+    Launch instance VM_2 in the provider network with image TestVM-TCL and flavor m1.tiny in the vcenter2 az.
+
+    Verify that VMs in the same provider network can communicate
+    with each other. Send icmp ping from VM_1 to VM_2 and vice versa.
+    Pings should get a response.
+
+TC-067: Check connectivity between VMs attached on the same provider network with exclusive router.
+---------------------------------------------------------------------------------------------------
+
+**ID**
+
+nsxv_connectivity_via_exclusive_router
+
+**Description**
+::
+
+    Verifies that it is possible to connect via the exclusive router type.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Add provider network via cli.
+
+    Log in to Horizon Dashboard.
+
+    Create an exclusive router and use it for routing between instances. Only available via CLI:
+        neutron router-create rexclusive --router_type exclusive
+
+    Navigate to Project -> Compute -> Instances
+    Launch instance VM_1 in the provider network with image TestVMDK-TCL and flavor m1.tiny in the vcenter1 az.
+
+    Launch instance VM_2 in the provider network with image TestVMDK-TCL and flavor m1.tiny in the vcenter2 az.
+
+    Verify that VMs in the same provider network can communicate
+    with each other. Send icmp ping from VM_1 to VM_2 and vice versa. Pings should get a response.
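+
+The "Add provider network via cli" step in TC-065 - TC-067 can be sketched
+as follows (the network type and segmentation ID are illustrative and depend
+on the NSXv setup):
+::
+
+    neutron net-create provider_net --shared \
+        --provider:network_type vlan --provider:segmentation_id 1000
+    neutron subnet-create provider_net 10.100.0.0/24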
+
+TC-068: Check isolation between VMs in different tenants.
+---------------------------------------------------------
+
+**ID**
+
+nsxv_different_tenants
+
+**Description**
+::
+
+    Verifies isolation between different tenants.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Log in to Horizon Dashboard.
+    Create non-admin tenant test_tenant:
+
+    Navigate to Identity -> Projects.
+
+    Click on Create Project.
+    Type name test_tenant.
+
+    On the Project Members tab, add admin with admin and member roles.
+
+    Navigate to Project -> Network -> Networks
+
+    Create a network with 2 subnets
+    Navigate to Project -> Compute -> Instances
+    Launch instance VM_1
+    Navigate to test_tenant
+
+    Navigate to Project -> Network -> Networks
+
+    Create a network with a subnet.
+    Create a router, set gateway and add interface
+
+    Navigate to Project -> Compute -> Instances
+
+    Launch instance VM_2
+
+    Verify that VMs in different tenants cannot communicate
+    with each other. Send icmp ping from VM_1 of the admin tenant to VM_2 of test_tenant and vice versa. Pings should not get a response.
+
+TC-069: Check connectivity between VMs with same ip in different tenants.
+-------------------------------------------------------------------------
+
+**ID**
+
+nsxv_same_ip_different_tenants
+
+**Description**
+::
+
+    Verifies connectivity with the same IP in different tenants.
+
+**Complexity**
+
+advanced
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Log in to Horizon Dashboard.
+
+    Create 2 non-admin tenants 'test_1' and 'test_2':
+    Navigate to Identity -> Projects.
+    Click on Create Project.
+
+    Type name 'test_1' of tenant.
+
+    Click on Create Project.
+
+    Type name 'test_2' of tenant.
+
+    On the Project Members tab, add admin with admin and member roles.
+
+    In tenant 'test_1' create net1 and subnet1 with CIDR 10.0.0.0/24
+    In tenant 'test_1' create security group 'SG_1' and add a rule that allows ingress icmp traffic
+    In tenant 'test_2' create net2 and subnet2 with CIDR 10.0.0.0/24
+    In tenant 'test_2' create security group 'SG_2'
+
+    In tenant 'test_1' add VM_1 of vcenter1 in net1 with ip 10.0.0.4 and 'SG_1' as security group.
+    In tenant 'test_1' add VM_2 of vcenter2 in net1 with ip 10.0.0.5 and 'SG_1' as security group.
+    In tenant 'test_2' create net1 and subnet1 with CIDR 10.0.0.0/24
+    In tenant 'test_2' create security group 'SG_1' and add a rule that allows ingress icmp traffic
+    In tenant 'test_2' add VM_3 of vcenter1 in net1 with ip 10.0.0.4 and 'SG_1' as security group.
+    In tenant 'test_2' add VM_4 of vcenter2 in net1 with ip 10.0.0.5 and 'SG_1' as security group.
+    Verify that VMs with the same ip in different tenants can communicate
+    with each other. Send icmp ping from VM_1 to VM_3, VM_2 to VM_4 and vice versa. Pings should get a response.
+
+TC-070: Check connectivity of VMs to public network.
+----------------------------------------------------
+
+**ID**
+
+nsxv_public_network_availability
+
+**Description**
+::
+
+    Verifies that the public network is available.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Log in to Horizon Dashboard.
+
+    Create net01: net01_subnet, 192.168.112.0/24 and attach it to the router04
+    Launch instance VM_1 of vcenter1 AZ with image TestVM-TCL and flavor m1.tiny in the net_04.
+    Launch instance VM_2 of vcenter2 AZ with image TestVM-TCL and flavor m1.tiny in the net_01.
+    Send ping from instances VM_1 and VM_2 to 8.8.8.8 or another outside ip. Pings should get a response.
+
+TC-071: Check connectivity of VMs to public network with floating ip.
+---------------------------------------------------------------------
+
+**ID**
+
+nsxv_floating_ip_to_public
+
+**Description**
+::
+
+    Verifies that the public network is available via floating ip.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Log in to Horizon Dashboard
+    Create net01: net01_subnet, 192.168.112.0/24 and attach it to the router04
+    Launch instance VM_1 of vcenter1 AZ with image TestVM-TCL and flavor m1.tiny in the net_04. Associate floating ip.
+
+    Launch instance VM_2 of vcenter2 AZ with image TestVM-TCL and flavor m1.tiny in the net_01. Associate floating ip.
+
+    Send ping from instances VM_1 and VM_2 to 8.8.8.8 or another outside ip.
+    Pings should get a response.
+
+TC-072: Check abilities to create and delete security group.
+------------------------------------------------------------
+
+**ID**
+
+nsxv_create_and_delete_secgroups
+
+**Description**
+::
+
+    Verifies that creation and deletion of a security group work fine.
+
+**Complexity**
+
+advanced
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Log in to Horizon Dashboard.
+    Launch instance VM_1 in the tenant network net_02 with image TestVM-TCL and flavor m1.tiny in the vcenter1 az.
+    Launch instance VM_2 in the tenant network net_02 with image TestVM-TCL and flavor m1.tiny in the vcenter2 az.
+
+    Create security group SG_1 to allow ICMP traffic.
+    Add Ingress rule for ICMP protocol to SG_1
+
+    Attach SG_1 to VMs
+
+    Check ping between VM_1 and VM_2 and vice versa
+
+    Create security group SG_2 to allow TCP traffic on port 80.
+    Add Ingress rule for TCP protocol to SG_2
+
+    Attach SG_2 to VMs
+
+    ssh from VM_1 to VM_2 and vice versa
+    Delete all rules from SG_1 and SG_2
+
+    Check that ping and ssh are not available from VM_1 to VM_2 and vice versa
+    Add Ingress rule for ICMP protocol to SG_1
+
+    Add Ingress rule for TCP protocol to SG_2
+
+    Check ping between VM_1 and VM_2 and vice versa
+
+    Check ssh from VM_1 to VM_2 and vice versa
+    Delete security groups.
+    Attach VMs to the default security group.
+
+    Check ping between VM_1 and VM_2 and vice versa
+    Check SSH from VM_1 to VM_2 and vice versa
+    We should have the ability to send ICMP and TCP traffic between VMs in different tenants.
+
+TC-073: Verify that only the associated MAC and IP addresses can communicate on the logical port.
+-------------------------------------------------------------------------------------------------
+
+**ID**
+
+nsxv_associated_addresses_communication_on_port
+
+**Description**
+::
+
+    Verifies that only associated addresses can communicate on the logical port.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Log in to Horizon Dashboard.
+
+    Launch 2 instances.
+    Verify that traffic can be successfully sent from and received on the MAC and IP address associated with the logical port.
+    Configure a new IP address on the instance associated with the logical port.
+    Confirm that the instance cannot communicate with that IP address.
+    Configure a new MAC address on the instance associated with the logical port.
+    Confirm that the instance cannot communicate with that MAC address and the original IP address.
+    The instance should not communicate with the new ip and mac addresses but it should communicate with the old IP.
+
+TC-075: Check creation of instances in one group simultaneously.
+----------------------------------------------------------------
+
+**ID**
+
+nsxv_create_and_delete_vms
+
+**Description**
+::
+
+    Verifies that the system can create and delete several instances simultaneously.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Navigate to Project -> Compute -> Instances
+    Launch 5 instances VM_1 simultaneously with image TestVM-TCL and flavor m1.micro in the vcenter1 az in default net_04
+
+    All instances should be created without any error.
+
+    Launch 5 instances VM_2 simultaneously with image TestVM-TCL and flavor m1.micro in the vcenter2 az in default net_04
+
+    All instances should be created without any error.
+
+    Check connection between VMs (ping, ssh)
+
+    Delete all VMs from horizon simultaneously.
+    All instances should be deleted without any error.
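+
+A sketch of the simultaneous launch from the CLI (the net id lookup is
+illustrative):
+::
+
+    NET_ID=$(neutron net-list | awk '/net_04/{print $2}')
+    nova boot --image TestVM-TCL --flavor m1.micro --min-count 5 \
+        --availability-zone vcenter1 --nic net-id=$NET_ID VM_1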
+
+TC-076: Check that the environment supports assigning the public network to all nodes
+--------------------------------------------------------------------------------------
+
+**ID**
+
+nsxv_public_network_to_all_nodes
+
+**Description**
+::
+
+    Verifies that the checkbox "Assign public network to all nodes" works as designed.
+
+    Assuming the default installation has been done with the "Assign public network to all nodes" option unchecked.
+
+**Complexity**
+
+core
+
+**Required to automate**
+
+Yes
+
+**Steps**
+::
+
+    Connect through ssh to the Controller node.
+    Run 'ifconfig'. There should be an interface with an ip from the public network IP Range (Networks tab).
+    Connect through ssh to the compute-vmware node.
+    Run 'ifconfig'. There should be no interface with an ip from the public network IP Range.
+    Redeploy the environment with the option Public network assignment -> Assign public network to all nodes checked. The option should remain checked after deploy.
+    Connect through ssh to the Controller node.
+    Run 'ifconfig'. There should be an interface with an ip from the public network IP Range.
+    Connect through ssh to the compute-vmware node.
+    Run 'ifconfig'. There should be an interface with an ip from the public network IP Range as well.
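+
+A scripted version of the ifconfig check (a sketch; it assumes the public
+range from the Networks tab, 172.16.0.0/24):
+::
+
+    # on each node: is there an interface holding an address from the public range?
+    ip -4 addr | grep 'inet 172\.16\.0\.'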