Retire master branch of nova-lxd

Drop content and replace with retirement notice.

Change-Id: I2de2eff7694d60597a6413a0a64124fbbede69bb
This commit is contained in:
James Page 2019-07-23 13:56:57 +01:00
parent 09ea20c600
commit 6603a7f323
89 changed files with 10 additions and 10986 deletions

View File

@ -1,7 +0,0 @@
[run]
branch = True
source = nova.virt.lxd
omit = nova/tests/*
[report]
ignore_errors = True

58
.gitignore vendored
View File

@ -1,58 +0,0 @@
*.py[cod]
*.idea
# C extensions
*.so
# Packages
*.egg
*.eggs
*.egg-info
dist
build
eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg
lib
lib64
# Installer logs
pip-log.txt
# Unit test / coverage reports
.coverage
.tox
nosetests.xml
.stestr
.venv
# Translations
*.mo
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
# Complexity
output/*.html
output/*/index.html
# Sphinx
doc/build
# pbr generates these
AUTHORS
ChangeLog
# Editors
*~
.*.swp
.*sw?
cover

View File

@ -1,3 +0,0 @@
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>

View File

@ -1,3 +0,0 @@
[DEFAULT]
test_path=./nova/tests/unit/virt/lxd
top_dir=./nova/tests/unit/virt/lxd/

View File

@ -1,30 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This job will execute 'tox -e func_lxd' from the OSA
# repo specified in 'osa_test_repo'.
- job:
name: openstack-ansible-nova-lxd
parent: openstack-ansible-cross-repo-functional
voting: false
required-projects:
- name: openstack/openstack-ansible-os_nova
vars:
tox_env: lxd
osa_test_repo: openstack/openstack-ansible-os_nova
- project:
templates:
- openstack-lower-constraints-jobs
check:
jobs:
- openstack-ansible-nova-lxd

View File

@ -1,91 +0,0 @@
Crash course in lxd setup
=========================
nova-lxd absolutely requires lxd, though its installation and configuration
is out of scope here. If you're running Ubuntu, here is the easy path
to a running lxd.
.. code-block:: bash
add-apt-repository ppa:ubuntu-lxc/lxd-git-master && sudo apt-get update
apt-get -y install lxd
usermod -G lxd ${your_username|stack}
service lxd start
If you're currently logged in as the user you just added to lxd, you'll
need to log out and log back in again.
Using nova-lxd with devstack
============================
nova-lxd includes a plugin for use in devstack. If you'd like to run
devstack with nova-lxd, you'll want to add the following to `local.conf`:
.. code-block:: bash
enable_plugin nova-lxd https://git.openstack.org/openstack/nova-lxd
In this case, nova-lxd will run HEAD from master. You may want to point
this at your own fork. A final argument to `enable_plugin` can be used
to specify a git revision.
Configuration and installation of devstack is beyond the scope
of this document. Here's an example `local.conf` file that will
run the very minimum you'll need for devstack.
.. code-block:: bash
[[local|localrc]]
ADMIN_PASSWORD=password
DATABASE_PASSWORD=$ADMIN_PASSWORD
RABBIT_PASSWORD=$ADMIN_PASSWORD
SERVICE_PASSWORD=$ADMIN_PASSWORD
SERVICE_TOKEN=$ADMIN_PASSWORD
disable_service cinder c-sch c-api c-vol
disable_service n-net n-novnc
disable_service horizon
disable_service ironic ir-api ir-cond
enable_service q-svc q-agt q-dhcp q-l3 q-meta
# Optional, to enable tempest configuration as part of devstack
enable_service tempest
enable_plugin nova-lxd https://git.openstack.org/openstack/nova-lxd
# More often than not, stack.sh explodes trying to configure IPv6 support,
# so let's just disable it for now.
IP_VERSION=4
Once devstack is running, you'll want to add the lxd image to glance. You can
do this (as an admin) with:
.. code-block:: bash
wget http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-root.tar.xz
glance image-create --name lxd --container-format bare --disk-format raw \
--visibility=public < trusty-server-cloudimg-amd64-root.tar.xz
To run the tempest tests, you can use:
.. code-block:: bash
/opt/stack/tempest/run_tempest.sh -N tempest.api.compute
Errata
======
Patches should be submitted to Openstack Gerrit via `git-review`.
Bugs should be filed on Launchpad:
https://bugs.launchpad.net/nova-lxd
If you would like to contribute to the development of OpenStack,
you must follow the steps in this page:
https://docs.openstack.org/infra/manual/developers.html

View File

@ -1,4 +0,0 @@
nova-lxd Style Commandments
===============================================
Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/

175
LICENSE
View File

@ -1,175 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

View File

@ -1,6 +0,0 @@
include AUTHORS
include ChangeLog
exclude .gitignore
exclude .gitreview
global-exclude *.pyc

View File

@ -1,42 +0,0 @@
# nova-lxd [![Build Status](https://travis-ci.org/lxc/nova-lxd.svg?branch=master)](https://travis-ci.org/lxc/nova-lxd)
An OpenStack Compute driver for managing containers using LXD.
## nova-lxd on Devstack
For development purposes, nova-lxd provides a devstack plugin. To use it, just include the
following in your devstack `local.conf`:
```
[[local|localrc]]
enable_plugin nova-lxd https://git.openstack.org/openstack/nova-lxd
# You should enable the following if you use lxd 3.0.
# In addition, this setting requires zfs >= 0.7.0.
#LXD_BACKEND_DRIVER=zfs
```
Change git repositories as needed (it's probably not very useful to point to the main
nova-lxd repo). If you have a local tree you'd like to use, you can symlink your tree to
`/opt/stack/nova-lxd` and do your development from there.
The devstack default image is cirros LXD; you can still download an
Ubuntu image. Once your stack is up and you've configured authentication
against your devstack, do the following::
```
wget http://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64-root.tar.xz
glance image-create --name bionic-amd64 --disk-format raw --container-format bare --file bionic-server-cloudimg-amd64-root.tar.xz
```
# Support and discussions
We use the LXC mailing-lists for developer and user discussions, you can
find and subscribe to those at: https://lists.linuxcontainers.org
If you prefer live discussions, some of us also hang out in
[#lxcontainers](http://webchat.freenode.net/?channels=#lxcontainers) on irc.freenode.net.
## Bug reports
Bug reports can be filed at https://bugs.launchpad.net/nova-lxd

10
README.rst Normal file
View File

@ -0,0 +1,10 @@
This project is no longer maintained.
The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".
For any further questions, please email
openstack-discuss@lists.openstack.org or join #openstack-dev on
Freenode.

View File

@ -1 +0,0 @@
[python: **.py]

View File

@ -1,25 +0,0 @@
#!/bin/bash -xe
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This script is executed inside post_test function in devstack gate.
# It copies the LXD daemon/container logs into the gate log directory so
# they are preserved as CI job artifacts.
# NOTE(review): $BASE is assumed to be exported by the devstack-gate
# environment -- confirm before running this standalone.
source $BASE/new/devstack/functions
INSTALLDIR=${INSTALLDIR:-/opt/stack}
source $INSTALLDIR/devstack/functions-common

LOGDIR=/opt/stack/logs

# Collect logs from the containers
sudo mkdir -p $LOGDIR/containers/
sudo cp -rp /var/log/lxd/* $LOGDIR/containers

View File

@ -1,28 +0,0 @@
#!/bin/bash -xe
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This script is executed inside pre_test_hook function in devstack gate.
# First argument ($1) expects boolean as value where:
# 'False' means share driver will not handle share servers
# 'True' means it will handle share servers.
# NOTE(review): the $1 argument documented above is never read below; this
# looks like a leftover from a template (the wording mentions "share
# servers", a manila concept) -- confirm before relying on it.

# Import devstack function 'trueorfalse'
source $BASE/new/devstack/functions

# Note, due to Bug#1822182 we have to set this to default for the disk backend
# otherwise rescue tests will not work.
# Appends a line to the devstack local config consumed later by the gate.
DEVSTACK_LOCAL_CONFIG+=$'\n'"LXD_BACKEND_DRIVER=default"
export DEVSTACK_LOCAL_CONFIG

View File

@ -1,26 +0,0 @@
{
"namespace": "OS::Nova::LXDFlavor",
"display_name": "LXD properties",
"description": "You can pass several options to the LXD container hypervisor that will affect the container's capabilities.",
"visibility": "public",
"protected": false,
"resource_type_associations": [
{
"name": "OS::Nova::Flavor"
}
],
"properties": {
"lxd:nested_allowed": {
"title": "Allow nested containers",
"description": "Allow or disallow creation of nested containers. If True, you can install and run LXD inside the VM itself and provision another level of containers.",
"type": "string",
"default": "false"
},
"lxd:privileged_allowed": {
"title": "Create privileged container",
"description": "Containers created as Privileged have elevated powers on the compute host. You should not set this option on containers that you don't fully trust.",
"type": "string",
"default": "false"
}
}
}

View File

@ -1 +0,0 @@
Run run_tempest_lxd.sh to run tempest.api.compute tests to run against nova-lxd

View File

@ -1,33 +0,0 @@
#!/bin/bash
# Construct a regex to use when limiting the scope of tempest
# to avoid features unsupported by nova-lxd.
# Note that several tests are disabled by the use of tempest
# feature toggles in devstack for an LXD config,
# so this regex is not entirely representative of
# what's excluded.
#
# When adding entries to ignored_tests, add a comment explaining
# why, since this list should not grow.

# Temporarily skip the image tests since they give false positives
# for nova-lxd
ignored_tests="|^tempest.api.compute.images"

# Regressions
ignored_tests="$ignored_tests|.*AttachInterfacesTestJSON.test_create_list_show_delete_interfaces"

# backups are not supported
ignored_tests="$ignored_tests|.*ServerActionsTestJSON.test_create_backup"

# failed verification tests
ignored_tests="$ignored_tests|.*ServersWithSpecificFlavorTestJSON.test_verify_created_server_ephemeral_disk"
ignored_tests="$ignored_tests|.*AttachVolumeShelveTestJSON.test_attach_detach_volume"
ignored_tests="$ignored_tests|.*AttachVolumeTestJSON.test_attach_detach_volume"

# Skip slow-tagged tests and everything in ignored_tests; then select only
# the compute API tests.
# BUG FIX: the trailing group previously read "tempest\\.api.\\compute",
# which left the second dot unescaped and escaped the 'c' instead of the
# dot; the intended literal prefix is "tempest.api.compute".
regex="(?!.*\\[.*\\bslow\\b.*\\]$ignored_tests)(^tempest\\.api\\.compute)"

ostestr --serial --regex $regex run

View File

@ -1,29 +0,0 @@
[[local|localrc]]
# Set the HOST_IP and FLAT_INTERFACE if automatic detection is
# unreliable
#HOST_IP=
#FLAT_INTERFACE=
DATABASE_PASSWORD=password
RABBIT_PASSWORD=password
SERVICE_PASSWORD=password
SERVICE_TOKEN=password
ADMIN_PASSWORD=password
# run the services you want to use
ENABLED_SERVICES=rabbit,mysql,key
ENABLED_SERVICES+=,g-api,g-reg
ENABLED_SERVICES+=,n-cpu,n-api,n-crt,n-obj,n-cond,n-sch,n-novnc,n-cauth,placement-api,placement-client
ENABLED_SERVICES+=,neutron,q-svc,q-agt,q-dhcp,q-meta,q-l3
ENABLED_SERVICES+=,cinder,c-sch,c-api,c-vol
ENABLED_SERVICES+=,horizon
# disabled services
disable_service n-net
# enable nova-lxd
enable_plugin nova-lxd https://git.openstack.org/openstack/nova-lxd
# You should enable the following if you use lxd 3.0.
# In addition, this setting requires zfs >= 0.7.0.
#LXD_BACKEND_DRIVER=zfs

View File

@ -1,2 +0,0 @@
# Plug-in overrides
VIRT_DRIVER=lxd

View File

@ -1,202 +0,0 @@
#!/bin/bash
# devstack plugin for nova-lxd: default settings and directory layout.
# Sourced by devstack; all variables honour pre-existing environment values.

# Save trace setting
MY_XTRACE=$(set +o | grep xtrace)
set +o xtrace

# Defaults
# --------

# Set up base directories
NOVA_DIR=${NOVA_DIR:-$DEST/nova}
NOVA_CONF_DIR=${NOVA_CONF_DIR:-/etc/nova}
# BUG FIX: the default previously expanded to the literal string
# "NOVA_CONF_DIR/nova.conf" because the '$' was missing before
# NOVA_CONF_DIR.
NOVA_CONF=${NOVA_CONF:-$NOVA_CONF_DIR/nova.conf}

# Configure LXD storage backends
# Note Bug:1822182 - ZFS backend is broken for Rescue's so don't use it!
LXD_BACKEND_DRIVER=${LXD_BACKEND_DRIVER:-default}
LXD_DISK_IMAGE=${DATA_DIR}/lxd.img
LXD_LOOPBACK_DISK_SIZE=${LXD_LOOPBACK_DISK_SIZE:-8G}
LXD_POOL_NAME=${LXD_POOL_NAME:-default}

# nova-lxd directories
NOVA_COMPUTE_LXD_DIR=${NOVA_COMPUTE_LXD_DIR:-${DEST}/nova-lxd}
# Absolute directory containing this plugin file.
NOVA_COMPUTE_LXD_PLUGIN_DIR=$(readlink -f $(dirname ${BASH_SOURCE[0]}))

# glance directories
GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance}
GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf
function pre_install_nova-lxd() {
    # Install OS packages if necessary with "install_package ...".
    echo_summary "Installing LXD"
    if is_ubuntu; then
        # trusty's archive lxd is too old; pull from the stable PPA.
        if [ "$DISTRO" == "trusty" ]; then
            sudo add-apt-repository -y ppa:ubuntu-lxc/lxd-stable
        fi
        is_package_installed lxd || install_package lxd
        add_user_to_group $STACK_USER $LXD_GROUP

        # Restart lxd only when an apparmor package was actually installed.
        # BUG FIX: the previous "a || b && c" form ran c whenever the
        # package check OR the install succeeded (shell AND-OR lists are
        # left-associative with equal precedence), so lxd was restarted
        # even when every package was already present.
        needs_restart=false
        for pkg in apparmor apparmor-profiles-extra apparmor-utils; do
            if ! is_package_installed $pkg; then
                install_package $pkg
                needs_restart=true
            fi
        done
        if $needs_restart; then
            restart_service lxd
        fi
    fi
}
function install_nova-lxd() {
    # Install the service.
    # setup_develop is a devstack helper that installs the tree in
    # editable/development mode.
    setup_develop $NOVA_COMPUTE_LXD_DIR
}
function configure_nova-lxd() {
    # Configure the service.
    # Point nova at the LXD compute driver and the configured storage pool.
    iniset $NOVA_CONF DEFAULT compute_driver lxd.LXDDriver
    iniset $NOVA_CONF DEFAULT force_config_drive False
    iniset $NOVA_CONF lxd pool $LXD_POOL_NAME

    if is_service_enabled glance; then
        # Extend glance's accepted formats; root-tar is the format the
        # image upload instructions in this repo use.
        iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,raw,iso,qcow2,root-tar"
        iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz"
    fi

    # Install the rootwrap
    sudo install -o root -g root -m 644 $NOVA_COMPUTE_LXD_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d
}
function init_nova-lxd() {
    # Initialize and start the service.
    mkdir -p $TOP_DIR/files

    # Download and install the cirros lxc image
    CIRROS_IMAGE_FILE=cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-lxc.tar.gz
    if [ ! -f $TOP_DIR/files/$CIRROS_IMAGE_FILE ]; then
        wget --progress=dot:giga \
            -c http://download.cirros-cloud.net/${CIRROS_VERSION}/${CIRROS_IMAGE_FILE} \
            -O $TOP_DIR/files/${CIRROS_IMAGE_FILE}
    fi
    # Reuse $CIRROS_IMAGE_FILE for the upload redirect instead of
    # respelling the file name, so the download and the upload can
    # never drift apart.
    openstack --os-cloud=devstack-admin \
        --os-region-name="$REGION_NAME" image create "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-lxd" \
        --public --container-format bare \
        --disk-format raw < $TOP_DIR/files/$CIRROS_IMAGE_FILE

    if is_service_enabled cinder; then
        # Enable user namespace for ext4, this has only been tested on xenial+
        echo Y | sudo tee /sys/module/ext4/parameters/userns_mounts
    fi
}
function test_config_nova-lxd() {
    # Configure tempest or other tests as required
    if is_service_enabled tempest; then
        TEMPEST_CONFIG=${TEMPEST_CONFIG:-$TEMPEST_DIR/etc/tempest.conf}
        # Look up the glance ID of the cirros LXD image uploaded earlier
        # and use it for both tempest image slots.
        TEMPEST_IMAGE=`openstack image list | grep cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-lxd | awk {'print $2'}`
        TEMPEST_IMAGE_ALT=$TEMPEST_IMAGE
        iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso,root-tar"
        iniset $TEMPEST_CONFIG compute volume_device_name sdb
        # TODO(jamespage): Review and update
        # Toggle off compute features so tempest skips the matching tests;
        # presumably these are unsupported by the LXD driver -- confirm
        # against the driver before re-enabling any of them.
        iniset $TEMPEST_CONFIG compute-feature-enabled shelve False
        iniset $TEMPEST_CONFIG compute-feature-enabled resize False
        iniset $TEMPEST_CONFIG compute-feature-enabled config_drive False
        iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume False
        iniset $TEMPEST_CONFIG compute-feature-enabled vnc_console False
        iniset $TEMPEST_CONFIG compute image_ref $TEMPEST_IMAGE
        iniset $TEMPEST_CONFIG compute image_ref_alt $TEMPEST_IMAGE_ALT
        iniset $TEMPEST_CONFIG scenario img_file cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-lxc.tar.gz
    fi
}
function configure_lxd_block() {
    # Initialize LXD's storage backend (dir or zfs loopback) unless it
    # already looks configured.
    echo_summary "Configure LXD storage backend."
    if is_ubuntu; then
        if [ "$LXD_BACKEND_DRIVER" == "default" ]; then
            if [ "$LXD_POOL_NAME" == "default" ]; then
                echo_summary " . Configuring '${LXD_POOL_NAME}' dir backend for bionic lxd"
                sudo lxd init --auto --storage-backend dir
            else
                # A non-default pool name implies the operator initialized
                # LXD beforehand, so leave it untouched.
                echo_summary " . LXD_POOL_NAME != default, considering lxd already initialized"
            fi
        elif [ "$LXD_BACKEND_DRIVER" == "zfs" ]; then
            # Pool currently attached to the default profile; errors
            # (e.g. nothing configured yet) yield an empty string.
            pool=`lxc profile device get default root pool 2>> /dev/null || :`
            if [ "$pool" != "$LXD_POOL_NAME" ]; then
                echo_summary " . Configuring ZFS backend"
                # Sparse loopback file backs the ZFS pool.
                truncate -s $LXD_LOOPBACK_DISK_SIZE $LXD_DISK_IMAGE
                # TODO(sahid): switch to use snap
                sudo apt-get install -y zfsutils-linux
                lxd_dev=`sudo losetup --show -f ${LXD_DISK_IMAGE}`
                sudo lxd init --auto --storage-backend zfs --storage-pool $LXD_POOL_NAME \
                    --storage-create-device $lxd_dev
            else
                echo_summary " . ZFS backend already configured"
            fi
        fi
    fi
}
function shutdown_nova-lxd() {
    # Shut the service down.
    # Intentional no-op: this plugin starts no extra service of its own.
    :
}
function cleanup_nova-lxd() {
    # Cleanup the service.
    if [ "$LXD_BACKEND_DRIVER" == "zfs" ]; then
        # Only tear down storage this plugin created: check that the pool
        # attached to the default profile matches ours before deleting.
        pool=`lxc profile device get default root pool 2>> /dev/null || :`
        if [ "$pool" == "$LXD_POOL_NAME" ]; then
            sudo lxc profile device remove default root
            sudo lxc storage delete $LXD_POOL_NAME
        fi
    fi
}
# devstack plugin dispatch: devstack sources this file and invokes it with
# ($1 = phase, $2 = sub-phase); route each phase to the matching function
# defined above.
if is_service_enabled nova-lxd; then

    if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
        # Set up system services
        echo_summary "Configuring system services nova-lxd"
        pre_install_nova-lxd
        configure_lxd_block

    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
        # Perform installation of service source
        echo_summary "Installing nova-lxd"
        install_nova-lxd

    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
        # Configure after the other layer 1 and 2 services have been configured
        echo_summary "Configuring nova-lxd"
        configure_nova-lxd

    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
        # Initialize and start the nova-lxd service
        echo_summary "Initializing nova-lxd"
        init_nova-lxd

    elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
        # Configure any testing configuration
        echo_summary "Test configuration - nova-lxd"
        test_config_nova-lxd
    fi

    if [[ "$1" == "unstack" ]]; then
        # Shut down nova-lxd services
        # no-op
        shutdown_nova-lxd
    fi

    if [[ "$1" == "clean" ]]; then
        # Remove state and transient data
        # Remember clean.sh first calls unstack.sh
        # no-op
        cleanup_nova-lxd
    fi
fi

View File

@ -1,6 +0,0 @@
# Add nova-lxd to enabled services
enable_service nova-lxd
# LXD install/upgrade settings
INSTALL_LXD=${INSTALL_LXD:-False}
LXD_GROUP=${LXD_GROUP:-lxd}

View File

@ -1,93 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This script is executed in the OpenStack CI *tempest-dsvm-lxd job.
# It's used to configure which tempest tests actually get run. You can find
# the CI job configuration here:
#
# http://git.openstack.org/cgit/openstack-infra/project-config/tree/jenkins/jobs/devstack-gate.yaml
#

# Construct a regex to use when limiting scope of tempest
# to avoid features unsupported by Nova's LXD support.
# Note that several tests are disabled by the use of tempest
# feature toggles in devstack/lib/tempest for an lxd config,
# so this regex is not entirely representative of what's excluded.

# When adding entries to the regex, add a comment explaining why
# since this list should not grow.

# The finished pattern has the shape ^(?!.*(alt1|alt2|...)).*$ -- a
# negative lookahead, so any test id matching one of the alternatives
# appended below is EXCLUDED from the run.
r="^(?!.*"
r="$r(?:.*\[.*\bslow\b.*\])"

# (zulcss) nova-lxd does not support booting ami/aki images
r="$r|(?:tempest\.scenario\.test_minimum_basic\.TestMinimumBasicScenario\.test_minimum_basic_scenario)"

# XXX: zulcss (18 Oct 2016) nova-lxd does not support booting from ebs volumes
r="$r|(?:tempest\.scenario\.test_volume_boot_pattern.*)"
r="$r|(?:tempest\.api\.compute\.servers\.test_create_server\.ServersTestBootFromVolume)"

# XXX: zulcss (18 Oct 2016) tempest test only passes when there is more than 10 lines in the
# console output, and cirros LXD consoles have only a single line of output
r="$r|(?:tempest\.api\.compute\.servers\.test_server_actions\.ServerActionsTestJSON\.test_get_console_output_with_unlimited_size)"
# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_get_console_output_with_unlimited_size

# also tempest get console fails for the following two for length of output reasons
r="$r|(?:tempest\.api\.compute\.servers\.test_server_actions\.ServerActionsTestJSON\.test_get_console_output)"
# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_get_console_output
r="$r|(?:tempest\.api\.compute\.servers\.test_server_actions\.ServerActionsTestJSON\.test_get_console_output_server_id_in_shutoff_status)"
# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_get_console_output_server_id_in_shutoff_status

# XXX: jamespage (09 June 2017) veth pair nics not detected/configured by tempest
# https://review.openstack.org/#/c/472641/
# XXX: jamespage (09 June 2017) instance not accessible via floating IP.
r="$r|(?:tempest\.scenario\.test_network_v6\.TestGettingAddress\.test_dualnet_multi_prefix_dhcpv6_stateless)"
r="$r|(?:tempest\.scenario\.test_network_v6\.TestGettingAddress\.test_dualnet_multi_prefix_slaac)"
#tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_dhcpv6_stateless
#tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_slaac

# XXX: zulcss (18 Oct 2016) Could not connect to instance
#r="$r|(?:tempest\.scenario\.test_network_advanced_server_ops\.TestNetworkAdvancedServerOps\.test_server_connectivity_suspend_resume)"

# XXX: jamespage (08 June 2017): test failures with a mismatch in the number of disks reported
r="$r|(?:tempest\.api\.compute\.admin\.test_create_server\.ServersWithSpecificFlavorTestJSON\.test_verify_created_server_ephemeral_disk)"
#tempest.api.compute.admin.test_create_server.ServersWithSpecificFlavorTestJSON.test_verify_created_server_ephemeral_disk

# XXX: jamespage (08 June 2017): nova-lxd driver does not support device tagging
r="$r|(?:tempest\.api\.compute\.servers\.test_device_tagging.*)"
#tempest.api.compute.servers.test_device_tagging.DeviceTaggingTestV2_42.test_device_tagging
#tempest.api.compute.servers.test_device_tagging.DeviceTaggingTestV2_42.test_device_tagging

# XXX: jamespage (08 June 2017): mismatching output on LXD instance use-case
#tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_attach_detach_volume
#tempest.api.compute.volumes.test_attach_volume.AttachVolumeShelveTestJSON.test_attach_detach_volume
r="$r|(?:tempest\.api\.compute\.volumes\.test_attach_volume\.AttachVolumeTestJSON\.test_attach_detach_volume)"
r="$r|(?:tempest\.api\.compute\.volumes\.test_attach_volume\.AttachVolumeShelveTestJSON\.test_attach_detach_volume)"
#testtools.matchers._impl.MismatchError: u'NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT\nsda 8:0 0 1073741824 0 disk \nsdb 8:16 0 1073741824 0 disk \nvda 253:0 0 85899345920 0 disk \nvdb 253:16 0 42949672960 0 disk ' matches Contains('\nsdb ')

# XXX: jamespage (26 June 2017): disable diagnostic checks until driver implements them
# https://bugs.launchpad.net/nova-lxd/+bug/1700516
r="$r|(?:.*test_get_server_diagnostics.*)"
#test_get_server_diagnostics

# XXX: ajkavanagh (2018-07-23): disable test_show_update_rebuild_list_server as nova-lxd doesn't have the
# 'supports_trusted_certs' capability, and the test uses it.
# BUG: https://bugs.launchpad.net/nova-lxd/+bug/1783080
r="$r|(?:.*ServerShowV263Test.test_show_update_rebuild_list_server.*)"

# Close the lookahead and match the remainder of any test id.
r="$r).*$"

export DEVSTACK_GATE_TEMPEST_REGEX="$r"

# set the concurrency to 1 for devstack-gate
# See: https://bugs.launchpad.net/nova-lxd/+bug/1790943
#export TEMPEST_CONCURRENCY=1

View File

@ -1,76 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sphinx configuration for building the nova-lxd documentation."""
import os
import sys
# Make the repository root importable so sphinx.ext.autodoc can locate the
# project's modules from within doc/source.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# 'support_matrix' renders the driver feature-support INI file;
# 'oslosphinx' supplies the OpenStack theme.
extensions = [
'sphinx.ext.autodoc',
'sphinx_feature_classification.support_matrix',
#'sphinx.ext.intersphinx',
'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nova-lxd'
copyright = u'2015, Canonical Ltd'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}

View File

@ -1,4 +0,0 @@
============
Contributing
============
.. include:: ../../CONTRIBUTING.rst

View File

@ -1,125 +0,0 @@
Nova-LXD Exclusive Machine
==========================
As LXD is a system container format, it is possible to provision "bare metal"
machines with nova-lxd without exposing the kernel and firmware to the tenant.
This is done by means of host aggregates and flavor assignment. The instance
will fill the entirety of the host, and no other instances will be assigned
to it.
This document describes the method used to achieve this exclusive machine
scheduling. It is meant to serve as an example; the names of flavors and
aggregates may be named as desired.
Prerequisites
-------------
Exclusive machine scheduling requires two scheduler filters to be enabled in
`scheduler_default_filters` in `nova.conf`, namely
`AggregateInstanceExtraSpecsFilter` and `AggregateNumInstancesFilter`.
If juju was used to install and manage the openstack environment, the following
command will enable these filters::
juju set nova-cloud-controller scheduler-default-filters="AggregateInstanceExtraSpecsFilter,AggregateNumInstancesFilter,RetryFilter,AvailabilityZoneFilter,CoreFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter"
Host Aggregate
--------------
Each host designed to be exclusively available to a single instance must be
added to a special host aggregate.
In this example, the following is a nova host listing::
user@openstack$ nova host-list
+------------+-----------+----------+
| host_name | service | zone |
+------------+-----------+----------+
| machine-9 | cert | internal |
| machine-9 | scheduler | internal |
| machine-9 | conductor | internal |
| machine-12 | compute | nova |
| machine-11 | compute | nova |
| machine-10 | compute | nova |
+------------+-----------+----------+
Create the host aggregate itself. In this example, the aggregate is called
"exclusive-machines"::
user@openstack$ nova aggregate-create exclusive-machines
+----+--------------------+-------------------+-------+----------+
| 1 | exclusive-machines | - | | |
+----+--------------------+-------------------+-------+----------+
Two metadata properties are then set on the host aggregate itself::
user@openstack$ nova aggregate-set-metadata 1 aggregate_instance_extra_specs:exclusive=true
Metadata has been successfully updated for aggregate 1.
+----+--------------------+-------------------+-------+-------------------------------------------------+
| Id | Name | Availability Zone | Hosts | Metadata |
+----+--------------------+-------------------+-------+-------------------------------------------------+
| 1 | exclusive-machines | - | | 'aggregate_instance_extra_specs:exclusive=true' |
+----+--------------------+-------------------+-------+-------------------------------------------------+
user@openstack$ nova aggregate-set-metadata 1 max_instances_per_host=1
Metadata has been successfully updated for aggregate 1.
+----+--------------------+-------------------+-------+-----------------------------------------------------------------------------+
| Id | Name | Availability Zone | Hosts | Metadata |
+----+--------------------+-------------------+-------+-----------------------------------------------------------------------------+
| 1 | exclusive-machines | - | | 'aggregate_instance_extra_specs:exclusive=true', 'max_instances_per_host=1' |
+----+--------------------+-------------------+-------+-----------------------------------------------------------------------------+
The first aggregate metadata property is the link between the flavor (still to
be created) and the compute hosts (still to be added to the aggregate). The
second metadata property ensures that nova doesn't ever try to add another
instance to this host (e.g. if nova is configured to overcommit resources).
Now the hosts must be added to the aggregate. Once they are added to the
host aggregate, they will not be available for other flavors. This will be
important in resource sizing efforts. To add the hosts::
user@openstack$ nova aggregate-add-host exclusive-machines machine-10
Host juju-serverstack-machine-10 has been successfully added for aggregate 1
+----+--------------------+-------------------+--------------+-----------------------------------------------------------------------------+
| Id | Name | Availability Zone | Hosts | Metadata |
+----+--------------------+-------------------+--------------+-----------------------------------------------------------------------------+
| 1 | exclusive-machines | - | 'machine-10' | 'aggregate_instance_extra_specs:exclusive=true', 'max_instances_per_host=1' |
+----+--------------------+-------------------+--------------+-----------------------------------------------------------------------------+
Exclusive machine flavors
-------------------------
When planning for exclusive machine flavors, there is still a small amount
of various resources that will be needed for nova compute and lxd itself.
In general, it's a safe bet that this can be quantified in 100MB of RAM,
though specific hosts may need to be configured more closely to their
use cases.
In this example, `machine-10` has 4096MB of total memory, 2 CPUS, and 500GB
of disk space. The flavor that is created will have a quantity of 3996MB of
RAM, 2 CPUS, and 500GB of disk.::
user@openstack$ nova flavor-create --is-public true e1.medium 100 3996 500 2
+-----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
| ID | Name | Memory_MB | Disk | Ephemeral | Swap | VCPUs | RXTX_Factor | Is_Public |
+-----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
| 100 | e1.medium | 3996 | 500 | 0 | | 2 | 1.0 | True |
+-----+-----------+-----------+------+-----------+------+-------+-------------+-----------+
The `e1.medium` flavor must now have some metadata set to link it with the
`exclusive-machines` host aggregate.::
user@openstack$ nova flavor-key 100 set exclusive=true
Booting an exclusive instance
-----------------------------
Once the host aggregate and flavor have been created, exclusive machines
can be provisioned by using the flavor `e1.medium`::
user@openstack$ nova boot --flavor 100 --image $IMAGE exclusive
The `exclusive` instance, once provisioned, will fill the entire host
machine.

View File

@ -1,25 +0,0 @@
.. nova-lxd documentation master file, created by
sphinx-quickstart on Tue Jul 9 22:26:36 2013.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to nova-lxd's documentation!
========================================================
Contents:
.. toctree::
:maxdepth: 2
usage
contributing
exclusive_machine
vif_wiring
support_matrix/support-matrix
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

View File

@ -1,701 +0,0 @@
# Driver definition
[driver.nova-lxd]
title=Nova-LXD
# Functions:
[operation.attach-volume]
title=Attach block volume to instance
status=optional
notes=The attach volume operation provides a means to hotplug
additional block storage to a running instance. This allows
storage capabilities to be expanded without interruption of
service. In a cloud model it would be more typical to just
spin up a new instance with large storage, so the ability to
hotplug extra storage is for those cases where the instance
is considered to be more of a pet than cattle. Therefore
this operation is not considered to be mandatory to support.
cli=nova volume-attach <server> <volume>
driver.nova-lxd=complete
[operation.attach-tagged-volume]
title=Attach tagged block device to instance
status=optional
notes=Attach a block device with a tag to an existing server instance. See
"Device tags" for more information.
cli=nova volume-attach <server> <volume> [--tag <tag>]
driver.nova-lxd=unknown
[operation.detach-volume]
title=Detach block volume from instance
status=optional
notes=See notes for attach volume operation.
cli=nova volume-detach <server> <volume>
driver.nova-lxd=missing
[operation.extend-volume]
title=Extend block volume attached to instance
status=optional
notes=The extend volume operation provides a means to extend
the size of an attached volume. This allows volume size
to be expanded without interruption of service.
In a cloud model it would be more typical to just
spin up a new instance with large storage, so the ability to
extend the size of an attached volume is for those cases
where the instance is considered to be more of a pet than cattle.
Therefore this operation is not considered to be mandatory to support.
cli=cinder extend <volume> <new_size>
driver.nova-lxd=unknown
[operation.attach-interface]
title=Attach virtual network interface to instance
status=optional
notes=The attach interface operation provides a means to hotplug
additional interfaces to a running instance. Hotplug support
varies between guest OSes and some guests require a reboot for
new interfaces to be detected. This operation allows interface
capabilities to be expanded without interruption of service.
In a cloud model it would be more typical to just spin up a
new instance with more interfaces.
cli=nova interface-attach <server>
driver.nova-lxd=complete
[operation.attach-tagged-interface]
title=Attach tagged virtual network interface to instance
status=optional
notes=Attach a virtual network interface with a tag to an existing
server instance. See "Device tags" for more information.
cli=nova interface-attach <server> [--tag <tag>]
driver.nova-lxd=unknown
[operation.detach-interface]
title=Detach virtual network interface from instance
status=optional
notes=See notes for attach-interface operation.
cli=nova interface-detach <server> <port_id>
driver.nova-lxd=complete
[operation.maintenance-mode]
title=Set the host in a maintenance mode
status=optional
notes=This operation allows a host to be placed into maintenance
mode, automatically triggering migration of any running
instances to an alternative host and preventing new
instances from being launched. This is not considered
to be a mandatory operation to support.
The driver methods to implement are "host_maintenance_mode" and
"set_host_enabled".
cli=nova host-update <host>
driver.nova-lxd=unknown
[operation.evacuate]
title=Evacuate instances from a host
status=optional
notes=A possible failure scenario in a cloud environment is the outage
of one of the compute nodes. In such a case the instances of the down
host can be evacuated to another host. It is assumed that the old host
is unlikely ever to be powered back on, otherwise the evacuation
attempt will be rejected. When the instances get moved to the new
host, their volumes get re-attached and the locally stored data is
dropped. That happens in the same way as a rebuild.
This is not considered to be a mandatory operation to support.
cli=nova evacuate <server>;nova host-evacuate <host>
driver.nova-lxd=complete
[operation.rebuild]
title=Rebuild instance
status=optional
notes=A possible use case is additional attributes need to be set
to the instance, nova will purge all existing data from the system
and remakes the VM with given information such as 'metadata' and
'personalities'. Though this is not considered to be a mandatory
operation to support.
cli=nova rebuild <server> <image>
driver.nova-lxd=complete
[operation.get-guest-info]
title=Guest instance status
status=mandatory
notes=Provides realtime information about the power state of the guest
instance. Since the power state is used by the compute manager for
tracking changes in guests, this operation is considered mandatory to
support.
cli=
driver.nova-lxd=unknown
[operation.get-host-uptime]
title=Guest host uptime
status=optional
notes=Returns the result of host uptime since power on,
it's used to report hypervisor status.
cli=
driver.nova-lxd=unknown
[operation.get-host-ip]
title=Guest host ip
status=optional
notes=Returns the ip of this host, it's used when doing
resize and migration.
cli=
driver.nova-lxd=unknown
[operation.live-migrate]
title=Live migrate instance across hosts
status=optional
notes=Live migration provides a way to move an instance off one
compute host, to another compute host. Administrators may use
this to evacuate instances from a host that needs to undergo
maintenance tasks, though of course this may not help if the
host is already suffering a failure. In general instances are
considered cattle rather than pets, so it is expected that an
instance is liable to be killed if host maintenance is required.
It is technically challenging for some hypervisors to provide
support for the live migration operation, particularly those
built on the container based virtualization. Therefore this
operation is not considered mandatory to support.
cli=nova live-migration <server>;nova host-evacuate-live <host>
driver.nova-lxd=complete
[operation.force-live-migration-to-complete]
title=Force live migration to complete
status=optional
notes=Live migration provides a way to move a running instance to another
compute host. But it can sometimes fail to complete if an instance has
a high rate of memory or disk page access.
This operation provides the user with an option to assist the progress
of the live migration. The mechanism used to complete the live
migration depends on the underlying virtualization subsystem
capabilities. If libvirt/qemu is used and the post-copy feature is
available and enabled then the force complete operation will cause
a switch to post-copy mode. Otherwise the instance will be suspended
until the migration is completed or aborted.
cli=nova live-migration-force-complete <server> <migration>
driver.nova-lxd=unknown
[operation.launch]
title=Launch instance
status=mandatory
notes=Importing pre-existing running virtual machines on a host is
considered out of scope of the cloud paradigm. Therefore this
operation is mandatory to support in drivers.
cli=
driver.nova-lxd=unknown
[operation.pause]
title=Stop instance CPUs (pause)
status=optional
notes=Stopping an instances CPUs can be thought of as roughly
equivalent to suspend-to-RAM. The instance is still present
in memory, but execution has stopped. The problem, however,
is that there is no mechanism to inform the guest OS that
this takes place, so upon unpausing, its clocks will no
longer report correct time. For this reason hypervisor vendors
generally discourage use of this feature and some do not even
implement it. Therefore this operation is considered optional
to support in drivers.
cli=nova pause <server>
driver.nova-lxd=complete
[operation.reboot]
title=Reboot instance
status=optional
notes=It is reasonable for a guest OS administrator to trigger a
graceful reboot from inside the instance. A host initiated
graceful reboot requires guest co-operation and a non-graceful
reboot can be achieved by a combination of stop+start. Therefore
this operation is considered optional.
cli=nova reboot <server>
driver.nova-lxd=complete
[operation.rescue]
title=Rescue instance
status=optional
notes=The rescue operation starts an instance in a special
configuration whereby it is booted from an special root
disk image. The goal is to allow an administrator to
recover the state of a broken virtual machine. In general
the cloud model considers instances to be cattle, so if
an instance breaks the general expectation is that it be
thrown away and a new instance created. Therefore this
operation is considered optional to support in drivers.
cli=nova rescue <server>
driver.nova-lxd=complete
[operation.resize]
title=Resize instance
status=optional
notes=The resize operation allows the user to change a running
instance to match the size of a different flavor from the one
it was initially launched with. There are many different
flavor attributes that potentially need to be updated. In
general it is technically challenging for a hypervisor to
support the alteration of all relevant config settings for a
running instance. Therefore this operation is considered
optional to support in drivers.
cli=nova resize <server> <flavor>
driver.nova-lxd=missing
[operation.resume]
title=Restore instance
status=optional
notes=See notes for the suspend operation
cli=nova resume <server>
driver.nova-lxd=complete
[operation.set-admin-password]
title=Set instance admin password
status=optional
notes=Provides a mechanism to (re)set the password of the administrator
account inside the instance operating system. This requires that the
hypervisor has a way to communicate with the running guest operating
system. Given the wide range of operating systems in existence it is
unreasonable to expect this to be practical in the general case. The
configdrive and metadata service both provide a mechanism for setting
the administrator password at initial boot time. In the case where this
operation were not available, the administrator would simply have to
login to the guest and change the password in the normal manner, so
this is just a convenient optimization. Therefore this operation is
not considered mandatory for drivers to support.
cli=nova set-password <server>
driver.nova-lxd=unknown
[operation.snapshot]
title=Save snapshot of instance disk
status=optional
notes=The snapshot operation allows the current state of the
instance root disk to be saved and uploaded back into the
glance image repository. The instance can later be booted
again using this saved image. This is in effect making
the ephemeral instance root disk into a semi-persistent
storage, in so much as it is preserved even though the guest
is no longer running. In general though, the expectation is
that the root disks are ephemeral so the ability to take a
snapshot cannot be assumed. Therefore this operation is not
considered mandatory to support.
cli=nova image-create <server> <name>
driver.nova-lxd=complete
[operation.suspend]
title=Suspend instance
status=optional
notes=Suspending an instance can be thought of as roughly
equivalent to suspend-to-disk. The instance no longer
consumes any RAM or CPUs, with its live running state
having been preserved in a file on disk. It can later
be restored, at which point it should continue execution
where it left off. As with stopping instance CPUs, it suffers from the fact
that the guest OS will typically be left with a clock that
is no longer telling correct time. For container based
virtualization solutions, this operation is particularly
technically challenging to implement and is an area of
active research. This operation tends to make more sense
when thinking of instances as pets, rather than cattle,
since with cattle it would be simpler to just terminate
the instance instead of suspending. Therefore this operation
is considered optional to support.
cli=nova suspend <server>
driver.nova-lxd=complete
[operation.swap-volume]
title=Swap block volumes
status=optional
notes=The swap volume operation is a mechanism for changing a running
instance so that its attached volume(s) are backed by different
storage in the host. An alternative to this would be to simply
terminate the existing instance and spawn a new instance with the
new storage. In other words this operation is primarily targeted towards
the pet use case rather than cattle, however, it is required for volume
migration to work in the volume service. This is considered optional to
support.
cli=nova volume-update <server> <attachment> <volume>
driver.nova-lxd=missing
[operation.terminate]
title=Shutdown instance
status=mandatory
notes=The ability to terminate a virtual machine is required in
order for a cloud user to stop utilizing resources and thus
avoid indefinitely ongoing billing. Therefore this operation
is mandatory to support in drivers.
cli=nova delete <server>
driver.nova-lxd=complete
[operation.trigger-crash-dump]
title=Trigger crash dump
status=optional
notes=The trigger crash dump operation is a mechanism for triggering
a crash dump in an instance. The feature is typically implemented by
injecting an NMI (Non-maskable Interrupt) into the instance. It provides
a means to dump the production memory image as a dump file which is useful
for users. Therefore this operation is considered optional to support.
cli=nova trigger-crash-dump <server>
driver.nova-lxd=unknown
[operation.unpause]
title=Resume instance CPUs (unpause)
status=optional
notes=See notes for the "Stop instance CPUs" operation
cli=nova unpause <server>
driver.nova-lxd=unknown
[operation.guest.disk.autoconfig]
title=[Guest]Auto configure disk
status=optional
notes=Partition and resize FS to match the size specified by
flavors.root_gb, As this is hypervisor specific feature.
Therefore this operation is considered optional to support.
cli=
driver.nova-lxd=complete
[operation.guest.disk.rate-limit]
title=[Guest]Instance disk I/O limits
status=optional
notes=The ability to set rate limits on virtual disks allows for
greater performance isolation between instances running on the
same host storage. It is valid to delegate scheduling of I/O
operations to the hypervisor with its default settings, instead
of doing fine grained tuning. Therefore this is not considered
to be a mandatory configuration to support.
cli=nova limits
driver.nova-lxd=unknown
[operation.guest.setup.configdrive]
title=[Guest]Config drive support
status=choice(guest.setup)
notes=The config drive provides an information channel into
the guest operating system, to enable configuration of the
administrator password, file injection, registration of
SSH keys, etc. Since cloud images typically ship with all
login methods locked, a mechanism to set the administrator
password or keys is required to get login access. Alternatives
include the metadata service and disk injection. At least one
of the guest setup mechanisms is required to be supported by
drivers, in order to enable login access.
cli=
driver.nova-lxd=complete
[operation.guest.setup.inject.file]
title=[Guest]Inject files into disk image
status=optional
notes=This allows for the end user to provide data for multiple
files to be injected into the root filesystem before an instance
is booted. This requires that the compute node understand the
format of the filesystem and any partitioning scheme it might
use on the block device. This is a non-trivial problem considering
the vast number of filesystems in existence. The problem of injecting
files to a guest OS is better solved by obtaining via the metadata
service or config drive. Therefore this operation is considered
optional to support.
cli=
driver.nova-lxd=unknown
[operation.guest.setup.inject.networking]
title=[Guest]Inject guest networking config
status=optional
notes=This allows for static networking configuration (IP
address, netmask, gateway and routes) to be injected directly
into the root filesystem before an instance is booted. This
requires that the compute node understand how networking is
configured in the guest OS which is a non-trivial problem
considering the vast number of operating system types. The
problem of configuring networking is better solved by DHCP
or by obtaining static config via
config drive. Therefore this operation is considered optional
to support.
cli=
driver.nova-lxd=unknown
[operation.console.rdp]
title=[Console]Remote desktop over RDP
status=choice(console)
notes=This allows the administrator to interact with the graphical
console of the guest OS via RDP. This provides a way to see boot
up messages and login to the instance when networking configuration
has failed, thus preventing a network based login. Some operating
systems may prefer to emit messages via the serial console for
easier consumption. Therefore support for this operation is not
mandatory, however, a driver is required to support at least one
of the listed console access operations.
cli=nova get-rdp-console <server> <console-type>
driver.nova-lxd=missing
[operation.console.serial.log]
title=[Console]View serial console logs
status=choice(console)
notes=This allows the administrator to query the logs of data
emitted by the guest OS on its virtualized serial port. For
UNIX guests this typically includes all boot up messages and
so is useful for diagnosing problems when an instance fails
to successfully boot. Not all guest operating systems will be
able to emit boot information on a serial console, others may
only support graphical consoles. Therefore support for this
operation is not mandatory, however, a driver is required to
support at least one of the listed console access operations.
cli=nova console-log <server>
driver.nova-lxd=complete
[operation.console.serial.interactive]
title=[Console]Remote interactive serial console
status=choice(console)
notes=This allows the administrator to interact with the serial
console of the guest OS. This provides a way to see boot
up messages and login to the instance when networking configuration
has failed, thus preventing a network based login. Not all guest
operating systems will be able to emit boot information on a serial
console, others may only support graphical consoles. Therefore support
for this operation is not mandatory, however, a driver is required to
support at least one of the listed console access operations.
This feature was introduced in the Juno release with blueprint
https://blueprints.launchpad.net/nova/+spec/serial-ports
cli=nova get-serial-console <server>
driver.nova-lxd=unknown
[operation.console.spice]
title=[Console]Remote desktop over SPICE
status=choice(console)
notes=This allows the administrator to interact with the graphical
console of the guest OS via SPICE. This provides a way to see boot
up messages and login to the instance when networking configuration
has failed, thus preventing a network based login. Some operating
systems may prefer to emit messages via the serial console for
easier consumption. Therefore support for this operation is not
mandatory, however, a driver is required to support at least one
of the listed console access operations.
cli=nova get-spice-console <server> <console-type>
driver.nova-lxd=missing
[operation.console.vnc]
title=[Console]Remote desktop over VNC
status=choice(console)
notes=This allows the administrator to interact with the graphical
console of the guest OS via VNC. This provides a way to see boot
up messages and login to the instance when networking configuration
has failed, thus preventing a network based login. Some operating
systems may prefer to emit messages via the serial console for
easier consumption. Therefore support for this operation is not
mandatory, however, a driver is required to support at least one
of the listed console access operations.
cli=nova get-vnc-console <server> <console-type>
driver.nova-lxd=missing
[operation.storage.block]
title=[Storage]Block storage support
status=optional
notes=Block storage provides instances with direct attached
virtual disks that can be used for persistent storage of data.
As an alternative to direct attached disks, an instance may
choose to use network based persistent storage. OpenStack provides
object storage via the Swift service, or a traditional filesystem
such as NFS may be used. Some types of instances may
not require persistent storage at all, being simple transaction
processing systems reading requests & sending results to and from
the network. Therefore support for this configuration is not
considered mandatory for drivers to support.
cli=
driver.nova-lxd=partial
driver-notes.nova-lxd=Booting instances from block storages is not supported.
[operation.storage.block.backend.fibrechannel]
title=[Storage]Block storage over fibre channel
status=optional
notes=To maximise performance of the block storage, it may be desirable
to directly access fibre channel LUNs from the underlying storage
technology on the compute hosts. Since this is just a performance
optimization of the I/O path it is not considered mandatory to support.
cli=
driver.nova-lxd=unknown
[operation.storage.block.backend.iscsi]
title=[Storage]Block storage over iSCSI
status=condition(storage.block==complete)
notes=If the driver wishes to support block storage, it is common to
provide an iSCSI based backend to access the storage from cinder.
This isolates the compute layer for knowledge of the specific storage
technology used by Cinder, albeit at a potential performance cost due
to the longer I/O path involved. If the driver chooses to support
block storage, then this is considered mandatory to support, otherwise
it is considered optional.
cli=
driver.nova-lxd=complete
[operation.storage.block.backend.iscsi.auth.chap]
title=[Storage]CHAP authentication for iSCSI
status=optional
notes=If accessing the cinder iSCSI service over an untrusted LAN it
is desirable to be able to enable authentication for the iSCSI
protocol. CHAP is the commonly used authentication protocol for
iSCSI. This is not considered mandatory to support. (?)
cli=
driver.nova-lxd=unknown
[operation.storage.block.ceph]
title=[Storage]Block storage over RBD(Ceph)
status=condition(storage.block==complete)
notes=Ceph is an open source software storage platform based upon RADOS.
Instances can access the Ceph storage cluster via Ceph's RBD (RADOS
Block Device).
cli=
driver.nova-lxd=complete
[operation.storage.image]
title=[Storage]Image storage support
status=mandatory
notes=This refers to the ability to boot an instance from an image
stored in the glance image repository. Without this feature it
would not be possible to bootstrap from a clean environment, since
there would be no way to get block volumes populated and reliance
on external PXE servers is out of scope. Therefore this is considered
a mandatory storage feature to support.
cli=nova boot --image <image> <name>
driver.nova-lxd=complete
[operation.networking.firewallrules]
title=[Networking]Network firewall rules
status=optional
notes=Unclear how this is different from security groups
cli=
driver.nova-lxd=complete
[operation.networking.routing]
title=Network routing
status=optional
notes=Unclear what this refers to
cli=
driver.nova-lxd=complete
[operation.networking.securitygroups]
title=[Networking]Network security groups
status=optional
notes=The security groups feature provides a way to define rules
to isolate the network traffic of different instances running
on a compute host. This would prevent actions such as MAC and
IP address spoofing, or the ability to setup rogue DHCP servers.
In a private cloud environment this may be considered to be a
superfluous requirement. Therefore this is considered to be an
optional configuration to support.
cli=
driver.nova-lxd=complete
[operation.networking.topology.flat]
title=[Networking]Flat networking
status=choice(networking.topology)
notes=Provide network connectivity to guests using a
flat topology across all compute nodes. At least one
of the networking configurations is mandatory to
support in the drivers.
cli=
driver.nova-lxd=complete
[operation.networking.topology.vlan]
title=[Networking]VLAN networking
status=choice(networking.topology)
notes=Provide network connectivity to guests using VLANs to define the
topology. At least one of the networking configurations is mandatory
to support in the drivers.
cli=
driver.nova-lxd=complete
[operation.networking.topology.vxlan]
title=[Networking]VXLAN networking
status=choice(networking.topology)
notes=Provide network connectivity to guests using VXLANs to define the
topology. At least one of the networking configurations is mandatory
to support in the drivers.
cli=
driver.nova-lxd=complete
[operation.uefi-boot]
title=uefi boot
status=optional
notes=This allows users to boot a guest with uefi firmware.
cli=
driver.nova-lxd=unknown
[operation.device-tags]
title=Device tags
status=optional
notes=This allows users to set tags on virtual devices when creating a
server instance. Device tags are used to identify virtual device
metadata, as exposed in the metadata API and on the config drive.
For example, a network interface tagged with "nic1" will appear in
the metadata along with its bus (ex: PCI), bus address
(ex: 0000:00:02.0), MAC address, and tag (nic1). If multiple networks
are defined, the order in which they appear in the guest operating
system will not necessarily reflect the order in which they are given
in the server boot request. Guests should therefore not depend on
device order to deduce any information about their network devices.
Instead, device role tags should be used. Device tags can be
applied to virtual network interfaces and block devices.
cli=nova boot
driver.nova-lxd=unknown
[operation.quiesce]
title=quiesce
status=optional
notes=Quiesce the specified instance to prepare for snapshots.
For libvirt, guest filesystems will be frozen through qemu
agent.
cli=
driver.nova-lxd=unknown
[operation.unquiesce]
title=unquiesce
status=optional
notes=See notes for the quiesce operation
cli=
driver.nova-lxd=unknown
[operation.multiattach-volume]
title=Attach block volume to multiple instances
status=optional
notes=The multiattach volume operation is an extension to
the attach volume operation. It allows to attach a
single volume to multiple instances. This operation is
not considered to be mandatory to support.
Note that for the libvirt driver, this is only supported
if qemu<2.10 or libvirt>=3.10.
cli=nova volume-attach <server> <volume>
driver.nova-lxd=unknown
[operation.encrypted-volume]
title=Attach encrypted block volume to server
status=optional
notes=This is the same as the attach volume operation
except with an encrypted block device. Encrypted
volumes are controlled via admin-configured volume
types in the block storage service. Since attach
volume is optional this feature is also optional for
compute drivers to support.
cli=nova volume-attach <server> <volume>
driver.nova-lxd=unknown
[operation.trusted-certs]
title=Validate image with trusted certificates
status=optional
notes=Since trusted image certification validation is configurable
by the cloud deployer it is considered optional. However, it is
a virt-agnostic feature so there is no good reason that all virt
drivers cannot support the feature since it is mostly just plumbing
user requests through the virt driver when downloading images.
cli=nova boot --trusted-image-certificate-id ...
driver.nova-lxd=unknown
[operation.file-backed-memory]
title=File backed memory
status=optional
notes=The file backed memory feature in OpenStack allows a Nova node to serve
guest memory from a file backing store. This mechanism uses the libvirt
file memory source, causing guest instance memory to be allocated as files
within the libvirt memory backing directory. This is only supported if
qemu>2.6 and libvirt>4.0.0
cli=
driver.nova-lxd=unknown
[operation.report-cpu-traits]
title=Report CPU traits
status=optional
notes=The report CPU traits feature in OpenStack allows a Nova node to report
its CPU traits according to CPU mode configuration. This gives users the ability
to boot instances based on desired CPU traits.
cli=
driver.nova-lxd=unknown

View File

@ -1,16 +0,0 @@
===============================
Nova-lxd Feature Support Matrix
===============================
The following support matrix reflects the nova-lxd feature set that is
currently available, or was available at the time of release.
.. Note::
Notes for each operation of this matrix were basically quoted from
`support-matrix of Nova <https://docs.openstack.org/nova/latest/user/support-matrix.html>`_.
.. _driver_support_matrix:
.. support_matrix:: support-matrix.ini

View File

@ -1,7 +0,0 @@
========
Usage
========
To use nova-lxd in a project::
import nova.virt.lxd

View File

@ -1,59 +0,0 @@
Nova-LXD VIF Design Notes
=========================
VIF plugging workflow
---------------------
Nova-LXD makes use of the os-vif interface plugging library to wire LXD
instances into underlying Neutron networking; however there are some
subtle differences between the Nova-Libvirt driver and the Nova-LXD driver
in terms of how the last mile wiring is done to the instances.
In the Nova-Libvirt driver, Libvirt is used to start the instance in a
paused state, which creates the required tap device and any required wiring
to bridges created in previous os-vif plugging events.
The concept of 'start-and-pause' does not exist in LXD, so the driver
creates a veth pair instead, allowing the last mile wiring to be created
in advance of the actual LXD container being created.
This allows Neutron to complete the underlying VIF plugging at which
point it will notify Nova and the Nova-LXD driver will create the LXD
container and wire the pre-created veth pair into its profile.
tap/tin veth pairs
------------------
The veth pair created to wire the LXD instance into the underlying Neutron
networking uses the tap and tin prefixes; the tap named device is present
on the host OS, allowing iptables based firewall rules to be applied as
they are for other virt drivers, and the tin named device is passed to
LXD as part of the container profile. LXD will rename this device
internally within the container to an ethNN style name.
The LXD profile devices for network interfaces are created as 'physical'
rather than 'bridged' network devices as the driver handles creation of
the veth pair, rather than LXD (as would happen with a bridged device).
LXD profile interface naming
----------------------------
The name of the interfaces in each container's LXD profile maps to the
devname provided by Neutron as part of VIF plugging - this will typically
be of the format tapXXXXXXX. This allows for easier identification of
the interface during detachment events later in instance lifecycle.
Prior versions of the nova-lxd driver did not take this approach; interface
naming was not consistent depending on when the interface was attached. The
legacy code used to detach interfaces based on MAC address is used as a
fallback in the event that the new style device name is not found, supporting
upgraders from previous versions of the driver.
Supported Interface Types
-------------------------
The Nova-LXD driver has been validated with:
- OpenvSwitch (ovs) hybrid bridge ports.
- OpenvSwitch (ovs) standard ports.
- Linuxbridge (bridge) ports

View File

@ -1,11 +0,0 @@
# nova-rootwrap filters for compute nodes running nova-lxd
# This file should be owned by (and only-writable by) the root user
[Filters]
zfs: CommandFilter, zfs, root
zpool: CommandFilter, zpool, root
btrfs: CommandFilter, btrfs, root
chown: CommandFilter, chown, root
chmod: CommandFilter, chmod, root
mount: CommandFilter, mount, root
umount: CommandFilter, umount, root

View File

@ -1,112 +0,0 @@
alabaster==0.7.10
amqp==2.2.2
appdirs==1.4.3
asn1crypto==0.24.0
Babel==2.5.3
cachetools==2.0.1
certifi==2018.1.18
cffi==1.11.5
chardet==3.0.4
cliff==2.11.0
cmd2==0.8.1
contextlib2==0.5.5
coverage==4.5.1
cryptography==2.1.4
ddt==1.1.2
debtcollector==1.19.0
docutils==0.14
enum-compat==0.0.2
eventlet==0.20.0
extras==1.0.0
fasteners==0.14.1
fixtures==3.0.0
flake8==2.5.5
future==0.16.0
futurist==1.6.0
greenlet==0.4.13
hacking==0.12.0
idna==2.6
imagesize==1.0.0
iso8601==0.1.12
Jinja2==2.10
keystoneauth1==3.4.0
kombu==4.1.0
linecache2==1.0.0
MarkupSafe==1.0
mccabe==0.2.1
mock==2.0.0
monotonic==1.4
mox3==0.25.0
msgpack==0.5.6
netaddr==0.7.19
netifaces==0.10.6
nose==1.3.7
nosexcover==1.0.11
os-brick==2.3.0
os-client-config==1.29.0
os-testr==1.0.0
os-vif==1.9.0
os-win==4.0.0
oslo.concurrency==3.26.0
oslo.config==5.2.0
oslo.context==2.20.0
oslo.i18n==3.20.0
oslo.log==3.37.0
oslo.messaging==5.36.0
oslo.middleware==3.35.0
oslo.privsep==1.28.0
oslo.serialization==2.25.0
oslo.service==1.30.0
oslo.utils==3.36.0
oslo.versionedobjects==1.32.0
oslosphinx==4.18.0
oslotest==3.3.0
Paste==2.0.3
PasteDeploy==1.5.2
pbr==3.1.1
pep8==1.5.7
pika==0.10.0
pika-pool==0.1.3
prettytable==0.7.2
pycparser==2.18
pyflakes==0.8.1
Pygments==2.2.0
pyinotify==0.9.6
pylxd==2.2.6
pyparsing==2.2.0
pyperclip==1.6.0
pyroute2==0.4.21
python-dateutil==2.7.0
python-mimeparse==1.6.0
python-subunit==1.2.0
pytz==2018.3
PyYAML==3.12
repoze.lru==0.7
requests==2.18.4
requests-toolbelt==0.8.0
requests-unixsocket==0.1.5
requestsexceptions==1.4.0
retrying==1.3.3
rfc3986==1.1.0
Routes==2.4.1
six==1.11.0
snowballstemmer==1.2.1
Sphinx==1.6.5
sphinx-feature-classification==0.1.0
sphinxcontrib-websupport==1.0.1
statsd==3.2.2
stestr==1.0.0
stevedore==1.28.0
tenacity==4.9.0
testrepository==0.0.20
testscenarios==0.5.0
testtools==2.3.0
traceback2==1.4.0
unittest2==1.1.0
urllib3==1.22
vine==1.1.4
voluptuous==0.11.1
WebOb==1.7.4
wrapt==1.10.11
ws4py==0.5.1
wsgi-intercept==1.6.0

View File

@ -1 +0,0 @@
__import__('pkg_resources').declare_namespace(__name__)

View File

@ -1,397 +0,0 @@
# Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def fake_standard_return():
    """Canned LXD sync-success response with empty metadata."""
    return dict(type="sync", status="Success", status_code=200, metadata={})
def fake_host():
    """Canned LXD host-info response: trusted client, lxc driver, ext4."""
    environment = {
        "backing_fs": "ext4",
        "driver": "lxc",
        "kernel_version": "3.19.0-22-generic",
        "lxc_version": "1.1.2",
        "lxd_version": "0.12",
    }
    host_metadata = {
        "api_compat": 1,
        "auth": "trusted",
        "config": {},
        "environment": environment,
    }
    return dict(type="sync", status="Success", status_code=200,
                metadata=host_metadata)
def fake_image_list_empty():
    """Canned LXD sync response: image list with no entries."""
    return dict(type="sync", status="Success", status_code=200, metadata=[])
def fake_image_list():
    """Canned LXD sync response: image list holding one trusty image URL."""
    images = ['/1.0/images/trusty']
    return dict(type="sync", status="Success", status_code=200,
                metadata=images)
def fake_image_info():
    """Canned LXD image-info response for a fake ubuntu image."""
    fingerprint = (
        "04aac4257341478b49c25d22cea8a6ce"
        "0489dc6c42d835367945e7596368a37f"
    )
    image_metadata = {
        "aliases": [{"target": "ubuntu", "description": "ubuntu"}],
        "architecture": 2,
        "fingerprint": fingerprint,
        "filename": "",
        "properties": {},
        "public": 0,
        "size": 67043148,
        "created_at": 0,
        "expires_at": 0,
        "uploaded_at": 1435669853,
    }
    return dict(type="sync", status="Success", status_code=200,
                metadata=image_metadata)
def fake_alias():
    """Canned LXD sync response describing the ubuntu image alias."""
    alias = {"target": "ubuntu", "description": "ubuntu"}
    return dict(type="sync", status="Success", status_code=200,
                metadata=alias)
def fake_alias_list():
    """Canned LXD sync response: list with one image-alias URL."""
    aliases = ["/1.0/images/aliases/ubuntu"]
    return dict(type="sync", status="Success", status_code=200,
                metadata=aliases)
def fake_container_list():
    """Canned LXD sync response: list with one container URL."""
    containers = ["/1.0/containers/trusty-1"]
    return dict(type="sync", status="Success", status_code=200,
                metadata=containers)
def fake_container_state(status):
    """Canned container-state response embedding the given status code.

    :param status: integer LXD container status code to report.
    """
    return dict(type="sync", status="Success", status_code=200,
                metadata={"status_code": status})
def fake_container_log():
    """Canned container-log response with a single fake log string."""
    return dict(type="sync", status="Success", status_code=200,
                metadata={"log": "fake log"})
def fake_container_migrate():
    """Canned async migration-operation response (websocket class)."""
    op_id = "dbd9f22c-6da5-4066-8fca-c02f09f76738"
    op_metadata = {
        "id": op_id,
        "class": "websocket",
        "created_at": "2016-02-07T09:20:53.127321875-05:00",
        "updated_at": "2016-02-07T09:20:53.127321875-05:00",
        "status": "Running",
        "status_code": 103,
        "resources": {
            "containers": ["/1.0/containers/instance-00000010"],
        },
        "metadata": {
            "control": "fake_control",
            "fs": "fake_fs",
        },
        # NOTE(review): the original fixture uses the *string* 'false',
        # not the boolean False; preserved as-is.
        "may_cancel": 'false',
        "err": "",
    }
    return {
        "type": "async",
        "status": "Operation created",
        "status_code": 100,
        "metadata": op_metadata,
        "operation": "/1.0/operations/" + op_id,
    }
def fake_snapshots_list():
    """Canned list of snapshot URLs for container trusty-1."""
    snapshots = ["/1.0/containers/trusty-1/snapshots/first"]
    return dict(type="sync", status="Success", status_code=200,
                metadata=snapshots)
def fake_certificate_list():
    """Canned list with one client-certificate URL."""
    certs = ["/1.0/certificates/ABCDEF01"]
    return dict(type="sync", status="Success", status_code=200,
                metadata=certs)
def fake_certificate():
    """Canned response describing a single client certificate."""
    cert = {"type": "client", "certificate": "ABCDEF01"}
    return dict(type="sync", status="Success", status_code=200,
                metadata=cert)
def fake_profile_list():
    """Canned list with one profile URL."""
    profiles = ["/1.0/profiles/fake-profile"]
    return dict(type="sync", status="Success", status_code=200,
                metadata=profiles)
def fake_profile():
    """Canned response describing the fake-profile LXD profile."""
    profile = {
        "name": "fake-profile",
        "config": {
            "resources.memory": "2GB",
            "network.0.bridge": "lxcbr0",
        },
    }
    return dict(type="sync", status="Success", status_code=200,
                metadata=profile)
def fake_operation_list():
    """Canned list with one operation URL."""
    operations = ["/1.0/operations/1234"]
    return dict(type="sync", status="Success", status_code=200,
                metadata=operations)
def fake_operation():
    """Canned async operation response in the Running state."""
    op_metadata = {
        "created_at": "2015-06-09T19:07:24.379615253-06:00",
        "updated_at": "2015-06-09T19:07:23.379615253-06:00",
        "status": "Running",
        "status_code": 103,
        "resources": {"containers": ["/1.0/containers/1"]},
        "metadata": {},
        "may_cancel": True,
    }
    return dict(type="async", status="OK", status_code=100,
                operation="/1.0/operation/1234", metadata=op_metadata)
def fake_operation_info_ok():
    """Canned async operation response in the Completed (success) state."""
    op_metadata = {
        "created_at": "2015-06-09T19:07:24.379615253-06:00",
        "updated_at": "2015-06-09T19:07:23.379615253-06:00",
        "status": "Completed",
        "status_code": 200,
        "resources": {"containers": ["/1.0/containers/1"]},
        "metadata": {},
        "may_cancel": True,
    }
    return dict(type="async", status="OK", status_code=200,
                operation="/1.0/operation/1234", metadata=op_metadata)
def fake_operation_info_failed():
    """Canned async operation response in the Failure state.

    Note: the inner ``metadata`` field carries the error *string* here,
    unlike the success variant where it is a dict.
    """
    op_metadata = {
        "created_at": "2015-06-09T19:07:24.379615253-06:00",
        "updated_at": "2015-06-09T19:07:23.379615253-06:00",
        "status": "Failure",
        "status_code": 400,
        "resources": {"containers": ["/1.0/containers/1"]},
        "metadata": "Invalid container name",
        "may_cancel": True,
    }
    return dict(type="async", status="OK", status_code=200,
                operation="/1.0/operation/1234", metadata=op_metadata)
def fake_network_list():
    """Canned list with one network URL (lxcbr0)."""
    networks = ["/1.0/networks/lxcbr0"]
    return dict(type="sync", status="Success", status_code=200,
                metadata=networks)
def fake_network():
    """Canned response describing the lxcbr0 bridge network."""
    network = {
        "name": "lxcbr0",
        "type": "bridge",
        "members": ["/1.0/containers/trusty-1"],
    }
    return dict(type="async", status="OK", status_code=100,
                operation="/1.0/operation/1234", metadata=network)
def fake_container_config():
    """Canned container configuration mapping for my-container."""
    rootfs = {
        'type': "disk",
        'path': "/",
        'source': "UUID=8f7fdf5e-dc60-4524-b9fe-634f82ac2fb6",
    }
    return {
        'name': "my-container",
        'profiles': ["default"],
        'architecture': 2,
        'config': {"limits.cpus": "3"},
        'expanded_config': {"limits.cpus": "3"},
        'devices': {'rootfs': dict(rootfs)},
        'expanded_devices': {'rootfs': dict(rootfs)},
        # NOTE(review): "eth0" sits at the top level of this mapping rather
        # than inside 'expanded_devices'; preserved exactly as the original
        # fixture -- confirm against consumers before relocating it.
        "eth0": {
            "type": "nic",
            "parent": "lxcbr0",
            "hwaddr": "00:16:3e:f4:e7:1c",
            "name": "eth0",
            "nictype": "bridged",
        },
    }
def fake_container_info():
    """Canned container info: config/devices plus runtime status and IPs."""
    rootfs = {
        'type': "disk",
        'path': "/",
        'source': "UUID=8f7fdf5e-dc60-4524-b9fe-634f82ac2fb6",
    }
    ips = [
        {'interface': "eth0", 'protocol': "INET6",
         'address': "2001:470:b368:1020:1::2", 'host_veth': "vethGMDIY9"},
        {'interface': "eth0", 'protocol': "INET",
         'address': "172.16.15.30", 'host_veth': "vethGMDIY9"},
    ]
    return {
        'name': "my-container",
        'profiles': ["default"],
        'architecture': 2,
        'config': {"limits.cpus": "3"},
        'expanded_config': {"limits.cpus": "3"},
        'devices': {'rootfs': dict(rootfs)},
        'expanded_devices': {'rootfs': dict(rootfs)},
        # NOTE(review): "eth0" sits at the top level of this mapping rather
        # than inside 'expanded_devices'; preserved exactly as the original
        # fixture -- confirm against consumers before relocating it.
        "eth0": {
            "type": "nic",
            "parent": "lxcbr0",
            "hwaddr": "00:16:3e:f4:e7:1c",
            "name": "eth0",
            "nictype": "bridged",
        },
        'status': {
            'status': "Running",
            'status_code': 103,
            'ips': ips,
        },
    }

View File

@ -1,114 +0,0 @@
# Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
import uuid
from nova import context
from nova.tests.unit import fake_instance
from pylxd.deprecated import api
class MockConf(mock.Mock):
    """Mock of the nova CONF object with a nested ``lxd`` option group.

    Flat nova options become attributes of the mock itself; LXD-group
    options become attributes of ``self.lxd``.

    :param lxd_args: positional args forwarded to the ``lxd`` group Mock
        (first element acts as the Mock ``spec``).
    :param lxd_kwargs: overrides for the default LXD-group option values.
    """

    def __init__(self, lxd_args=(), lxd_kwargs=None, *args, **kwargs):
        # NOTE: lxd_kwargs defaults to None instead of the original
        # mutable `{}` default to avoid the shared-mutable-default pitfall.
        default = {
            'config_drive_format': None,
            'instances_path': '/fake/instances/path',
            'image_cache_subdirectory_name': '/fake/image/cache',
            'vif_plugging_timeout': 10,
            'my_ip': '1.2.3.4',
            'vlan_interface': 'vlanif',
            'flat_interface': 'flatif',
        }
        default.update(kwargs)
        super(MockConf, self).__init__(*args, **default)

        lxd_default = {
            'root_dir': '/fake/lxd/root',
            'timeout': 20,
            'retry_interval': 2,
        }
        lxd_default.update(lxd_kwargs or {})
        self.lxd = mock.Mock(lxd_args, **lxd_default)
class MockInstance(mock.Mock):
    """Mock nova instance exposing uuid, name and a flavor sub-mock."""

    def __init__(self, name='fake-uuid', uuid='fake-uuid',
                 image_ref='mock_image', ephemeral_gb=0, memory_mb=-1,
                 vcpus=0, *args, **kwargs):
        super(MockInstance, self).__init__(
            uuid=uuid,
            image_ref=image_ref,
            ephemeral_gb=ephemeral_gb,
            *args, **kwargs)
        # Assigned after Mock init: 'name' is consumed by Mock's
        # constructor, so it must be set explicitly as an attribute.
        self.name = name
        self.uuid = uuid
        self.flavor = mock.Mock(memory_mb=memory_mb, vcpus=vcpus)
def lxd_mock(*args, **kwargs):
    """Build a Mock of the deprecated pylxd API with canned return values.

    Keyword arguments override the default configured attributes.
    """
    defaults = {
        'profile_list.return_value': ['fake_profile'],
        'container_list.return_value': ['mock-instance-1', 'mock-instance-2'],
        'host_ping.return_value': True,
    }
    defaults.update(kwargs)
    stub = mock.Mock(spec=api.API)
    stub.configure_mock(**defaults)
    return stub
def annotated_data(*args):
    """Wrap ddt.data so each list/dict scenario carries a ``__name__``.

    Lists/tuples are labelled by their first element; dicts by their
    'tag' key. Anything else raises TypeError.
    """

    class _NamedList(list):
        pass

    class _NamedDict(dict):
        pass

    annotated = []
    for arg in args:
        if isinstance(arg, (list, tuple)):
            named = _NamedList(arg)
            named.__name__ = arg[0]
        elif isinstance(arg, dict):
            named = _NamedDict(arg)
            named.__name__ = arg['tag']
        else:
            raise TypeError('annotate_data can only handle dicts, '
                            'lists and tuples')
        annotated.append(named)
    return lambda func: ddt.data(*annotated)(ddt.unpack(func))
def _fake_instance():
    """Build a real nova Instance object populated with fixture values."""
    admin_ctx = context.get_admin_context()
    values = {
        'display_name': 'fake_display_name',
        'name': 'fake_name',
        'uuid': uuid.uuid1(),
        'image_ref': 'fake_image',
        'vcpus': 1,
        'memory_mb': 512,
        'root_gb': 10,
        'host': 'fake_host',
        'expected_attrs': ['system_metadata'],
    }
    return fake_instance.fake_instance_obj(admin_ctx, **values)

View File

@ -1,87 +0,0 @@
# Copyright 2016 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import context
from nova import test
from nova.tests.unit import fake_instance
from nova.virt.lxd import common
class InstanceAttributesTest(test.NoDBTestCase):
    """Tests for InstanceAttributes."""

    def setUp(self):
        # Patch the CONF object referenced through the driver module so
        # the path-based assertions below are deterministic.
        super(InstanceAttributesTest, self).setUp()
        self.CONF_patcher = mock.patch('nova.virt.lxd.driver.nova.conf.CONF')
        self.CONF = self.CONF_patcher.start()
        self.CONF.instances_path = '/i'
        self.CONF.lxd.root_dir = '/c'

    def tearDown(self):
        super(InstanceAttributesTest, self).tearDown()
        # Stop the CONF patch started in setUp to avoid leaking state
        # into other tests.
        self.CONF_patcher.stop()

    def test_is_snap_lxd(self):
        # is_snap_lxd checks for the existence of a file; fake both
        # outcomes by patching os.path.isfile directly.
        with mock.patch('os.path.isfile') as isfile:
            isfile.return_value = False
            self.assertFalse(common.is_snap_lxd())
            isfile.return_value = True
            self.assertTrue(common.is_snap_lxd())

    @mock.patch.object(common, 'is_snap_lxd')
    def test_instance_dir(self, is_snap_lxd):
        # instance_dir is rooted at CONF.instances_path ('/i' from setUp).
        ctx = context.get_admin_context()
        instance = fake_instance.fake_instance_obj(
            ctx, name='test', memory_mb=0)
        is_snap_lxd.return_value = False
        attributes = common.InstanceAttributes(instance)
        self.assertEqual(
            '/i/instance-00000001', attributes.instance_dir)

    @mock.patch.object(common, 'is_snap_lxd')
    def test_console_path(self, is_snap_lxd):
        # Console log path differs between a deb-installed LXD
        # (/var/log/lxd/...) and a snap-installed LXD
        # (/var/snap/lxd/common/lxd/logs/...).
        ctx = context.get_admin_context()
        instance = fake_instance.fake_instance_obj(
            ctx, name='test', memory_mb=0)
        is_snap_lxd.return_value = False
        attributes = common.InstanceAttributes(instance)
        self.assertEqual(
            '/var/log/lxd/instance-00000001/console.log',
            attributes.console_path)
        is_snap_lxd.return_value = True
        attributes = common.InstanceAttributes(instance)
        self.assertEqual(
            '/var/snap/lxd/common/lxd/logs/instance-00000001/console.log',
            attributes.console_path)

    @mock.patch.object(common, 'is_snap_lxd')
    def test_storage_path(self, is_snap_lxd):
        # storage_path is <instance_dir>/storage.
        ctx = context.get_admin_context()
        instance = fake_instance.fake_instance_obj(
            ctx, name='test', memory_mb=0)
        is_snap_lxd.return_value = False
        attributes = common.InstanceAttributes(instance)
        self.assertEqual(
            '/i/instance-00000001/storage',
            attributes.storage_path)

File diff suppressed because it is too large Load Diff

View File

@ -1,493 +0,0 @@
# Copyright 2016 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import context
from nova import exception
from nova import test
from nova.network import model as network_model
from nova.tests.unit import fake_instance
from nova.virt.lxd import flavor
class ToProfileTest(test.NoDBTestCase):
"""Tests for nova.virt.lxd.flavor.to_profile."""
    def setUp(self):
        """Build a fake LXD client and patch both CONF references."""
        super(ToProfileTest, self).setUp()
        self.client = mock.Mock()
        # Minimal host_info: no API extensions, zfs storage backend.
        self.client.host_info = {
            'api_extensions': [],
            'environment': {
                'storage': 'zfs'
            }
        }

        self.patchers = []
        # CONF as referenced through nova.virt.lxd.driver.
        CONF_patcher = mock.patch('nova.virt.lxd.driver.nova.conf.CONF')
        self.patchers.append(CONF_patcher)
        self.CONF = CONF_patcher.start()
        self.CONF.instances_path = '/i'
        self.CONF.lxd.root_dir = ''
        # CONF as referenced by nova.virt.lxd.flavor; patched separately
        # because it is a distinct patch target.
        CONF_patcher = mock.patch('nova.virt.lxd.flavor.CONF')
        self.patchers.append(CONF_patcher)
        self.CONF2 = CONF_patcher.start()
        self.CONF2.lxd.pool = None
        self.CONF2.lxd.root_dir = ''
    def tearDown(self):
        """Stop every patcher started in setUp."""
        super(ToProfileTest, self).tearDown()
        for patcher in self.patchers:
            patcher.stop()
def test_to_profile(self):
"""A profile configuration is requested of the LXD client."""
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'root': {
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
def test_to_profile_lvm(self):
"""A profile configuration is requested of the LXD client."""
self.client.host_info['environment']['storage'] = 'lvm'
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'root': {
'path': '/',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
def test_storage_pools(self):
self.client.host_info['api_extensions'].append('storage')
self.CONF2.lxd.pool = 'test_pool'
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name))
}
expected_devices = {
'root': {
'path': '/',
'type': 'disk',
'pool': 'test_pool',
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
def test_to_profile_security(self):
self.client.host_info['api_extensions'].append('id_map')
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'lxd:nested_allowed': True,
'lxd:privileged_allowed': True,
}
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
'security.nesting': 'True',
'security.privileged': 'True',
}
expected_devices = {
'root': {
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
def test_to_profile_idmap(self):
self.client.host_info['api_extensions'].append('id_map')
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'lxd:isolated': True,
}
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'security.idmap.isolated': 'True',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'root': {
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
def test_to_profile_idmap_unsupported(self):
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'lxd:isolated': True,
}
network_info = []
block_info = []
self.assertRaises(
exception.NovaException,
flavor.to_profile, self.client, instance, network_info, block_info)
def test_to_profile_quota_extra_specs_bytes(self):
"""A profile configuration is requested of the LXD client."""
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'quota:disk_read_bytes_sec': '3000000',
'quota:disk_write_bytes_sec': '4000000',
}
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'root': {
'limits.read': '2MB',
'limits.write': '3MB',
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
def test_to_profile_quota_extra_specs_iops(self):
"""A profile configuration is requested of the LXD client."""
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'quota:disk_read_iops_sec': '300',
'quota:disk_write_iops_sec': '400',
}
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'root': {
'limits.read': '300iops',
'limits.write': '400iops',
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
def test_to_profile_quota_extra_specs_max_bytes(self):
"""A profile configuration is requested of the LXD client."""
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'quota:disk_total_bytes_sec': '6000000',
}
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'root': {
'limits.max': '5MB',
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
def test_to_profile_quota_extra_specs_max_iops(self):
"""A profile configuration is requested of the LXD client."""
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'quota:disk_total_iops_sec': '500',
}
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'root': {
'limits.max': '500iops',
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
@mock.patch('nova.virt.lxd.vif._is_no_op_firewall', return_value=False)
def test_to_profile_network_config_average(self, _is_no_op_firewall):
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'quota:vif_inbound_average': '1000000',
'quota:vif_outbound_average': '2000000',
}
network_info = [{
'id': '0123456789abcdef',
'type': network_model.VIF_TYPE_OVS,
'address': '00:11:22:33:44:55',
'network': {
'bridge': 'fakebr'},
'devname': 'tap0123456789a'}]
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'tap0123456789a': {
'hwaddr': '00:11:22:33:44:55',
'nictype': 'physical',
'parent': 'tin0123456789a',
'type': 'nic',
'limits.egress': '16000Mbit',
'limits.ingress': '8000Mbit',
},
'root': {
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
@mock.patch('nova.virt.lxd.vif._is_no_op_firewall', return_value=False)
def test_to_profile_network_config_peak(self, _is_no_op_firewall):
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.flavor.extra_specs = {
'quota:vif_inbound_peak': '3000000',
'quota:vif_outbound_peak': '4000000',
}
network_info = [{
'id': '0123456789abcdef',
'type': network_model.VIF_TYPE_OVS,
'address': '00:11:22:33:44:55',
'network': {
'bridge': 'fakebr'},
'devname': 'tap0123456789a'}]
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'tap0123456789a': {
'hwaddr': '00:11:22:33:44:55',
'nictype': 'physical',
'parent': 'tin0123456789a',
'type': 'nic',
'limits.egress': '32000Mbit',
'limits.ingress': '24000Mbit',
},
'root': {
'path': '/',
'size': '0GB',
'type': 'disk'
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)
@mock.patch('nova.virt.lxd.flavor.driver.block_device_info_get_ephemerals')
def test_to_profile_ephemeral_storage(self, get_ephemerals):
"""A profile configuration is requested of the LXD client."""
get_ephemerals.return_value = [
{'virtual_name': 'ephemeral1'},
]
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
network_info = []
block_info = []
expected_config = {
'environment.product_name': 'OpenStack Nova',
'limits.cpu': '1',
'limits.memory': '0MB',
'raw.lxc': (
'lxc.console.logfile=/var/log/lxd/{}/console.log\n'.format(
instance.name)),
}
expected_devices = {
'root': {
'path': '/',
'size': '0GB',
'type': 'disk'
},
'ephemeral1': {
'type': 'disk',
'path': '/mnt',
'source': '/i/{}/storage/ephemeral1'.format(instance.name),
},
}
flavor.to_profile(self.client, instance, network_info, block_info)
self.client.profiles.create.assert_called_once_with(
instance.name, expected_config, expected_devices)

View File

@ -1,92 +0,0 @@
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import nova.conf
from nova import exception
from nova import test
from pylxd.deprecated import exceptions as lxd_exceptions
from nova.virt.lxd import driver
CONF = nova.conf.CONF
class LXDTestLiveMigrate(test.NoDBTestCase):
"""Tests for LXDDriver live migration behaviour."""
def setUp(self):
super(LXDTestLiveMigrate, self).setUp()
self.driver = driver.LXDDriver(None)
self.context = 'fake_context'
# Stub out the driver collaborators; only _migrate is exercised.
self.driver.session = mock.MagicMock()
self.driver.config = mock.MagicMock()
self.driver.operations = mock.MagicMock()
@mock.patch.object(driver.LXDDriver, '_migrate')
def test_live_migration(self, mock_migrate):
"""Verify that the correct live migration calls
are made.
"""
self.flags(my_ip='fakeip')
mock_post_method = mock.MagicMock()
self.driver.live_migration(
mock.sentinel.context, mock.sentinel.instance,
mock.sentinel.dest, mock_post_method,
mock.sentinel.recover_method, mock.sentinel.block_migration,
mock.sentinel.migrate_data)
mock_migrate.assert_called_once_with(mock.sentinel.dest,
mock.sentinel.instance)
mock_post_method.assert_called_once_with(
mock.sentinel.context, mock.sentinel.instance, mock.sentinel.dest,
mock.sentinel.block_migration)
@mock.patch.object(driver.LXDDriver, '_migrate')
def test_live_migration_failed(self, mock_migrate):
"""Verify that an exception is raised when live-migration
fails.
"""
self.flags(my_ip='fakeip')
mock_migrate.side_effect = \
lxd_exceptions.APIError(500, 'Fake')
# NOTE(review): unlike the success case above, this call passes one
# argument fewer (no post_method), so recover_method fills that
# slot; _migrate raises before it would be used — verify against
# LXDDriver.live_migration's signature.
self.assertRaises(
lxd_exceptions.APIError,
self.driver.live_migration, mock.sentinel.context,
mock.sentinel.instance, mock.sentinel.dest,
mock.sentinel.recover_method, mock.sentinel.block_migration,
mock.sentinel.migrate_data)
def test_live_migration_not_allowed(self):
"""Verify an exception is raised when live migration is not allowed."""
self.flags(allow_live_migration=False,
group='lxd')
self.assertRaises(exception.MigrationPreCheckError,
self.driver.check_can_live_migrate_source,
mock.sentinel.context, mock.sentinel.instance,
mock.sentinel.dest_check_data,
mock.sentinel.block_device_info)
def test_live_migration_allowed(self):
"""Verify live-migration is allowed when the allow_live_migration
flag is True.
"""
self.flags(allow_live_migration=True,
group='lxd')
self.assertEqual(mock.sentinel.dest_check_data,
self.driver.check_can_live_migrate_source(
mock.sentinel.context, mock.sentinel.instance,
mock.sentinel.dest_check_data,
mock.sentinel.block_device_info))

View File

@ -1,109 +0,0 @@
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Unit tests for ContainerMixin class
The following tests the ContainerMixin class
for nova-lxd.
"""
import ddt
import mock
from nova import exception
from nova import test
from pylxd.deprecated import exceptions as lxd_exceptions
from nova.virt.lxd import session
import fake_api
import stubs
@ddt.ddt
class SessionContainerTest(test.NoDBTestCase):
"""Tests for LXDAPISession container operations."""
def setUp(self):
super(SessionContainerTest, self).setUp()
"""This is so we can mock out pylxd API calls."""
self.ml = stubs.lxd_mock()
lxd_patcher = mock.patch('pylxd.deprecated.api.API',
mock.Mock(return_value=self.ml))
lxd_patcher.start()
self.addCleanup(lxd_patcher.stop)
self.session = session.LXDAPISession()
@stubs.annotated_data(
('1', (200, fake_api.fake_operation_info_ok()))
)
def test_container_init(self, tag, side_effect):
"""
container_init creates a container based on given config
for a container. Check to see if we are returning the right
pylxd calls for the LXD API.
"""
config = mock.Mock()
instance = stubs._fake_instance()
self.ml.container_init.return_value = side_effect
self.ml.operation_info.return_value = \
(200, fake_api.fake_container_state(200))
self.assertIsNone(self.session.container_init(config, instance))
calls = [mock.call.container_init(config),
mock.call.wait_container_operation(
'/1.0/operation/1234', 200, -1),
mock.call.operation_info('/1.0/operation/1234')]
self.assertEqual(calls, self.ml.method_calls)
@stubs.annotated_data(
('api_fail', lxd_exceptions.APIError(500, 'Fake'),
exception.NovaException),
)
def test_container_init_fail(self, tag, side_effect, expected):
"""
container_init creates a container on a given LXD host. Make
sure that we raise an exception.NovaException if there is
an APIError from the LXD API.
"""
config = mock.Mock()
instance = stubs._fake_instance()
self.ml.container_init.side_effect = side_effect
self.assertRaises(expected,
self.session.container_init, config,
instance)
@ddt.ddt
class SessionEventTest(test.NoDBTestCase):
"""Tests for LXDAPISession.operation_wait."""
def setUp(self):
super(SessionEventTest, self).setUp()
self.ml = stubs.lxd_mock()
lxd_patcher = mock.patch('pylxd.deprecated.api.API',
mock.Mock(return_value=self.ml))
lxd_patcher.start()
self.addCleanup(lxd_patcher.stop)
self.session = session.LXDAPISession()
def test_container_wait(self):
"""operation_wait delegates to wait_container_operation(op, 200, -1)."""
instance = stubs._fake_instance()
operation_id = mock.Mock()
self.ml.wait_container_operation.return_value = True
self.assertIsNone(self.session.operation_wait(operation_id, instance))
self.ml.wait_container_operation.assert_called_with(operation_id,
200, -1)

View File

@ -1,259 +0,0 @@
# Copyright 2016 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import context
from nova import test
from nova.tests.unit import fake_instance
from nova.virt.lxd import storage
class TestAttachEphemeral(test.NoDBTestCase):
"""Tests for nova.virt.lxd.storage.attach_ephemeral."""
def setUp(self):
super(TestAttachEphemeral, self).setUp()
self.patchers = []
CONF_patcher = mock.patch('nova.virt.lxd.common.conf.CONF')
self.patchers.append(CONF_patcher)
self.CONF = CONF_patcher.start()
self.CONF.instances_path = '/i'
self.CONF.lxd.root_dir = '/var/lib/lxd'
def tearDown(self):
super(TestAttachEphemeral, self).tearDown()
for patcher in self.patchers:
patcher.stop()
@mock.patch.object(storage.utils, 'execute')
@mock.patch(
'nova.virt.lxd.storage.driver.block_device_info_get_ephemerals')
def test_add_ephemerals_with_zfs(
self, block_device_info_get_ephemerals, execute):
"""zfs backend: a zfs dataset is created and chowned to the idmap uid."""
ctx = context.get_admin_context()
block_device_info_get_ephemerals.return_value = [
{'virtual_name': 'ephemerals0'}]
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
block_device_info = mock.Mock()
lxd_config = {'environment': {'storage': 'zfs'},
'config': {'storage.zfs_pool_name': 'zfs'}}
container = mock.Mock()
# idmap JSON drives the uid used for chown below
container.config = {
'volatile.last_state.idmap': '[{"Isuid":true,"Isgid":false,'
'"Hostid":165536,"Nsid":0,'
'"Maprange":65536}]'
}
client = mock.Mock()
client.containers.get.return_value = container
storage.attach_ephemeral(
client, block_device_info, lxd_config, instance)
block_device_info_get_ephemerals.assert_called_once_with(
block_device_info)
expected_calls = [
mock.call(
'zfs', 'create', '-o',
'mountpoint=/i/instance-00000001/storage/ephemerals0', '-o',
'quota=0G', 'zfs/instance-00000001-ephemeral',
run_as_root=True),
mock.call(
'chown', '165536', '/i/instance-00000001/storage/ephemerals0',
run_as_root=True)
]
self.assertEqual(expected_calls, execute.call_args_list)
@mock.patch.object(storage.utils, 'execute')
@mock.patch(
'nova.virt.lxd.storage.driver.block_device_info_get_ephemerals')
def test_add_ephemerals_with_btrfs(
self, block_device_info_get_ephemerals, execute):
"""btrfs backend: a subvolume is created and the profile source updated."""
ctx = context.get_admin_context()
block_device_info_get_ephemerals.return_value = [
{'virtual_name': 'ephemerals0'}]
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
instance.ephemeral_gb = 1
block_device_info = mock.Mock()
lxd_config = {'environment': {'storage': 'btrfs'}}
profile = mock.Mock()
profile.devices = {
'root': {
'path': '/',
'type': 'disk',
'size': '1G'
},
'ephemerals0': {
'optional': 'True',
'path': '/mnt',
'source': '/path/fake_path',
'type': 'disk'
}
}
client = mock.Mock()
client.profiles.get.return_value = profile
container = mock.Mock()
container.config = {
'volatile.last_state.idmap': '[{"Isuid":true,"Isgid":false,'
'"Hostid":165536,"Nsid":0,'
'"Maprange":65536}]'
}
client.containers.get.return_value = container
storage.attach_ephemeral(
client, block_device_info, lxd_config, instance)
block_device_info_get_ephemerals.assert_called_once_with(
block_device_info)
profile.save.assert_called_once_with()
expected_calls = [
mock.call(
'btrfs', 'subvolume', 'create',
'/var/lib/lxd/containers/instance-00000001/ephemerals0',
run_as_root=True),
mock.call(
'btrfs', 'qgroup', 'limit', '1g',
'/var/lib/lxd/containers/instance-00000001/ephemerals0',
run_as_root=True),
mock.call(
'chown', '165536',
'/var/lib/lxd/containers/instance-00000001/ephemerals0',
run_as_root=True)
]
self.assertEqual(expected_calls, execute.call_args_list)
self.assertEqual(
profile.devices['ephemerals0']['source'],
'/var/lib/lxd/containers/instance-00000001/ephemerals0')
@mock.patch.object(storage.utils, 'execute')
@mock.patch(
'nova.virt.lxd.storage.driver.block_device_info_get_ephemerals')
def test_ephemeral_with_lvm(
self, block_device_info_get_ephemerals, execute):
"""lvm backend: an LV is created, formatted ext4, mounted and chowned."""
ctx = context.get_admin_context()
block_device_info_get_ephemerals.return_value = [
{'virtual_name': 'ephemerals0'}]
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
block_device_info = mock.Mock()
lxd_config = {'environment': {'storage': 'lvm'},
'config': {'storage.lvm_vg_name': 'lxd'}}
storage.fileutils = mock.Mock()
container = mock.Mock()
container.config = {
'volatile.last_state.idmap': '[{"Isuid":true,"Isgid":false,'
'"Hostid":165536,"Nsid":0,'
'"Maprange":65536}]'
}
client = mock.Mock()
client.containers.get.return_value = container
storage.attach_ephemeral(
client, block_device_info, lxd_config, instance)
block_device_info_get_ephemerals.assert_called_once_with(
block_device_info)
expected_calls = [
mock.call(
'lvcreate', '-L', '0G', '-n', 'instance-00000001-ephemerals0',
'lxd', attempts=3, run_as_root=True),
mock.call(
'mkfs', '-t', 'ext4', '/dev/lxd/instance-00000001-ephemerals0',
run_as_root=True),
mock.call(
'mount', '-t', 'ext4',
'/dev/lxd/instance-00000001-ephemerals0',
'/i/instance-00000001/storage/ephemerals0',
run_as_root=True),
mock.call(
'chown', '165536', '/i/instance-00000001/storage/ephemerals0',
run_as_root=True)]
self.assertEqual(expected_calls, execute.call_args_list)
class TestDetachEphemeral(test.NoDBTestCase):
"""Tests for nova.virt.lxd.storage.detach_ephemeral."""
@mock.patch.object(storage.utils, 'execute')
@mock.patch(
'nova.virt.lxd.storage.driver.block_device_info_get_ephemerals')
def test_remove_ephemeral_with_zfs(
self, block_device_info_get_ephemerals, execute):
"""zfs backend: the ephemeral dataset is destroyed."""
block_device_info_get_ephemerals.return_value = [
{'virtual_name': 'ephemerals0'}]
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
block_device_info = mock.Mock()
lxd_config = {'environment': {'storage': 'zfs'},
'config': {'storage.zfs_pool_name': 'zfs'}}
client = mock.Mock()
storage.detach_ephemeral(
client, block_device_info, lxd_config, instance)
block_device_info_get_ephemerals.assert_called_once_with(
block_device_info)
expected_calls = [
mock.call('zfs', 'destroy', 'zfs/instance-00000001-ephemeral',
run_as_root=True)
]
self.assertEqual(expected_calls, execute.call_args_list)
@mock.patch.object(storage.utils, 'execute')
@mock.patch(
'nova.virt.lxd.storage.driver.block_device_info_get_ephemerals')
def test_remove_ephemeral_with_lvm(
self, block_device_info_get_ephemerals, execute):
"""lvm backend: the LV is unmounted and removed."""
block_device_info_get_ephemerals.return_value = [
{'virtual_name': 'ephemerals0'}]
ctx = context.get_admin_context()
instance = fake_instance.fake_instance_obj(
ctx, name='test', memory_mb=0)
block_device_info = mock.Mock()
lxd_config = {'environment': {'storage': 'lvm'},
'config': {'storage.lvm_vg_name': 'lxd'}}
client = mock.Mock()
storage.detach_ephemeral(
client, block_device_info, lxd_config, instance)
block_device_info_get_ephemerals.assert_called_once_with(
block_device_info)
expected_calls = [
mock.call(
'umount', '/dev/lxd/instance-00000001-ephemerals0',
run_as_root=True),
mock.call('lvremove', '-f',
'/dev/lxd/instance-00000001-ephemerals0',
run_as_root=True)
]
self.assertEqual(expected_calls, execute.call_args_list)

View File

@ -1,331 +0,0 @@
# Copyright 2016 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import context
from nova import exception
from nova.network import model as network_model
from nova import test
from nova.tests.unit import fake_instance
from nova.virt.lxd import vif
# Shared network-model fixtures used by the vif test cases below.
GATEWAY = network_model.IP(address='101.168.1.1', type='gateway')
DNS_BRIDGE = network_model.IP(address='8.8.8.8', type=None)
SUBNET = network_model.Subnet(
cidr='101.168.1.0/24', dns=[DNS_BRIDGE], gateway=GATEWAY,
routes=None, dhcp_server='191.168.1.1')
NETWORK = network_model.Network(
id='ab7b876b-2c1c-4bb2-afa1-f9f4b6a28053', bridge='br0', label=None,
subnets=[SUBNET], bridge_interface=None, vlan=99, mtu=1000)
# OVS vif without hybrid plug
OVS_VIF = network_model.VIF(
id='da5cc4bf-f16c-4807-a0b6-911c7c67c3f8', address='ca:fe:de:ad:be:ef',
network=NETWORK, type=network_model.VIF_TYPE_OVS,
devname='tapda5cc4bf-f1',
ovs_interfaceid='7b6812a6-b044-4596-b3c5-43a8ec431638',
details={network_model.VIF_DETAILS_OVS_HYBRID_PLUG: False})
# Same vif with hybrid plug enabled
OVS_HYBRID_VIF = network_model.VIF(
id='da5cc4bf-f16c-4807-a0b6-911c7c67c3f8', address='ca:fe:de:ad:be:ef',
network=NETWORK, type=network_model.VIF_TYPE_OVS,
devname='tapda5cc4bf-f1',
ovs_interfaceid='7b6812a6-b044-4596-b3c5-43a8ec431638',
details={network_model.VIF_DETAILS_OVS_HYBRID_PLUG: True})
TAP_VIF = network_model.VIF(
id='da5cc4bf-f16c-4807-a0b6-911c7c67c3f8', address='ca:fe:de:ad:be:ee',
network=NETWORK, type=network_model.VIF_TYPE_TAP,
devname='tapda5cc4bf-f1',
details={'mac_address': 'aa:bb:cc:dd:ee:ff'})
# Linux-bridge vif
LB_VIF = network_model.VIF(
id='da5cc4bf-f16c-4807-a0b6-911c7c67c3f8', address='ca:fe:de:ad:be:ed',
network=NETWORK, type=network_model.VIF_TYPE_BRIDGE,
devname='tapda5cc4bf-f1')
INSTANCE = fake_instance.fake_instance_obj(
context.get_admin_context(), name='test')
class GetVifDevnameTest(test.NoDBTestCase):
    """Tests for get_vif_devname."""

    def test_get_vif_devname_devname_exists(self):
        """An explicit 'devname' entry is returned as-is."""
        vif_data = {
            'id': 'da5cc4bf-f16c-4807-a0b6-911c7c67c3f8',
            'devname': 'oth1',
        }
        self.assertEqual('oth1', vif.get_vif_devname(vif_data))

    def test_get_vif_devname_devname_nonexistent(self):
        """Without 'devname', the name is derived from the vif id."""
        vif_data = {'id': 'da5cc4bf-f16c-4807-a0b6-911c7c67c3f8'}
        self.assertEqual('nicda5cc4bf-f1', vif.get_vif_devname(vif_data))
class GetConfigTest(test.NoDBTestCase):
"""Tests for get_config."""
def setUp(self):
super(GetConfigTest, self).setUp()
# Default to the no-op firewall; the hybrid test overrides this.
self.CONF_patcher = mock.patch('nova.virt.lxd.vif.CONF')
self.CONF = self.CONF_patcher.start()
self.CONF.firewall_driver = 'nova.virt.firewall.NoopFirewallDriver'
def tearDown(self):
super(GetConfigTest, self).tearDown()
self.CONF_patcher.stop()
def test_get_config_bad_vif_type(self):
"""Unsupported vif types raise an exception."""
an_vif = network_model.VIF(
id='da5cc4bf-f16c-4807-a0b6-911c7c67c3f8',
address='ca:fe:de:ad:be:ef',
network=NETWORK, type='invalid',
devname='tapda5cc4bf-f1',
ovs_interfaceid='7b6812a6-b044-4596-b3c5-43a8ec431638')
self.assertRaises(
exception.NovaException, vif.get_config, an_vif)
def test_get_config_bridge(self):
"""bridge vifs yield the network bridge and mac address."""
expected = {'bridge': 'br0', 'mac_address': 'ca:fe:de:ad:be:ef'}
an_vif = network_model.VIF(
id='da5cc4bf-f16c-4807-a0b6-911c7c67c3f8',
address='ca:fe:de:ad:be:ef',
network=NETWORK, type='bridge',
devname='tapda5cc4bf-f1',
ovs_interfaceid='7b6812a6-b044-4596-b3c5-43a8ec431638')
config = vif.get_config(an_vif)
self.assertEqual(expected, config)
def test_get_config_ovs_bridge(self):
"""ovs vifs with the no-op firewall use the network bridge."""
expected = {
'bridge': 'br0', 'mac_address': 'ca:fe:de:ad:be:ef'}
an_vif = network_model.VIF(
id='da5cc4bf-f16c-4807-a0b6-911c7c67c3f8',
address='ca:fe:de:ad:be:ef',
network=NETWORK, type='ovs',
devname='tapda5cc4bf-f1',
ovs_interfaceid='7b6812a6-b044-4596-b3c5-43a8ec431638')
config = vif.get_config(an_vif)
self.assertEqual(expected, config)
def test_get_config_ovs_hybrid(self):
"""ovs vifs with a real firewall driver use the qbr hybrid bridge."""
self.CONF.firewall_driver = 'AnFirewallDriver'
expected = {
'bridge': 'qbrda5cc4bf-f1', 'mac_address': 'ca:fe:de:ad:be:ef'}
an_vif = network_model.VIF(
id='da5cc4bf-f16c-4807-a0b6-911c7c67c3f8',
address='ca:fe:de:ad:be:ef',
network=NETWORK, type='ovs',
devname='tapda5cc4bf-f1',
ovs_interfaceid='7b6812a6-b044-4596-b3c5-43a8ec431638')
config = vif.get_config(an_vif)
self.assertEqual(expected, config)
def test_get_config_tap(self):
"""tap vifs yield only the mac address."""
expected = {'mac_address': 'ca:fe:de:ad:be:ef'}
an_vif = network_model.VIF(
id='da5cc4bf-f16c-4807-a0b6-911c7c67c3f8',
address='ca:fe:de:ad:be:ef',
network=NETWORK, type='tap',
devname='tapda5cc4bf-f1',
ovs_interfaceid='7b6812a6-b044-4596-b3c5-43a8ec431638')
config = vif.get_config(an_vif)
self.assertEqual(expected, config)
class LXDGenericVifDriverTest(test.NoDBTestCase):
"""Tests for LXDGenericVifDriver."""
def setUp(self):
super(LXDGenericVifDriverTest, self).setUp()
self.vif_driver = vif.LXDGenericVifDriver()
@mock.patch.object(vif, '_post_plug_wiring')
@mock.patch('nova.virt.lxd.vif.os_vif')
def test_plug_ovs(self, os_vif, _post_plug_wiring):
"""ovs vifs are plugged via os_vif, then post-plug wiring runs."""
self.vif_driver.plug(INSTANCE, OVS_VIF)
self.assertEqual(
'tapda5cc4bf-f1', os_vif.plug.call_args[0][0].vif_name)
self.assertEqual(
'instance-00000001', os_vif.plug.call_args[0][1].name)
_post_plug_wiring.assert_called_with(INSTANCE, OVS_VIF)
@mock.patch.object(vif, '_post_unplug_wiring')
@mock.patch('nova.virt.lxd.vif.os_vif')
def test_unplug_ovs(self, os_vif, _post_unplug_wiring):
"""ovs vifs are unplugged via os_vif, then post-unplug wiring runs."""
self.vif_driver.unplug(INSTANCE, OVS_VIF)
self.assertEqual(
'tapda5cc4bf-f1', os_vif.unplug.call_args[0][0].vif_name)
self.assertEqual(
'instance-00000001', os_vif.unplug.call_args[0][1].name)
_post_unplug_wiring.assert_called_with(INSTANCE, OVS_VIF)
@mock.patch.object(vif, '_post_plug_wiring')
@mock.patch.object(vif, '_create_veth_pair')
@mock.patch('nova.virt.lxd.vif.os_vif')
def test_plug_tap(self, os_vif, _create_veth_pair, _post_plug_wiring):
"""tap vifs bypass os_vif and create a veth pair directly."""
self.vif_driver.plug(INSTANCE, TAP_VIF)
os_vif.plug.assert_not_called()
_create_veth_pair.assert_called_with('tapda5cc4bf-f1',
'tinda5cc4bf-f1',
1000)
_post_plug_wiring.assert_called_with(INSTANCE, TAP_VIF)
@mock.patch.object(vif, '_post_unplug_wiring')
@mock.patch('nova.virt.lxd.vif.linux_net')
@mock.patch('nova.virt.lxd.vif.os_vif')
def test_unplug_tap(self, os_vif, linux_net, _post_unplug_wiring):
"""tap vifs are removed via linux_net, not os_vif."""
self.vif_driver.unplug(INSTANCE, TAP_VIF)
os_vif.plug.assert_not_called()
linux_net.delete_net_dev.assert_called_with('tapda5cc4bf-f1')
_post_unplug_wiring.assert_called_with(INSTANCE, TAP_VIF)
class PostPlugTest(test.NoDBTestCase):
"""Tests for post plug operations"""
def setUp(self):
super(PostPlugTest, self).setUp()
@mock.patch('nova.virt.lxd.vif._create_veth_pair')
@mock.patch('nova.virt.lxd.vif._add_bridge_port')
@mock.patch('nova.virt.lxd.vif.linux_net')
def test_post_plug_ovs_hybrid(self,
linux_net,
add_bridge_port,
create_veth_pair):
"""hybrid-plug ovs: veth pair created and added to the qbr bridge."""
linux_net.device_exists.return_value = False
vif._post_plug_wiring(INSTANCE, OVS_HYBRID_VIF)
linux_net.device_exists.assert_called_with('tapda5cc4bf-f1')
create_veth_pair.assert_called_with('tapda5cc4bf-f1',
'tinda5cc4bf-f1',
1000)
add_bridge_port.assert_called_with('qbrda5cc4bf-f1',
'tapda5cc4bf-f1')
@mock.patch('nova.virt.lxd.vif._create_veth_pair')
@mock.patch('nova.virt.lxd.vif._add_bridge_port')
@mock.patch.object(vif, '_create_ovs_vif_port')
@mock.patch('nova.virt.lxd.vif.linux_net')
def test_post_plug_ovs(self,
linux_net,
create_ovs_vif_port,
add_bridge_port,
create_veth_pair):
"""plain ovs: veth pair created and attached as an OVS port."""
linux_net.device_exists.return_value = False
vif._post_plug_wiring(INSTANCE, OVS_VIF)
linux_net.device_exists.assert_called_with('tapda5cc4bf-f1')
create_veth_pair.assert_called_with('tapda5cc4bf-f1',
'tinda5cc4bf-f1',
1000)
add_bridge_port.assert_not_called()
create_ovs_vif_port.assert_called_with(
'br0',
'tapda5cc4bf-f1',
'da5cc4bf-f16c-4807-a0b6-911c7c67c3f8',
'ca:fe:de:ad:be:ef',
INSTANCE.uuid,
1000
)
@mock.patch('nova.virt.lxd.vif._create_veth_pair')
@mock.patch('nova.virt.lxd.vif._add_bridge_port')
@mock.patch('nova.virt.lxd.vif.linux_net')
def test_post_plug_bridge(self,
linux_net,
add_bridge_port,
create_veth_pair):
"""linux bridge: veth pair created and added to the network bridge."""
linux_net.device_exists.return_value = False
vif._post_plug_wiring(INSTANCE, LB_VIF)
linux_net.device_exists.assert_called_with('tapda5cc4bf-f1')
create_veth_pair.assert_called_with('tapda5cc4bf-f1',
'tinda5cc4bf-f1',
1000)
add_bridge_port.assert_called_with('br0',
'tapda5cc4bf-f1')
@mock.patch('nova.virt.lxd.vif._create_veth_pair')
@mock.patch('nova.virt.lxd.vif._add_bridge_port')
@mock.patch('nova.virt.lxd.vif.linux_net')
def test_post_plug_tap(self,
linux_net,
add_bridge_port,
create_veth_pair):
"""tap vifs require no post-plug wiring."""
linux_net.device_exists.return_value = False
vif._post_plug_wiring(INSTANCE, TAP_VIF)
linux_net.device_exists.assert_not_called()
class PostUnplugTest(test.NoDBTestCase):
    """Tests for post unplug operations

    Exercises vif._post_unplug_wiring: OVS ports are removed via the
    ovs helper, everything else by deleting the host net device.
    """

    @mock.patch('nova.virt.lxd.vif.linux_net')
    def test_post_unplug_ovs_hybrid(self, linux_net):
        # Hybrid OVS: the tap device is deleted like a bridge vif.
        vif._post_unplug_wiring(INSTANCE, OVS_HYBRID_VIF)
        linux_net.delete_net_dev.assert_called_with('tapda5cc4bf-f1')

    @mock.patch.object(vif, '_delete_ovs_vif_port')
    def test_post_unplug_ovs(self, delete_ovs_vif_port):
        # Pure OVS: the port is removed from the bridge, and the
        # underlying device is deleted too (delete_dev=True).
        vif._post_unplug_wiring(INSTANCE, OVS_VIF)
        delete_ovs_vif_port.assert_called_with('br0',
                                               'tapda5cc4bf-f1',
                                               True)

    @mock.patch('nova.virt.lxd.vif.linux_net')
    def test_post_unplug_bridge(self, linux_net):
        # Linux bridge: the tap device is simply deleted.
        vif._post_unplug_wiring(INSTANCE, LB_VIF)
        linux_net.delete_net_dev.assert_called_with('tapda5cc4bf-f1')
class MiscHelpersTest(test.NoDBTestCase):
    """Misc tests for vif module"""

    def test_is_ovs_vif_port(self):
        # Only a non-hybrid 'ovs' vif counts as an OVS port.
        self.assertTrue(vif._is_ovs_vif_port(OVS_VIF))
        self.assertFalse(vif._is_ovs_vif_port(OVS_HYBRID_VIF))
        self.assertFalse(vif._is_ovs_vif_port(TAP_VIF))

    @mock.patch.object(vif, 'utils')
    def test_add_bridge_port(self, utils):
        # _add_bridge_port shells out to brctl as root.
        vif._add_bridge_port('br-int', 'tapXYZ')
        utils.execute.assert_called_with('brctl', 'addif',
                                         'br-int', 'tapXYZ',
                                         run_as_root=True)

View File

@ -1 +0,0 @@
__import__('pkg_resources').declare_namespace(__name__)

View File

@ -1,3 +0,0 @@
# Compatibility shim: re-export the driver class at package level so
# 'nova.virt.lxd.LXDDriver' resolves as nova's compute_driver option.
from nova.virt.lxd import driver

LXDDriver = driver.LXDDriver

View File

@ -1,48 +0,0 @@
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import os
from nova import conf
# Lightweight value object holding the nova-lxd filesystem paths for a
# single instance; built by InstanceAttributes() below.
_InstanceAttributes = collections.namedtuple('InstanceAttributes', [
    'instance_dir', 'console_path', 'storage_path', 'container_path'])


def InstanceAttributes(instance):
    """An instance adapter for nova-lxd specific attributes.

    :param instance: a nova instance object (only .name is used)
    :returns: namedtuple with instance_dir, console_path, storage_path
        and container_path for the instance
    """
    # Snap-installed LXD keeps logs under /var/snap; a package install
    # uses /var/log/lxd.
    if is_snap_lxd():
        prefix = '/var/snap/lxd/common/lxd/logs'
    else:
        prefix = '/var/log/lxd'
    instance_dir = os.path.join(conf.CONF.instances_path, instance.name)
    console_path = os.path.join(prefix, instance.name, 'console.log')
    storage_path = os.path.join(instance_dir, 'storage')
    container_path = os.path.join(
        conf.CONF.lxd.root_dir, 'containers', instance.name)
    return _InstanceAttributes(
        instance_dir, console_path, storage_path, container_path)
def is_snap_lxd():
    """Report whether LXD on this host is the snap-packaged install.

    The snap install ships the client binary at /snap/bin/lxc, so the
    presence of that file distinguishes a snap install from a deb one.

    :returns: True if snap installed, otherwise False
    :rtype: bool
    """
    snap_lxc_path = '/snap/bin/lxc'
    return os.path.isfile(snap_lxc_path)

File diff suppressed because it is too large Load Diff

View File

@ -1,234 +0,0 @@
# Copyright 2016 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from nova import exception
from nova import i18n
from nova.virt import driver
from oslo_config import cfg
from oslo_utils import units
from nova.virt.lxd import common
from nova.virt.lxd import vif
_ = i18n._
CONF = cfg.CONF
def _base_config(instance, _):
    """Base LXD profile config applied to every instance: product
    branding plus a raw.lxc entry pointing the container's console log
    at the nova-lxd console path.
    """
    instance_attributes = common.InstanceAttributes(instance)
    return {
        'environment.product_name': 'OpenStack Nova',
        'raw.lxc': 'lxc.console.logfile={}\n'.format(
            instance_attributes.console_path),
    }
def _nesting(instance, _):
if instance.flavor.extra_specs.get('lxd:nested_allowed'):
return {'security.nesting': 'True'}
def _security(instance, _):
if instance.flavor.extra_specs.get('lxd:privileged_allowed'):
return {'security.privileged': 'True'}
def _memory(instance, _):
mem = instance.memory_mb
if mem >= 0:
return {'limits.memory': '{}MB'.format(mem)}
def _cpu(instance, _):
vcpus = instance.flavor.vcpus
if vcpus >= 0:
return {'limits.cpu': str(vcpus)}
def _isolated(instance, client):
    """Request an isolated idmap when the flavor's 'lxd:isolated' extra
    spec is set.

    Requires the LXD 'id_map' API extension on the host; raises
    NovaException when the host lacks it.
    """
    lxd_isolated = instance.flavor.extra_specs.get('lxd:isolated')
    if lxd_isolated:
        extensions = client.host_info.get('api_extensions', [])
        if 'id_map' in extensions:
            return {'security.idmap.isolated': 'True'}
        else:
            msg = _("Host does not support isolated instances")
            raise exception.NovaException(msg)


# Ordered list of profile config generators; each takes
# (instance, client) and returns a dict of config keys (or None).
_CONFIG_FILTER_MAP = [
    _base_config,
    _nesting,
    _security,
    _memory,
    _cpu,
    _isolated,
]
def _root(instance, client, *_):
    """Configure the root disk.

    Builds the 'root' disk device for the profile: storage pool (when
    CONF.lxd.pool is set and the host supports the 'storage' API
    extension), size quota (btrfs/zfs only) and flavor-driven disk IO
    limits.  Raises NovaException if a pool is configured but the host
    lacks storage pool support.
    """
    device = {'type': 'disk', 'path': '/'}

    # we don't do quotas if the CONF.lxd.pool is set and is dir or lvm, or if
    # the environment['storage'] is dir or lvm.
    if CONF.lxd.pool:
        extensions = client.host_info.get('api_extensions', [])
        if 'storage' in extensions:
            device['pool'] = CONF.lxd.pool
            storage_type = client.storage_pools.get(CONF.lxd.pool).driver
        else:
            msg = _("Host does not have storage pool support")
            raise exception.NovaException(msg)
    else:
        storage_type = client.host_info['environment']['storage']

    # Size quotas only work on copy-on-write backends.
    if storage_type in ['btrfs', 'zfs']:
        device['size'] = '{}GB'.format(instance.root_gb)

    specs = instance.flavor.extra_specs

    # Bytes and iops are not separate config options in a container
    # profile - we let Bytes take priority over iops if both are set.
    # Align all limits to MiB/s, which should be a sensible middle road.
    if specs.get('quota:disk_read_iops_sec'):
        device['limits.read'] = '{}iops'.format(
            specs['quota:disk_read_iops_sec'])

    if specs.get('quota:disk_write_iops_sec'):
        device['limits.write'] = '{}iops'.format(
            specs['quota:disk_write_iops_sec'])

    if specs.get('quota:disk_read_bytes_sec'):
        device['limits.read'] = '{}MB'.format(
            int(specs['quota:disk_read_bytes_sec']) // units.Mi)

    if specs.get('quota:disk_write_bytes_sec'):
        device['limits.write'] = '{}MB'.format(
            int(specs['quota:disk_write_bytes_sec']) // units.Mi)

    # The 'total' quotas apply only when no directional limit was set.
    minor_quota_defined = ('limits.write' in device or
                           'limits.read' in device)

    if specs.get('quota:disk_total_iops_sec') and not minor_quota_defined:
        device['limits.max'] = '{}iops'.format(
            specs['quota:disk_total_iops_sec'])

    if specs.get('quota:disk_total_bytes_sec') and not minor_quota_defined:
        device['limits.max'] = '{}MB'.format(
            int(specs['quota:disk_total_bytes_sec']) // units.Mi)

    return {'root': device}
def _ephemeral_storage(instance, client, __, block_info):
    """Build LXD disk devices for the instance's ephemeral devices.

    Each ephemeral is exposed at /mnt inside the container, backed by a
    per-instance path under the nova-lxd storage dir.  Requires the LXD
    'storage' API extension when CONF.lxd.pool is configured.
    """
    instance_attributes = common.InstanceAttributes(instance)
    ephemeral_storage = driver.block_device_info_get_ephemerals(block_info)
    if ephemeral_storage:
        devices = {}
        for ephemeral in ephemeral_storage:
            ephemeral_src = os.path.join(
                instance_attributes.storage_path,
                ephemeral['virtual_name'])
            device = {
                'path': '/mnt',
                'source': ephemeral_src,
                'type': 'disk',
            }
            if CONF.lxd.pool:
                extensions = client.host_info.get('api_extensions', [])
                if 'storage' in extensions:
                    device['pool'] = CONF.lxd.pool
                else:
                    msg = _("Host does not have storage pool support")
                    raise exception.NovaException(msg)
            devices[ephemeral['virtual_name']] = device
        return devices


def _network(instance, _, network_info, __):
    """Build LXD 'nic' devices from the nova network_info.

    Each VIF becomes a physical nic whose parent is the container-side
    ('tin') veth device created by the vif driver; flavor
    quota:vif_* extra specs map onto LXD limits.ingress/egress.
    """
    if not network_info:
        return

    devices = {}
    for vifaddr in network_info:
        cfg = vif.get_config(vifaddr)
        devname = vif.get_vif_devname(vifaddr)
        key = devname
        devices[key] = {
            'nictype': 'physical',
            'hwaddr': str(cfg['mac_address']),
            'parent': vif.get_vif_internal_devname(vifaddr),
            'type': 'nic'
        }

        specs = instance.flavor.extra_specs

        # Since LXD does not implement average NIC IO and number of burst
        # bytes, we take the max(vif_*_average, vif_*_peak) to set the peak
        # network IO and simply ignore the burst bytes.
        # Align values to MBit/s (8 * powers of 1000 in this case), having
        # in mind that the values are received in Kilobytes/s.
        vif_inbound_limit = max(
            int(specs.get('quota:vif_inbound_average', 0)),
            int(specs.get('quota:vif_inbound_peak', 0)),
        )
        if vif_inbound_limit:
            devices[key]['limits.ingress'] = '{}Mbit'.format(
                vif_inbound_limit * units.k * 8 // units.M)

        vif_outbound_limit = max(
            int(specs.get('quota:vif_outbound_average', 0)),
            int(specs.get('quota:vif_outbound_peak', 0)),
        )
        if vif_outbound_limit:
            devices[key]['limits.egress'] = '{}Mbit'.format(
                vif_outbound_limit * units.k * 8 // units.M)
    return devices


# Ordered list of device generators; each takes
# (instance, client, network_info, block_info) and returns a dict of
# LXD devices (or a falsy value for no-op).
_DEVICE_FILTER_MAP = [
    _root,
    _ephemeral_storage,
    _network,
]
def to_profile(client, instance, network_info, block_info, update=False):
    """Convert a nova flavor to a lxd profile.

    Every instance container created via nova-lxd has a profile by the
    same name. The profile is sync'd with the configuration of the container.
    When the instance container is deleted, so is the profile.

    :param client: pylxd client
    :param instance: nova instance object; the profile is named after it
    :param network_info: nova network info for the instance
    :param block_info: nova block device info for the instance
    :param update: when True, update and save the existing profile
        in place instead of creating a new one
    :returns: the created or updated pylxd profile
    """
    name = instance.name

    # Run every config generator and merge their contributions.
    config = {}
    for f in _CONFIG_FILTER_MAP:
        new = f(instance, client)
        if new:
            config.update(new)

    # Same for the device generators.
    devices = {}
    for f in _DEVICE_FILTER_MAP:
        new = f(instance, client, network_info, block_info)
        if new:
            devices.update(new)

    if update is True:
        profile = client.profiles.get(name)
        profile.devices = devices
        profile.config = config
        profile.save()
        return profile
    else:
        return client.profiles.create(name, config, devices)

View File

@ -1,183 +0,0 @@
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
# the License for the specific language governing permissions and
# limitations under the License.
import nova.conf
from nova import context as nova_context
from nova import exception
from nova import i18n
from nova import rpc
from oslo_log import log as logging
from oslo_utils import excutils
from pylxd.deprecated import api
from pylxd.deprecated import exceptions as lxd_exceptions
_ = i18n._
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class LXDAPISession(object):
    """The session to invoke the LXD API session.

    Wraps connection setup, container creation, operation polling and
    container migration against the (deprecated) pylxd API, translating
    pylxd errors into NovaExceptions and emitting a compute
    notification when the connection itself fails.
    """

    def get_session(self, host=None):
        """Returns a connection to the LXD hypervisor

        This method should be used to create a connection
        to the LXD hypervisor via the pylxd API call.

        :param host: host is the LXD daemon to connect to
        :return: pylxd object
        :raises: HypervisorUnavailable when the connection fails
        """
        try:
            if host:
                return api.API(host=host)
            else:
                return api.API()
        except Exception as ex:
            # notify the compute host that the connection failed
            # via an rpc call
            LOG.exception("Connection to LXD failed")
            payload = dict(ip=CONF.host,
                           method='_connect',
                           reason=ex)
            # BUG FIX: get_admin_context must be *called* - previously
            # the function object itself was passed as the request
            # context, which is not a valid context for the notifier.
            rpc.get_notifier('compute').error(
                nova_context.get_admin_context(),
                'compute.nova_lxd.error',
                payload)
            raise exception.HypervisorUnavailable(host=CONF.host)

    #
    # Container related API methods
    #
    def container_init(self, config, instance, host=None):
        """Create a LXD container

        :param config: LXD container config as a dict
        :param instance: nova instance object
        :param host: perform initialization on preferred host
        :raises: NovaException when LXD reports an API error or the
            create operation does not finish with status 200
        """
        try:
            LOG.info("Creating container {instance} with {image}"
                     .format(instance=instance.name,
                             image=instance.image_ref),
                     instance=instance)

            client = self.get_session(host=host)
            (state, data) = client.container_init(config)
            operation = data.get('operation')
            self.operation_wait(operation, instance, host=host)
            status, data = self.operation_info(operation, instance, host=host)
            data = data.get('metadata')
            # LXD reports per-operation success via metadata status_code.
            if not data['status_code'] == 200:
                msg = data.get('err') or data['metadata']
                raise exception.NovaException(msg)

            LOG.info("Successfully created container {instance} with {image}"
                     .format(instance=instance.name,
                             image=instance.image_ref),
                     instance=instance)
        except lxd_exceptions.APIError as ex:
            msg = (_("Failed to communicate with LXD API {instance}: {reason}")
                   .format(instance=instance.name, reason=ex))
            raise exception.NovaException(msg)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed to create container {instance}: {reason}"
                          .format(instance=instance.name, reason=ex),
                          instance=instance)

    #
    # Operation methods
    #
    def operation_wait(self, operation_id, instance, host=None):
        """Waits for an operation to return 200 (Success)

        :param operation_id: The operation to wait for.
        :param instance: nova instance object
        :param host: LXD daemon to poll (default: local daemon)
        :raises: NovaException on timeout or LXD API error
        """
        LOG.debug("wait_for_container for instance", instance=instance)
        try:
            client = self.get_session(host=host)
            # -1 means wait indefinitely for the operation to complete.
            if not client.wait_container_operation(operation_id, 200, -1):
                msg = _("Container creation timed out")
                raise exception.NovaException(msg)
        except lxd_exceptions.APIError as ex:
            msg = _("Failed to communicate with LXD API {instance}: "
                    "{reason}").format(instance=instance.image_ref,
                                       reason=ex)
            LOG.error(msg, instance=instance)
            raise exception.NovaException(msg)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error("Error from LXD during operation wait "
                          "{instance}: {reason}"
                          .format(instance=instance.image_ref, reason=e))

    def operation_info(self, operation_id, instance, host=None):
        """Fetch status and metadata for a LXD operation.

        :param operation_id: the operation to query
        :param instance: nova instance object (used for logging/errors)
        :param host: LXD daemon to query (default: local daemon)
        :raises: NovaException on LXD API error
        """
        LOG.debug("operation_info called for instance", instance=instance)
        try:
            client = self.get_session(host=host)
            return client.operation_info(operation_id)
        except lxd_exceptions.APIError as ex:
            msg = _("Failed to communicate with LXD API {instance}:"
                    " {reason}").format(instance=instance.image_ref,
                                        reason=ex)
            LOG.error(msg, instance=instance)
            raise exception.NovaException(msg)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error("Error from LXD during operation_info "
                          "{instance}: {reason}"
                          .format(instance=instance.image_ref, reason=e))

    #
    # Migrate methods
    #
    def container_migrate(self, instance_name, host, instance):
        """Initialize a container migration for LXD

        :param instance_name: container name
        :param host: host to move container from
        :param instance: nova instance object
        :return: dictionary of the container keys
        :raises: NovaException on LXD API error
        """
        LOG.debug("container_migrate called for instance", instance=instance)
        try:
            LOG.info("Migrating instance {instance} with {image}"
                     .format(instance=instance_name,
                             image=instance.image_ref))

            client = self.get_session()
            (state, data) = client.container_migrate(instance_name)

            LOG.info("Successfully initialized migration for instance "
                     "{instance} with {image}"
                     .format(instance=instance.name,
                             image=instance.image_ref))
            return (state, data)
        except lxd_exceptions.APIError as ex:
            msg = _("Failed to communicate with LXD API {instance}:"
                    " {reason}").format(instance=instance.name,
                                        reason=ex)
            raise exception.NovaException(msg)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed to migrate container {instance}: {reason}"
                          .format(instance=instance.name, reason=ex))

View File

@ -1,138 +0,0 @@
# Copyright 2016 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
from oslo_utils import fileutils
from nova import exception
from nova import i18n
from nova import utils
from nova.virt import driver
from nova.virt.lxd import common
_ = i18n._
CONF = cfg.CONF
def attach_ephemeral(client, block_device_info, lxd_config, instance):
    """Attach ephemeral storage to an instance.

    Creates backing storage for each ephemeral disk using the storage
    driver LXD itself is configured with (zfs, btrfs or lvm) and chowns
    it to the container root's mapped host uid so the container can
    write to it.

    :param client: pylxd client
    :param block_device_info: nova block device info dict
    :param lxd_config: LXD host configuration (as returned by GET /1.0)
    :param instance: nova instance object
    :raises: NovaException for unsupported storage drivers
    """
    ephemeral_storage = driver.block_device_info_get_ephemerals(
        block_device_info)
    if ephemeral_storage:
        storage_driver = lxd_config['environment']['storage']

        container = client.containers.get(instance.name)
        # volatile.last_state.idmap holds the uid/gid mapping; the
        # third element carries the host uid container root maps to.
        container_id_map = container.config[
            'volatile.last_state.idmap'].split(',')
        storage_id = container_id_map[2].split(':')[1]

        instance_attrs = common.InstanceAttributes(instance)
        for ephemeral in ephemeral_storage:
            storage_dir = os.path.join(
                instance_attrs.storage_path, ephemeral['virtual_name'])
            if storage_driver == 'zfs':
                # NOTE(ajkavanagh) - BUG/1782329 - this is temporary until
                # storage pools is implemented.  LXD 3 removed the
                # storage.zfs_pool_name key from the config.  So, if it fails,
                # we need to grab the configured storage pool and use that as
                # the name instead.
                try:
                    zfs_pool = lxd_config['config']['storage.zfs_pool_name']
                except KeyError:
                    zfs_pool = CONF.lxd.pool

                utils.execute(
                    'zfs', 'create',
                    '-o', 'mountpoint=%s' % storage_dir,
                    '-o', 'quota=%sG' % instance.ephemeral_gb,
                    '%s/%s-ephemeral' % (zfs_pool, instance.name),
                    run_as_root=True)
            elif storage_driver == 'btrfs':
                # We re-use the same btrfs subvolumes that LXD uses,
                # so the ephemeral storage path is updated in the profile
                # before the container starts.
                storage_dir = os.path.join(
                    instance_attrs.container_path, ephemeral['virtual_name'])
                profile = client.profiles.get(instance.name)
                storage_name = ephemeral['virtual_name']
                profile.devices[storage_name]['source'] = storage_dir
                profile.save()

                utils.execute(
                    'btrfs', 'subvolume', 'create', storage_dir,
                    run_as_root=True)
                utils.execute(
                    'btrfs', 'qgroup', 'limit',
                    '%sg' % instance.ephemeral_gb, storage_dir,
                    run_as_root=True)
            elif storage_driver == 'lvm':
                fileutils.ensure_tree(storage_dir)

                lvm_pool = lxd_config['config']['storage.lvm_vg_name']
                lvm_volume = '%s-%s' % (instance.name,
                                        ephemeral['virtual_name'])
                lvm_path = '/dev/%s/%s' % (lvm_pool, lvm_volume)

                cmd = (
                    'lvcreate', '-L', '%sG' % instance.ephemeral_gb,
                    '-n', lvm_volume, lvm_pool)
                utils.execute(*cmd, run_as_root=True, attempts=3)

                utils.execute('mkfs', '-t', 'ext4',
                              lvm_path, run_as_root=True)
                cmd = ('mount', '-t', 'ext4', lvm_path, storage_dir)
                utils.execute(*cmd, run_as_root=True)
            else:
                # BUG FIX: the message previously read "Unsupport LXD
                # storage ... zfs and btrfs" even though lvm is handled
                # above as well.
                reason = _("Unsupported LXD storage detected. Supported"
                           " storage drivers are zfs, btrfs and lvm.")
                raise exception.NovaException(reason)

            # Chown the backing dir to the container root's host uid so
            # the (unprivileged) container can actually use it.
            utils.execute(
                'chown', storage_id,
                storage_dir, run_as_root=True)
def detach_ephemeral(client, block_device_info, lxd_config, instance):
    """Detach ephemeral device from the instance.

    Tears down the backing storage created by attach_ephemeral for the
    zfs and lvm drivers (btrfs subvolumes are removed with the
    container itself).

    :param client: pylxd client (unused here; kept for interface
        symmetry with attach_ephemeral)
    :param block_device_info: nova block device info dict
    :param lxd_config: LXD host configuration (GET /1.0 contents)
    :param instance: nova instance object
    """
    ephemeral_storage = driver.block_device_info_get_ephemerals(
        block_device_info)
    if ephemeral_storage:
        storage_driver = lxd_config['environment']['storage']

        for ephemeral in ephemeral_storage:
            if storage_driver == 'zfs':
                # NOTE(ajkavanagh) - BUG/1782329 - this is temporary until
                # storage pools is implemented.  LXD 3 removed the
                # storage.zfs_pool_name key from the config.  So, if it fails,
                # we need to grab the configured storage pool and use that as
                # the name instead.
                try:
                    zfs_pool = lxd_config['config']['storage.zfs_pool_name']
                except KeyError:
                    zfs_pool = CONF.lxd.pool

                utils.execute(
                    'zfs', 'destroy',
                    '%s/%s-ephemeral' % (zfs_pool, instance.name),
                    run_as_root=True)
            if storage_driver == 'lvm':
                lvm_pool = lxd_config['config']['storage.lvm_vg_name']

                lvm_path = '/dev/%s/%s-%s' % (
                    lvm_pool, instance.name, ephemeral['virtual_name'])

                utils.execute('umount', lvm_path, run_as_root=True)
                utils.execute('lvremove', '-f', lvm_path, run_as_root=True)

View File

@ -1,335 +0,0 @@
# Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import processutils
from oslo_log import log as logging
from nova import conf
from nova import exception
from nova import utils
from nova.network import model as network_model
from nova.network import os_vif_util
from nova.privsep import linux_net
import os_vif
CONF = conf.CONF
LOG = logging.getLogger(__name__)
def get_vif_devname(vif):
    """Get device name for a given vif.

    Prefers the Neutron-supplied 'devname'; otherwise derives a name
    from the VIF id, truncated to the kernel interface-name limit.
    """
    try:
        return vif['devname']
    except KeyError:
        return ("nic" + vif['id'])[:network_model.NIC_NAME_LEN]
def get_vif_internal_devname(vif):
    """Get the internal device name for a given vif.

    NOTE(review): str.replace swaps *every* 'tap' occurrence, not just
    a prefix; devnames are expected to start with 'tap' — confirm.
    """
    return get_vif_devname(vif).replace('tap', 'tin')


def _create_veth_pair(dev1_name, dev2_name, mtu=None):
    """Create a pair of veth devices with the specified names,
    deleting any previous devices with those names.
    """
    # Remove stale devices first so the 'ip link add' cannot collide.
    for dev in [dev1_name, dev2_name]:
        linux_net.delete_net_dev(dev)

    utils.execute('ip', 'link', 'add', dev1_name, 'type', 'veth', 'peer',
                  'name', dev2_name, run_as_root=True)
    for dev in [dev1_name, dev2_name]:
        utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
        linux_net.set_device_mtu(dev, mtu)


def _add_bridge_port(bridge, dev):
    """Attach device 'dev' to Linux bridge 'bridge' via brctl."""
    utils.execute('brctl', 'addif', bridge, dev, run_as_root=True)


def _is_no_op_firewall():
    """True when nova uses the no-op firewall driver, i.e. security
    groups are enforced elsewhere (e.g. by Neutron)."""
    return CONF.firewall_driver == "nova.virt.firewall.NoopFirewallDriver"
def _is_ovs_vif_port(vif):
return vif['type'] == 'ovs' and not vif.is_hybrid_plug_enabled()
def _get_bridge_config(vif):
return {
'bridge': vif['network']['bridge'],
'mac_address': vif['address']}
def _get_ovs_config(vif):
    """LXD connectivity info for an OVS vif.

    With hybrid plug (or any non-noop firewall) the container attaches
    to the per-port qbr<id> Linux bridge; otherwise directly to the
    OVS integration bridge.
    """
    if not _is_no_op_firewall() or vif.is_hybrid_plug_enabled():
        return {
            'bridge': ('qbr{}'.format(vif['id']))[:network_model.NIC_NAME_LEN],
            'mac_address': vif['address']}
    else:
        return {
            'bridge': vif['network']['bridge'],
            'mac_address': vif['address']}
def _get_tap_config(vif):
return {'mac_address': vif['address']}
def _ovs_vsctl(args):
    """Run ovs-vsctl (as root, with the configured timeout).

    :param args: list of ovs-vsctl arguments
    :raises: OvsConfigurationFailure wrapping any execution error
    """
    full_args = ['ovs-vsctl', '--timeout=%s' % CONF.ovs_vsctl_timeout] + args
    try:
        return utils.execute(*full_args, run_as_root=True)
    except Exception as e:
        LOG.error("Unable to execute %(cmd)s. Exception: %(exception)s",
                  {'cmd': full_args, 'exception': e})
        raise exception.OvsConfigurationFailure(inner_exception=e)
def _create_ovs_vif_cmd(bridge, dev, iface_id, mac,
instance_id, interface_type=None):
cmd = ['--', '--if-exists', 'del-port', dev, '--',
'add-port', bridge, dev,
'--', 'set', 'Interface', dev,
'external-ids:iface-id=%s' % iface_id,
'external-ids:iface-status=active',
'external-ids:attached-mac=%s' % mac,
'external-ids:vm-uuid=%s' % instance_id]
if interface_type:
cmd += ['type=%s' % interface_type]
return cmd
def _create_ovs_vif_port(bridge, dev, iface_id, mac, instance_id,
                         mtu=None, interface_type=None):
    """Create (or replace) the OVS port for a vif and set its MTU."""
    _ovs_vsctl(_create_ovs_vif_cmd(bridge, dev, iface_id,
                                   mac, instance_id,
                                   interface_type))
    linux_net.set_device_mtu(dev, mtu)


def _delete_ovs_vif_port(bridge, dev, delete_dev=True):
    """Remove the OVS port and, optionally, the underlying device."""
    _ovs_vsctl(['--', '--if-exists', 'del-port', bridge, dev])
    if delete_dev:
        linux_net.delete_net_dev(dev)
# Dispatch table: vif['type'] -> function producing the LXD
# connectivity info for that vif type.
CONFIG_GENERATORS = {
    'bridge': _get_bridge_config,
    'ovs': _get_ovs_config,
    'tap': _get_tap_config,
}


def get_config(vif):
    """Get LXD specific config for a vif.

    :raises: NovaException for vif types without a config generator
    """
    vif_type = vif['type']
    try:
        return CONFIG_GENERATORS[vif_type](vif)
    except KeyError:
        raise exception.NovaException(
            'Unsupported vif type: {}'.format(vif_type))
# VIF_TYPE_OVS = 'ovs'
# VIF_TYPE_BRIDGE = 'bridge'
def _post_plug_wiring_veth_and_bridge(instance, vif):
    """Wire/plug the virtual interface for the instance into the bridge that
    lxd is using.

    Creates the tap/tin veth pair if missing, then attaches the
    tap end either directly to the OVS bridge (pure OVS vifs) or to the
    configured Linux bridge.  When the device already exists only the
    MTU is (re)applied.

    :param instance: the instance to plug into the bridge
    :type instance: ???
    :param vif: the virtual interface to plug into the bridge
    :type vif: :class:`nova.network.model.VIF`
    """
    config = get_config(vif)
    network = vif.get('network')
    mtu = network.get_meta('mtu') if network else None
    v1_name = get_vif_devname(vif)
    v2_name = get_vif_internal_devname(vif)
    if not linux_net.device_exists(v1_name):
        _create_veth_pair(v1_name, v2_name, mtu)
        if _is_ovs_vif_port(vif):
            # NOTE(jamespage): wire tap device directly to ovs bridge
            _create_ovs_vif_port(vif['network']['bridge'],
                                 v1_name,
                                 vif['id'],
                                 vif['address'],
                                 instance.uuid,
                                 mtu)
        else:
            # NOTE(jamespage): wire tap device linux bridge
            _add_bridge_port(config['bridge'], v1_name)
    else:
        linux_net.set_device_mtu(v1_name, mtu)


# Dispatch table: vif['type'] -> post-plug wiring handler.  Types not
# listed (e.g. 'tap') need no nova-lxd specific wiring.
POST_PLUG_WIRING = {
    'bridge': _post_plug_wiring_veth_and_bridge,
    'ovs': _post_plug_wiring_veth_and_bridge,
}


def _post_plug_wiring(instance, vif):
    """Perform nova-lxd specific post os-vif plug processing

    Perform any post os-vif plug wiring required to network
    the instance LXD container with the underlying Neutron
    network infrastructure

    :param instance: the instance to plug into the bridge
    :type instance: ???
    :param vif: the virtual interface to plug into the bridge
    :type vif: :class:`nova.network.model.VIF`
    """
    LOG.debug("Performing post plug wiring for VIF {}".format(vif),
              instance=instance)
    vif_type = vif['type']

    try:
        POST_PLUG_WIRING[vif_type](instance, vif)
        LOG.debug("Post plug wiring step for VIF {} done".format(vif),
                  instance=instance)
    except KeyError:
        # No handler registered for this vif type - that is fine.
        LOG.debug("No post plug wiring step "
                  "for vif type: {}".format(vif_type),
                  instance=instance)
# VIF_TYPE_OVS = 'ovs'
# VIF_TYPE_BRIDGE = 'bridge'
def _post_unplug_wiring_delete_veth(instance, vif):
    """Remove the host-side wiring created for a container's VIF.

    For a pure OVS vif the port is removed from the bridge (and the
    device deleted); otherwise the veth device is deleted directly.
    Failures are logged, not raised, so unplug continues best-effort.

    :param instance: the instance whose wiring is being removed
    :type instance: ???
    :param vif: the virtual interface being unplugged
    :type vif: :class:`nova.network.model.VIF`
    """
    v1_name = get_vif_devname(vif)
    try:
        if _is_ovs_vif_port(vif):
            _delete_ovs_vif_port(vif['network']['bridge'],
                                 v1_name, True)
        else:
            linux_net.delete_net_dev(v1_name)
    except processutils.ProcessExecutionError:
        # BUG FIX: was '...'.foramt(vif), which raised AttributeError
        # inside this exception handler instead of logging.
        LOG.exception("Failed to delete veth for vif {}".format(vif),
                      instance=instance)
# Dispatch table: vif['type'] -> post-unplug cleanup handler.  Types
# not listed need no nova-lxd specific cleanup.
POST_UNPLUG_WIRING = {
    'bridge': _post_unplug_wiring_delete_veth,
    'ovs': _post_unplug_wiring_delete_veth,
}


def _post_unplug_wiring(instance, vif):
    """Perform nova-lxd specific post os-vif unplug processing

    Perform any post os-vif unplug wiring required to remove
    network interfaces associated with a lxd container.

    :param instance: the instance to plug into the bridge
    :type instance: :class:`nova.db.sqlalchemy.models.Instance`
    :param vif: the virtual interface to plug into the bridge
    :type vif: :class:`nova.network.model.VIF`
    """
    LOG.debug("Performing post unplug wiring for VIF {}".format(vif),
              instance=instance)
    vif_type = vif['type']

    try:
        POST_UNPLUG_WIRING[vif_type](instance, vif)
        LOG.debug("Post unplug wiring for VIF {} done".format(vif),
                  instance=instance)
    except KeyError:
        # No handler registered for this vif type - that is fine.
        LOG.debug("No post unplug wiring step "
                  "for vif type: {}".format(vif_type),
                  instance=instance)
class LXDGenericVifDriver(object):
    """Generic VIF driver for LXD networking.

    Plugs/unplugs via os-vif where a conversion exists, falling back
    to legacy plug_<type>/unplug_<type> methods, then applies the
    nova-lxd specific post-(un)plug wiring in all cases.
    """

    def __init__(self):
        os_vif.initialize()

    def plug(self, instance, vif):
        """Plug 'vif' for 'instance' and wire it to the container."""
        vif_type = vif['type']
        instance_info = os_vif_util.nova_to_osvif_instance(instance)

        # Try os-vif codepath first
        vif_obj = os_vif_util.nova_to_osvif_vif(vif)
        if vif_obj is not None:
            os_vif.plug(vif_obj, instance_info)
        else:
            # Legacy non-os-vif codepath
            func = getattr(self, 'plug_%s' % vif_type, None)
            if not func:
                raise exception.InternalError(
                    "Unexpected vif_type=%s" % vif_type
                )
            func(instance, vif)

        _post_plug_wiring(instance, vif)

    def unplug(self, instance, vif):
        """Unplug 'vif' for 'instance' and clean up its wiring."""
        vif_type = vif['type']
        instance_info = os_vif_util.nova_to_osvif_instance(instance)

        # Try os-vif codepath first
        vif_obj = os_vif_util.nova_to_osvif_vif(vif)
        if vif_obj is not None:
            os_vif.unplug(vif_obj, instance_info)
        else:
            # Legacy non-os-vif codepath
            func = getattr(self, 'unplug_%s' % vif_type, None)
            if not func:
                raise exception.InternalError(
                    "Unexpected vif_type=%s" % vif_type
                )
            func(instance, vif)

        _post_unplug_wiring(instance, vif)

    def plug_tap(self, instance, vif):
        """Plug a VIF_TYPE_TAP virtual interface."""
        v1_name = get_vif_devname(vif)
        v2_name = get_vif_internal_devname(vif)
        network = vif.get('network')
        mtu = network.get_meta('mtu') if network else None

        # NOTE(jamespage): For nova-lxd this is really a veth pair
        # so that a) security rules get applied on the host
        # and b) that the container can still be wired.
        if not linux_net.device_exists(v1_name):
            _create_veth_pair(v1_name, v2_name, mtu)
        else:
            linux_net.set_device_mtu(v1_name, mtu)

    def unplug_tap(self, instance, vif):
        """Unplug a VIF_TYPE_TAP virtual interface."""
        dev = get_vif_devname(vif)
        try:
            linux_net.delete_net_dev(dev)
        except processutils.ProcessExecutionError:
            # Best-effort: log and continue the unplug.
            LOG.exception("Failed while unplugging vif for instance",
                          instance=instance)

View File

@ -1,3 +0,0 @@
To run the tempest-specific tests for nova-lxd, run the following command:
tox -e all-plugin -- nova_lxd

View File

@ -1,33 +0,0 @@
# Copyright 2016 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tempest.test_discover import plugins
class MyPlugin(plugins.TempestPlugin):
    """Tempest plugin entry point for the nova-lxd test suite."""

    def load_tests(self):
        # Return (full path to the tests dir, package base path), as
        # tempest's test-discovery contract requires.
        base_path = os.path.split(os.path.dirname(
            os.path.abspath(__file__)))[0]
        test_dir = "nova_lxd_tempest_plugin/tests"
        full_test_dir = os.path.join(base_path, test_dir)
        return full_test_dir, base_path

    def register_opts(self, conf):
        # No plugin-specific config options to register.
        pass

    def get_opt_lists(self):
        # No plugin-specific option lists to expose.
        pass

View File

@ -1,152 +0,0 @@
# Copyright 2016 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pylxd import client
from tempest.api.compute import base
from tempest import config
from tempest.lib.common.utils import data_utils
CONF = config.CONF
class LXDServersTestJSON(base.BaseV2ComputeAdminTest):
    """White-box checks that nova-lxd reflects server config in LXD.

    A single server is booted once for the whole class; each test then
    inspects the matching LXD profile/container via a local pylxd client.
    """
    # Nova disk_config mode used when booting the shared server.
    disk_config = 'AUTO'
    @classmethod
    def setup_credentials(cls):
        # Network resources must be requested before credentials are set up.
        cls.prepare_instance_network()
        super(LXDServersTestJSON, cls).setup_credentials()
    @classmethod
    def setup_clients(cls):
        super(LXDServersTestJSON, cls).setup_clients()
        # Direct connection to the local LXD daemon for white-box asserts.
        cls.lxd = client.Client()
        cls.client = cls.os_admin.servers_client
        cls.flavors_client = cls.os_admin.flavors_client
    @classmethod
    def resource_setup(cls):
        # Boot one validatable server shared by every test in the class.
        cls.set_validation_resources()
        super(LXDServersTestJSON, cls).resource_setup()
        cls.meta = {'hello': 'world'}
        cls.accessIPv4 = '1.1.1.1'
        cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
        cls.name = data_utils.rand_name(cls.__name__ + '-server')
        cls.password = data_utils.rand_password()
        disk_config = cls.disk_config
        cls.server_initial = cls.create_test_server(
            validatable=True,
            wait_until='ACTIVE',
            name=cls.name,
            metadata=cls.meta,
            accessIPv4=cls.accessIPv4,
            accessIPv6=cls.accessIPv6,
            disk_config=disk_config,
            adminPass=cls.password)
        # Re-fetch so cls.server carries the admin view of the server,
        # including OS-EXT-SRV-ATTR:instance_name used by the tests below.
        cls.server = (
            cls.client.show_server(cls.server_initial['id'])['server'])
    def test_profile_configuration(self):
        # Verify that the profile was created
        profile = self.lxd.profiles.get(
            self.server['OS-EXT-SRV-ATTR:instance_name'])
        self.assertEqual(
            self.server['OS-EXT-SRV-ATTR:instance_name'], profile.name)
        self.assertIn('raw.lxc', profile.config)
        self.assertIn('boot.autostart', profile.config)
        self.assertIn('limits.cpu', profile.config)
        self.assertIn('limits.memory', profile.config)
        self.assertIn('root', profile.devices)
    def test_verify_created_server_vcpus(self):
        # Verify that the number of vcpus reported by the instance matches
        # the amount stated by the flavor
        flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
        profile = self.lxd.profiles.get(
            self.server['OS-EXT-SRV-ATTR:instance_name'])
        self.assertEqual(
            '%s' % flavor['vcpus'], profile.config['limits.cpu'])
    def test_verify_created_server_memory(self):
        # Verify that the memory reported by the instance matches
        # the amount stated by the flavor
        flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
        profile = self.lxd.profiles.get(
            self.server['OS-EXT-SRV-ATTR:instance_name'])
        self.assertEqual(
            '%sMB' % flavor['ram'], profile.config['limits.memory'])
    def test_verify_server_root_size(self):
        # Root-disk size in the profile must match the flavor's disk.
        flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
        profile = self.lxd.profiles.get(
            self.server['OS-EXT-SRV-ATTR:instance_name'])
        self.assertEqual(
            '%sGB' % flavor['disk'], profile.devices['root']['size'])
    def test_verify_console_log(self):
        # Verify that the console log for the container exists
        profile = self.lxd.profiles.get(
            self.server['OS-EXT-SRV-ATTR:instance_name'])
        self.assertIn('lxc.console.logfile', profile.config['raw.lxc'])
    def test_verify_network_configuration(self):
        # Verify network is configured for the instance
        profile = self.lxd.profiles.get(
            self.server['OS-EXT-SRV-ATTR:instance_name'])
        # NOTE(review): assumes at least one non-root device exists,
        # otherwise network_device is unbound below — confirm.
        for device in profile.devices:
            if 'root' not in device:
                network_device = device
        self.assertEqual('nic', profile.devices[network_device]['type'])
        self.assertEqual('bridged', profile.devices[network_device]['nictype'])
        # NOTE(review): asserts parent == device name — confirm intended.
        self.assertEqual(
            network_device, profile.devices[network_device]['parent'])
    def test_container_configuration_valid(self):
        # Verify container configuration is correct
        profile = self.lxd.profiles.get(
            self.server['OS-EXT-SRV-ATTR:instance_name'])
        container = self.lxd.containers.get(
            self.server['OS-EXT-SRV-ATTR:instance_name'])
        flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
        self.assertEqual(profile.name, container.profiles[0])
        self.assertIn('raw.lxc', container.expanded_config)
        self.assertEqual(
            '%s' % flavor['vcpus'], container.expanded_config['limits.cpu'])
        self.assertEqual(
            '%sMB' % flavor['ram'], container.expanded_config['limits.memory'])
        self.assertEqual(
            '%sGB' % flavor['disk'],
            container.expanded_devices['root']['size'])
        # NOTE(review): as above, assumes a non-root device exists.
        for device in profile.devices:
            if 'root' not in device:
                network_device = device
        self.assertIn(network_device, container.expanded_devices)
        self.assertEqual(
            'nic', container.expanded_devices[network_device]['type'])
        self.assertEqual(
            'bridged', container.expanded_devices[network_device]['nictype'])
        self.assertEqual(
            network_device,
            container.expanded_devices[network_device]['parent'])

View File

@ -1,119 +0,0 @@
# Copyright 2016 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from pylxd import client
from tempest.api.compute import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils.linux import remote_client
CONF = config.CONF
class LXDServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
    """Verify ephemeral-disk handling for servers booted from a flavor."""
    # Nova disk_config mode for the test server.
    disk_config = 'AUTO'
    @classmethod
    def setup_credentials(cls):
        # Network resources must be requested before credentials are set up.
        cls.prepare_instance_network()
        super(LXDServersWithSpecificFlavorTestJSON, cls).setup_credentials()
    @classmethod
    def setup_clients(cls):
        super(LXDServersWithSpecificFlavorTestJSON, cls).setup_clients()
        # NOTE(review): admin flavors client is bound as ``flavor_client``
        # while the test below also uses the base class ``flavors_client``
        # — confirm the admin/non-admin mix is intended.
        cls.flavor_client = cls.os_admin.flavors_client
        cls.client = cls.os_admin.servers_client
    @classmethod
    def resource_setup(cls):
        cls.set_validation_resources()
        super(LXDServersWithSpecificFlavorTestJSON, cls).resource_setup()
    def test_verify_created_server_ephemeral_disk(self):
        # Verify that the ephemeral disk is created when creating server
        flavor_base = self.flavors_client.show_flavor(
            self.flavor_ref)['flavor']
        def create_flavor_with_extra_specs():
            # Same ram/vcpus/disk as the base flavor, plus 1G ephemeral.
            flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
            flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
            ram = flavor_base['ram']
            vcpus = flavor_base['vcpus']
            disk = flavor_base['disk']
            # Create a flavor with extra specs
            flavor = (self.flavor_client.
                      create_flavor(name=flavor_with_eph_disk_name,
                                    ram=ram, vcpus=vcpus, disk=disk,
                                    id=flavor_with_eph_disk_id,
                                    ephemeral=1))['flavor']
            self.addCleanup(flavor_clean_up, flavor['id'])
            return flavor['id']
        # NOTE(review): defined but never called in this test.
        def create_flavor_without_extra_specs():
            flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
            flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
            ram = flavor_base['ram']
            vcpus = flavor_base['vcpus']
            disk = flavor_base['disk']
            # Create a flavor without extra specs
            flavor = (self.flavor_client.
                      create_flavor(name=flavor_no_eph_disk_name,
                                    ram=ram, vcpus=vcpus, disk=disk,
                                    id=flavor_no_eph_disk_id))['flavor']
            self.addCleanup(flavor_clean_up, flavor['id'])
            return flavor['id']
        def flavor_clean_up(flavor_id):
            # Delete the throwaway flavor and wait for it to disappear.
            self.flavor_client.delete_flavor(flavor_id)
            self.flavor_client.wait_for_resource_deletion(flavor_id)
        flavor_with_eph_disk_id = create_flavor_with_extra_specs()
        admin_pass = self.image_ssh_password
        server_with_eph_disk = self.create_test_server(
            validatable=True,
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_with_eph_disk_id)
        server_with_eph_disk = self.client.show_server(
            server_with_eph_disk['id'])['server']
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server_with_eph_disk),
            self.ssh_user,
            admin_pass,
            self.validation_resources['keypair']['private_key'],
            server=server_with_eph_disk,
            servers_client=self.client)
        # Touch a file on the ephemeral mount inside the guest, then check
        # it is visible on the host via the profile's ephemeral0 source.
        cmd = 'sudo touch /mnt/tempest.txt'
        linux_client.exec_command(cmd)
        lxd = client.Client()
        profile = lxd.profiles.get(server_with_eph_disk[
            'OS-EXT-SRV-ATTR:instance_name'])
        tempfile = '%s/tempest.txt' % profile.devices['ephemeral0']['source']
        self.assertTrue(os.path.exists(tempfile))

View File

@ -1,105 +0,0 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2016 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pylxd import client
from tempest.api.compute import base
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
CONF = config.CONF
class LXDVolumeTests(base.BaseV2ComputeAdminTest):
    """Verify volume attachments are reflected in the server's LXD profile."""
    # Nova disk_config mode for the shared server.
    disk_config = 'AUTO'
    def __init__(self, *args, **kwargs):
        super(LXDVolumeTests, self).__init__(*args, **kwargs)
        # Set by _create_and_attach_volume; guards detach-on-cleanup.
        self.attachment = None
    @classmethod
    def setup_credentials(cls):
        # Network resources must be requested before credentials are set up.
        cls.prepare_instance_network()
        super(LXDVolumeTests, cls).setup_credentials()
    @classmethod
    def setup_clients(cls):
        super(LXDVolumeTests, cls).setup_clients()
        # Direct connection to the local LXD daemon for white-box asserts.
        cls.lxd = client.Client()
        cls.client = cls.os_admin.servers_client
        cls.flavors_client = cls.os_admin.flavors_client
    @classmethod
    def resource_setup(cls):
        # Boot one validatable server shared by the tests.
        cls.set_validation_resources()
        super(LXDVolumeTests, cls).resource_setup()
        cls.meta = {'hello': 'world'}
        cls.accessIPv4 = '1.1.1.1'
        cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
        cls.name = data_utils.rand_name(cls.__name__ + '-server')
        cls.password = data_utils.rand_password()
        disk_config = cls.disk_config
        cls.server_initial = cls.create_test_server(
            validatable=True,
            wait_until='ACTIVE',
            name=cls.name,
            metadata=cls.meta,
            accessIPv4=cls.accessIPv4,
            accessIPv6=cls.accessIPv6,
            disk_config=disk_config,
            adminPass=cls.password)
        cls.server = (
            cls.client.show_server(cls.server_initial['id'])['server'])
        # Guest device name used for the attachment path assertions.
        cls.device = CONF.compute.volume_device_name
    def _detach(self, server_id, volume_id):
        # Detach only if an attachment was actually made.
        # NOTE(review): uses waiters.wait_for_volume_status while the
        # scenario base code uses wait_for_volume_resource_status —
        # confirm which helper the targeted tempest release provides.
        if self.attachment:
            self.servers_client.detach_volume(server_id, volume_id)
            waiters.wait_for_volume_status(self.volumes_client,
                                           volume_id, 'available')
    def _create_and_attach_volume(self, server):
        # Create a volume and wait for it to become ready
        vol_name = data_utils.rand_name(self.__class__.__name__ + '-volume')
        volume = self.volumes_client.create_volume(
            size=CONF.volume.volume_size, display_name=vol_name)['volume']
        self.addCleanup(self.delete_volume, volume['id'])
        waiters.wait_for_volume_status(self.volumes_client,
                                       volume['id'], 'available')
        # Attach the volume to the server
        self.attachment = self.servers_client.attach_volume(
            server['id'],
            volumeId=volume['id'],
            device='/dev/%s' % self.device)['volumeAttachment']
        waiters.wait_for_volume_status(self.volumes_client,
                                       volume['id'], 'in-use')
        self.addCleanup(self._detach, server['id'], volume['id'])
        return volume
    def test_create_server_and_attach_volume(self):
        # Verify that LXD profile has the correct configuration
        # for volumes
        volume = self._create_and_attach_volume(self.server)
        profile = self.lxd.profiles.get(
            self.server['OS-EXT-SRV-ATTR:instance_name'])
        self.assertIn(volume['id'], [device for device in profile.devices])
        self.assertEqual(
            '/dev/%s' % self.device, profile.devices[volume['id']]['path'])

View File

@ -1,697 +0,0 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
from oslo_log import log
from oslo_serialization import jsonutils as json
from tempest.common import compute
from tempest.common import image as common_image
from tempest.common.utils.linux import remote_client
from tempest.common.utils import net_utils
from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
import tempest.test
CONF = config.CONF
LOG = log.getLogger(__name__)
class ScenarioTest(tempest.test.BaseTestCase):
"""Base class for scenario tests. Uses tempest own clients. """
credentials = ['primary']
    @classmethod
    def setup_clients(cls):
        """Bind the tempest service clients used by the scenario helpers."""
        super(ScenarioTest, cls).setup_clients()
        # Clients (in alphabetical order)
        cls.flavors_client = cls.os_primary.flavors_client
        cls.compute_floating_ips_client = (
            cls.os_primary.compute_floating_ips_client)
        if CONF.service_available.glance:
            # Check if glance v1 is available to determine which client to use.
            if CONF.image_feature_enabled.api_v1:
                cls.image_client = cls.os_primary.image_client
            elif CONF.image_feature_enabled.api_v2:
                cls.image_client = cls.os_primary.image_client_v2
            else:
                raise lib_exc.InvalidConfiguration(
                    'Either api_v1 or api_v2 must be True in '
                    '[image-feature-enabled].')
        # Compute image client
        cls.compute_images_client = cls.os_primary.compute_images_client
        cls.keypairs_client = cls.os_primary.keypairs_client
        # Nova security groups client
        cls.compute_security_groups_client = (
            cls.os_primary.compute_security_groups_client)
        cls.compute_security_group_rules_client = (
            cls.os_primary.compute_security_group_rules_client)
        cls.servers_client = cls.os_primary.servers_client
        cls.interface_client = cls.os_primary.interfaces_client
        # Neutron network client
        cls.networks_client = cls.os_primary.networks_client
        cls.ports_client = cls.os_primary.ports_client
        cls.routers_client = cls.os_primary.routers_client
        cls.subnets_client = cls.os_primary.subnets_client
        cls.floating_ips_client = cls.os_primary.floating_ips_client
        cls.security_groups_client = cls.os_primary.security_groups_client
        cls.security_group_rules_client = (
            cls.os_primary.security_group_rules_client)
        # Volume clients only when a volume API (v2/v3) is enabled.
        if (CONF.volume_feature_enabled.api_v2 or
                CONF.volume_feature_enabled.api_v3):
            cls.volumes_client = cls.os_primary.volumes_client_latest
            cls.snapshots_client = cls.os_primary.snapshots_client_latest
# ## Test functions library
#
# The create_[resource] functions only return body and discard the
# resp part which is not used in scenario tests
def _create_port(self, network_id, client=None, namestart='port-quotatest',
**kwargs):
if not client:
client = self.ports_client
name = data_utils.rand_name(namestart)
result = client.create_port(
name=name,
network_id=network_id,
**kwargs)
self.assertIsNotNone(result, 'Unable to allocate port')
port = result['port']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_port, port['id'])
return port
def create_keypair(self, client=None):
if not client:
client = self.keypairs_client
name = data_utils.rand_name(self.__class__.__name__)
# We don't need to create a keypair by pubkey in scenario
body = client.create_keypair(name=name)
self.addCleanup(client.delete_keypair, name)
return body['keypair']
    def create_server(self, name=None, image_id=None, flavor=None,
                      validatable=False, wait_until='ACTIVE',
                      clients=None, **kwargs):
        """Wrapper utility that returns a test server.

        This wrapper utility calls the common create test server and
        returns a test server. The purpose of this wrapper is to minimize
        the impact on the code of the tests already using this
        function.

        :param name: server name; randomized from the class name when None
        :param image_id: image to boot from (service default when None)
        :param flavor: flavor to use (service default when None)
        :param validatable: forwarded to compute.create_test_server
        :param wait_until: server status to wait for
        :param clients: credential clients to use (primary when None)
        """
        # NOTE(jlanoux): As a first step, ssh checks in the scenario
        # tests need to be run regardless of the run_validation and
        # validatable parameters and thus until the ssh validation job
        # becomes voting in CI. The test resources management and IP
        # association are taken care of in the scenario tests.
        # Therefore, the validatable parameter is set to false in all
        # those tests. In this way create_server just return a standard
        # server and the scenario tests always perform ssh checks.
        # Needed for the cross_tenant_traffic test:
        if clients is None:
            clients = self.os_primary
        if name is None:
            name = data_utils.rand_name(self.__class__.__name__ + "-server")
        vnic_type = CONF.network.port_vnic_type
        # If vnic_type is configured create port for
        # every network
        if vnic_type:
            ports = []
            create_port_body = {'binding:vnic_type': vnic_type,
                                'namestart': 'port-smoke'}
            if kwargs:
                # Convert security group names to security group ids
                # to pass to create_port
                if 'security_groups' in kwargs:
                    security_groups = \
                        clients.security_groups_client.list_security_groups(
                        ).get('security_groups')
                    sec_dict = dict([(s['name'], s['id'])
                                     for s in security_groups])
                    sec_groups_names = [s['name'] for s in kwargs.pop(
                        'security_groups')]
                    security_groups_ids = [sec_dict[s]
                                           for s in sec_groups_names]
                    if security_groups_ids:
                        create_port_body[
                            'security_groups'] = security_groups_ids
                networks = kwargs.pop('networks', [])
            else:
                networks = []
            # If there are no networks passed to us we look up
            # for the project's private networks and create a port.
            # The same behaviour as we would expect when passing
            # the call to the clients with no networks
            if not networks:
                networks = clients.networks_client.list_networks(
                    **{'router:external': False, 'fields': 'id'})['networks']
            # It's net['uuid'] if networks come from kwargs
            # and net['id'] if they come from
            # clients.networks_client.list_networks
            for net in networks:
                net_id = net.get('uuid', net.get('id'))
                if 'port' not in net:
                    port = self._create_port(network_id=net_id,
                                             client=clients.ports_client,
                                             **create_port_body)
                    ports.append({'port': port['id']})
                else:
                    ports.append({'port': net['port']})
            if ports:
                kwargs['networks'] = ports
            self.ports = ports
        tenant_network = self.get_tenant_network()
        body, servers = compute.create_test_server(
            clients,
            tenant_network=tenant_network,
            wait_until=wait_until,
            name=name, flavor=flavor,
            image_id=image_id, **kwargs)
        # Wait for termination before deleting so cleanup is deterministic.
        self.addCleanup(waiters.wait_for_server_termination,
                        clients.servers_client, body['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        clients.servers_client.delete_server, body['id'])
        server = clients.servers_client.show_server(body['id'])['server']
        return server
    def create_volume(self, size=None, name=None, snapshot_id=None,
                      imageRef=None, volume_type=None):
        """Create a cinder volume, schedule its deletion, and return it.

        ``size`` defaults to the configured volume size and is grown to
        the image's minDisk when an ``imageRef`` is supplied.
        """
        if size is None:
            size = CONF.volume.volume_size
        if imageRef:
            # An image-backed volume must be at least the image's minDisk.
            # NOTE(review): min_disk may be absent/None; max(size, None)
            # would fail on Python 3 — confirm images always report minDisk.
            image = self.compute_images_client.show_image(imageRef)['image']
            min_disk = image.get('minDisk')
            size = max(size, min_disk)
        if name is None:
            name = data_utils.rand_name(self.__class__.__name__ + "-volume")
        kwargs = {'display_name': name,
                  'snapshot_id': snapshot_id,
                  'imageRef': imageRef,
                  'volume_type': volume_type,
                  'size': size}
        volume = self.volumes_client.create_volume(**kwargs)['volume']
        self.addCleanup(self.volumes_client.wait_for_resource_deletion,
                        volume['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.volumes_client.delete_volume, volume['id'])
        # NOTE(e0ne): Cinder API v2 uses name instead of display_name
        if 'display_name' in volume:
            self.assertEqual(name, volume['display_name'])
        else:
            self.assertEqual(name, volume['name'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')
        # The volume retrieved on creation has a non-up-to-date status.
        # Retrieval after it becomes active ensures correct details.
        volume = self.volumes_client.show_volume(volume['id'])['volume']
        return volume
    def create_volume_type(self, client=None, name=None, backend_name=None):
        """Create a randomly named volume type and return its body."""
        if not client:
            client = self.admin_volume_types_client
        if not name:
            class_name = self.__class__.__name__
            name = data_utils.rand_name(class_name + '-volume-type')
        randomized_name = data_utils.rand_name('scenario-type-' + name)
        LOG.debug("Creating a volume type: {name} on backend {backend}"
                  .format(name=randomized_name, backend=backend_name))
        extra_specs = {}
        if backend_name:
            # Pin the type to a specific backend via volume_backend_name.
            extra_specs = {"volume_backend_name": backend_name}
        body = client.create_volume_type(name=randomized_name,
                                         extra_specs=extra_specs)
        volume_type = body['volume_type']
        self.assertIn('id', volume_type)
        self.addCleanup(client.delete_volume_type, volume_type['id'])
        return volume_type
    def _create_loginable_secgroup_rule(self, secgroup_id=None):
        """Add ssh (tcp/22) and ping (icmp) ingress rules to a secgroup.

        When ``secgroup_id`` is None the project's 'default' group is used.
        Returns the list of created rule bodies.
        """
        _client = self.compute_security_groups_client
        _client_rules = self.compute_security_group_rules_client
        if secgroup_id is None:
            sgs = _client.list_security_groups()['security_groups']
            for sg in sgs:
                if sg['name'] == 'default':
                    secgroup_id = sg['id']
        # These rules are intended to permit inbound ssh and icmp
        # traffic from all sources, so no group_id is provided.
        # Setting a group_id would only permit traffic from ports
        # belonging to the same security group.
        rulesets = [
            {
                # ssh
                'ip_protocol': 'tcp',
                'from_port': 22,
                'to_port': 22,
                'cidr': '0.0.0.0/0',
            },
            {
                # ping
                'ip_protocol': 'icmp',
                'from_port': -1,
                'to_port': -1,
                'cidr': '0.0.0.0/0',
            }
        ]
        rules = list()
        for ruleset in rulesets:
            sg_rule = _client_rules.create_security_group_rule(
                parent_group_id=secgroup_id, **ruleset)['security_group_rule']
            rules.append(sg_rule)
        return rules
    def _create_security_group(self):
        """Create a security group with loginable rules; return its body."""
        # Create security group
        sg_name = data_utils.rand_name(self.__class__.__name__)
        sg_desc = sg_name + " description"
        secgroup = self.compute_security_groups_client.create_security_group(
            name=sg_name, description=sg_desc)['security_group']
        self.assertEqual(secgroup['name'], sg_name)
        self.assertEqual(secgroup['description'], sg_desc)
        self.addCleanup(
            test_utils.call_and_ignore_notfound_exc,
            self.compute_security_groups_client.delete_security_group,
            secgroup['id'])
        # Add rules to the security group
        self._create_loginable_secgroup_rule(secgroup['id'])
        return secgroup
    def get_remote_client(self, ip_address, username=None, private_key=None):
        """Get a SSH client to a remote server

        @param ip_address the server floating or fixed IP address to use
                          for ssh validation
        @param username name of the Linux account on the remote server
        @param private_key the SSH private key to use
        @return a RemoteClient object
        """
        if username is None:
            username = CONF.validation.image_ssh_user
        # Set this with 'keypair' or others to log in with keypair or
        # username/password.
        if CONF.validation.auth_method == 'keypair':
            password = None
            if private_key is None:
                # Fall back to the keypair created by the calling test.
                private_key = self.keypair['private_key']
        else:
            password = CONF.validation.image_ssh_password
            private_key = None
        linux_client = remote_client.RemoteClient(ip_address, username,
                                                  pkey=private_key,
                                                  password=password)
        try:
            linux_client.validate_authentication()
        except Exception as e:
            message = ("Initializing SSH connection to {ip} failed. "
                       "Error: {error}"
                       .format(ip=ip_address,
                               error=e))
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            LOG.exception(message)
            # Dump guest console logs to help diagnose the ssh failure.
            self._log_console_output()
            raise
        return linux_client
    def _image_create(self, name, fmt, path,
                      disk_format=None, properties=None):
        """Upload a local image file to glance (v1 or v2); return its id."""
        if properties is None:
            properties = {}
        name = data_utils.rand_name('%s-' % name)
        params = {
            'name': name,
            'container_format': fmt,
            'disk_format': disk_format or fmt,
        }
        if CONF.image_feature_enabled.api_v1:
            # v1 carries metadata in request headers.
            params['is_public'] = 'False'
            params['properties'] = properties
            params = {'headers': common_image.image_meta_to_headers(**params)}
        else:
            params['visibility'] = 'private'
            # Additional properties are flattened out in the v2 API.
            params.update(properties)
        body = self.image_client.create_image(**params)
        image = body['image'] if 'image' in body else body
        self.addCleanup(self.image_client.delete_image, image['id'])
        self.assertEqual("queued", image['status'])
        with open(path, 'rb') as image_file:
            if CONF.image_feature_enabled.api_v1:
                self.image_client.update_image(image['id'], data=image_file)
            else:
                self.image_client.store_image_file(image['id'], image_file)
        return image['id']
    def glance_image_create(self):
        """Register the configured scenario image; fall back to UEC parts."""
        img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
        aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
        ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
        ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
        img_container_format = CONF.scenario.img_container_format
        img_disk_format = CONF.scenario.img_disk_format
        img_properties = CONF.scenario.img_properties
        LOG.debug("paths: img: {img}, container_format: {cf}, "
                  "disk_format: {df}, properties: {props}, ami: {ami}, "
                  "ari: {ari}, aki: {aki}"
                  .format(img=img_path,
                          cf=img_container_format,
                          df=img_disk_format,
                          props=img_properties,
                          ami=ami_img_path,
                          ari=ari_img_path,
                          aki=aki_img_path))
        try:
            image = self._image_create('scenario-img',
                                       img_container_format,
                                       img_path,
                                       disk_format=img_disk_format,
                                       properties=img_properties)
        except IOError:
            # The single-file image is missing; upload kernel/ramdisk/ami.
            LOG.debug("A qcow2 image was not found. Try to get a uec image.")
            kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
            ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
            properties = {'kernel_id': kernel, 'ramdisk_id': ramdisk}
            image = self._image_create('scenario-ami', 'ami',
                                       path=ami_img_path,
                                       properties=properties)
        LOG.debug("image: {}".format(image))
        return image
    def _log_console_output(self, servers=None):
        """Log the console output of ``servers`` (all servers when None)."""
        if not CONF.compute_feature_enabled.console_output:
            LOG.debug("Console output not supported, cannot log")
            return
        if not servers:
            servers = self.servers_client.list_servers()
            servers = servers['servers']
        for server in servers:
            try:
                console_output = self.servers_client.get_console_output(
                    server['id'])['output']
                LOG.debug("Console output for {}\nbody=\n{}"
                          .format(server['id'], console_output))
            except lib_exc.NotFound:
                LOG.debug("Server {} disappeared(deleted) while looking "
                          "for the console log".format(server['id']))
def _log_net_info(self, exc):
# network debug is called as part of ssh init
if not isinstance(exc, lib_exc.SSHTimeout):
LOG.debug("Network information on a devstack host")
    def create_server_snapshot(self, server, name=None):
        """Snapshot ``server`` to a glance image and return its metadata.

        Cleanups are registered for the image and for any backing volume
        snapshot created for boot-from-volume servers.
        """
        # Glance client
        _image_client = self.image_client
        # Compute client
        _images_client = self.compute_images_client
        if name is None:
            name = data_utils.rand_name(self.__class__.__name__ + 'snapshot')
        LOG.debug("Creating a snapshot image for server: {}"
                  .format(server['name']))
        image = _images_client.create_image(server['id'], name=name)
        # The new image id is only exposed via the Location response header.
        image_id = image.response['location'].split('images/')[1]
        waiters.wait_for_image_status(_image_client, image_id, 'active')
        self.addCleanup(_image_client.wait_for_resource_deletion,
                        image_id)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        _image_client.delete_image, image_id)
        if CONF.image_feature_enabled.api_v1:
            # In glance v1 the additional properties are stored in the headers.
            resp = _image_client.check_image(image_id)
            snapshot_image = common_image.get_image_meta_from_headers(resp)
            image_props = snapshot_image.get('properties', {})
        else:
            # In glance v2 the additional properties are flattened.
            snapshot_image = _image_client.show_image(image_id)
            image_props = snapshot_image
        bdm = image_props.get('block_device_mapping')
        if bdm:
            bdm = json.loads(bdm)
            if bdm and 'snapshot_id' in bdm[0]:
                # Boot-from-volume snapshot: clean up the volume snapshot too.
                snapshot_id = bdm[0]['snapshot_id']
                self.addCleanup(
                    self.snapshots_client.wait_for_resource_deletion,
                    snapshot_id)
                self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                                self.snapshots_client.delete_snapshot,
                                snapshot_id)
                waiters.wait_for_volume_resource_status(self.snapshots_client,
                                                        snapshot_id,
                                                        'available')
        image_name = snapshot_image['name']
        self.assertEqual(name, image_name)
        LOG.debug("Created snapshot image {} for server {}"
                  .format(image_name, server['name']))
        return snapshot_image
    def nova_volume_attach(self, server, volume_to_attach):
        """Attach a volume via nova and wait until it is 'in-use'."""
        volume = self.servers_client.attach_volume(
            server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
            % CONF.compute.volume_device_name)['volumeAttachment']
        self.assertEqual(volume_to_attach['id'], volume['id'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'in-use')
        # Return the updated volume after the attachment
        return self.volumes_client.show_volume(volume['id'])['volume']
def nova_volume_detach(self, server, volume):
self.servers_client.detach_volume(server['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
volume = self.volumes_client.show_volume(volume['id'])['volume']
self.assertEqual('available', volume['status'])
    def rebuild_server(self, server_id, image=None,
                       preserve_ephemeral=False, wait=True,
                       rebuild_kwargs=None):
        """Rebuild ``server_id`` to ``image`` (config default when None)."""
        if image is None:
            image = CONF.compute.image_ref
        rebuild_kwargs = rebuild_kwargs or {}
        LOG.debug("Rebuilding server (id: {_id}, image: {image}, "
                  "preserve eph: {ephemeral})"
                  .format(_id=server_id,
                          image=image,
                          ephemeral=preserve_ephemeral))
        self.servers_client.rebuild_server(
            server_id=server_id, image_ref=image,
            preserve_ephemeral=preserve_ephemeral,
            **rebuild_kwargs)
        if wait:
            waiters.wait_for_server_status(self.servers_client,
                                           server_id, 'ACTIVE')
    def ping_ip_address(self, ip_address, should_succeed=True,
                        ping_timeout=None, mtu=None):
        """Ping ``ip_address`` until it matches ``should_succeed``.

        Returns True when the expected reachability was observed within
        the timeout, False otherwise.
        """
        timeout = ping_timeout or CONF.validation.ping_timeout
        cmd = ['ping', '-c1', '-w1']
        if mtu:
            cmd += [
                # don't fragment
                '-M', 'do',
                # ping receives just the size of ICMP payload
                '-s', str(net_utils.get_ping_payload_size(mtu, 4))
            ]
        cmd.append(ip_address)
        def ping():
            # One probe; True means "matches the expected reachability".
            proc = subprocess.Popen(cmd,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            proc.communicate()
            return (proc.returncode == 0) == should_succeed
        caller = test_utils.find_test_caller()
        LOG.debug("{caller} begins to ping {ip} in {timeout} sec and the"
                  " expected result is {should_succeed}"
                  .format(caller=caller,
                          ip=ip_address,
                          timeout=timeout,
                          should_succeed=('reachable' if should_succeed
                                          else 'unreachable')))
        result = test_utils.call_until_true(ping, timeout, 1)
        LOG.debug("{caller} finishes ping {ip} in {timeout} sec and the "
                  "ping result is {result}"
                  .format(caller=caller,
                          ip=ip_address,
                          timeout=timeout,
                          result='expected' if result else 'unexpected'))
        return result
    def check_vm_connectivity(self, ip_address,
                              username=None,
                              private_key=None,
                              should_connect=True,
                              mtu=None):
        """Check server connectivity

        :param ip_address: server to test against
        :param username: server's ssh username
        :param private_key: server's ssh private key to be used
        :param should_connect: True/False indicates positive/negative test
            positive - attempt ping and ssh
            negative - attempt ping and fail if succeed
        :param mtu: network MTU to use for connectivity validation

        :raises: AssertError if the result of the connectivity check does
            not match the value of the should_connect param
        """
        if should_connect:
            msg = "Timed out waiting for %s to become reachable" % ip_address
        else:
            msg = "ip address %s is reachable" % ip_address
        self.assertTrue(self.ping_ip_address(ip_address,
                                             should_succeed=should_connect,
                                             mtu=mtu),
                        msg=msg)
        if should_connect:
            # no need to check ssh for negative connectivity
            self.get_remote_client(ip_address, username, private_key)
    def check_public_network_connectivity(self, ip_address, username,
                                          private_key, should_connect=True,
                                          msg=None, servers=None, mtu=None):
        """Assert (un)reachability of ``ip_address`` over the public net.

        On failure the exception is logged together with the console
        output of ``servers`` before being re-raised.
        """
        # The target login is assumed to have been configured for
        # key-based authentication by cloud-init.
        LOG.debug("checking network connections to IP {} with user: {}"
                  .format(ip_address, username))
        try:
            self.check_vm_connectivity(ip_address,
                                       username,
                                       private_key,
                                       should_connect=should_connect,
                                       mtu=mtu)
        except Exception:
            ex_msg = 'Public network connectivity check failed'
            if msg:
                ex_msg += ": " + msg
            LOG.exception(ex_msg)
            self._log_console_output(servers)
            raise
    def create_floating_ip(self, thing, pool_name=None):
        """Create a floating IP and associates to a server on Nova"""
        if not pool_name:
            pool_name = CONF.network.floating_network_name
        floating_ip = (self.compute_floating_ips_client.
                       create_floating_ip(pool=pool_name)['floating_ip'])
        # Best-effort cleanup: ignore NotFound if already deleted.
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.compute_floating_ips_client.delete_floating_ip,
                        floating_ip['id'])
        self.compute_floating_ips_client.associate_floating_ip_to_server(
            floating_ip['ip'], thing['id'])
        return floating_ip
def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
                     private_key=None):
    """Write the current date into <mount_path>/timestamp on a guest.

    When ``dev_name`` is given, the device is first formatted and
    mounted on ``mount_path`` (and unmounted again afterwards).
    Returns the timestamp file's contents as read back over ssh.
    """
    ssh = self.get_remote_client(ip_address, private_key=private_key)
    use_device = dev_name is not None
    if use_device:
        ssh.make_fs(dev_name)
        ssh.mount(dev_name, mount_path)
    cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
    ssh.exec_command(cmd_timestamp)
    timestamp = ssh.exec_command('sudo cat %s/timestamp' % mount_path)
    if use_device:
        ssh.umount(mount_path)
    return timestamp
def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
                  private_key=None):
    """Read back the timestamp file written by create_timestamp().

    When ``dev_name`` is given, the device is mounted on ``mount_path``
    first and unmounted again after reading.
    """
    ssh = self.get_remote_client(ip_address, private_key=private_key)
    mounted = dev_name is not None
    if mounted:
        ssh.mount(dev_name, mount_path)
    timestamp = ssh.exec_command('sudo cat %s/timestamp' % mount_path)
    if mounted:
        ssh.umount(mount_path)
    return timestamp
def get_server_ip(self, server):
    """Get the server fixed or floating IP.

    Based on the configuration we're in, return a correct ip
    address for validating that a guest is up.
    """
    method = CONF.validation.connect_method
    if method == 'floating':
        # Tests calling this helper have no floating IP of their own
        # and cannot use the validation resources, so create one here.
        return self.create_floating_ip(server)['ip']
    if method == 'fixed':
        # Determine the network name to look for based on config or
        # creds provider network resources.
        if CONF.validation.network_for_ssh:
            addresses = server['addresses'][
                CONF.validation.network_for_ssh]
        else:
            creds = self._get_credentials_provider().get_primary_creds()
            network = getattr(creds, 'network', None)
            addresses = (server['addresses'][network['name']]
                         if network else [])
        for address in addresses:
            right_version = (
                address['version'] == CONF.validation.ip_version_for_ssh)
            if right_version and address['OS-EXT-IPS:type'] == 'fixed':
                return address['addr']
        raise exceptions.ServerUnreachable(server_id=server['id'])
    raise lib_exc.InvalidConfiguration()

View File

@ -1,127 +0,0 @@
# Copyright 2016 Canonical Ltd
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import utils
from tempest import config
from tempest import exceptions
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from nova_lxd_tempest_plugin.tests.scenario import manager
from oslo_serialization import jsonutils
CONF = config.CONF
class TestServerBasicOps(manager.ScenarioTest):
    """The test suite for server basic operations

    This smoke test case follows this basic set of operations:
     * Create a keypair for use in launching an instance
     * Create a security group to control network access in instance
     * Add simple permissive rules to the security group
     * Launch an instance
     * Perform ssh to instance
     * Verify metadata service
     * Verify metadata on config_drive
     * Terminate the instance
    """

    def setUp(self):
        # Cache the config values used by the verify_* helpers below.
        super(TestServerBasicOps, self).setUp()
        self.image_ref = CONF.compute.image_ref
        self.flavor_ref = CONF.compute.flavor_ref
        self.run_ssh = CONF.validation.run_validation
        self.ssh_user = CONF.validation.image_ssh_user

    def verify_ssh(self, keypair):
        # Only attempted when ssh validation is enabled in the config;
        # stores self.fip and self.ssh_client for the later verify_* steps.
        if self.run_ssh:
            # Obtain a floating IP
            self.fip = self.create_floating_ip(self.instance)['ip']
            # Check ssh
            self.ssh_client = self.get_remote_client(
                ip_address=self.fip,
                username=self.ssh_user,
                private_key=keypair['private_key'])

    def verify_metadata(self):
        # Query the EC2-style metadata service from inside the guest and
        # check that its reported public IP matches the floating IP.
        if self.run_ssh and CONF.compute_feature_enabled.metadata_service:
            # Verify metadata service
            md_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'

            def exec_cmd_and_verify_output():
                cmd = 'curl ' + md_url
                result = self.ssh_client.exec_command(cmd)
                if result:
                    msg = ('Failed while verifying metadata on server. Result '
                           'of command "%s" is NOT "%s".' % (cmd, self.fip))
                    self.assertEqual(self.fip, result, msg)
                    return 'Verification is successful!'

            # Retry until the metadata service answers or the compute
            # build timeout elapses.
            if not test_utils.call_until_true(exec_cmd_and_verify_output,
                                              CONF.compute.build_timeout,
                                              CONF.compute.build_interval):
                raise exceptions.TimeoutException('Timed out while waiting to '
                                                  'verify metadata on server. '
                                                  '%s is empty.' % md_url)

    def verify_metadata_on_config_drive(self):
        # Read meta_data.json as written to disk by cloud-init and check
        # that the 'meta' key round-trips the metadata set at boot.
        if self.run_ssh and CONF.compute_feature_enabled.config_drive:
            # Verify metadata on config_drive
            cmd_md = \
                'cat /var/lib/cloud/data/openstack/latest/meta_data.json'
            result = self.ssh_client.exec_command(cmd_md)
            result = jsonutils.loads(result)
            self.assertIn('meta', result)
            msg = ('Failed while verifying metadata on config_drive on server.'
                   ' Result of command "%s" is NOT "%s".' % (cmd_md, self.md))
            self.assertEqual(self.md, result['meta'], msg)

    def verify_networkdata_on_config_drive(self):
        # Sanity-check the presence of the top-level network_data.json
        # keys; no value comparison is performed yet (see TODO below).
        if self.run_ssh and CONF.compute_feature_enabled.config_drive:
            # Verify network data on config_drive
            cmd_md = \
                'cat /var/lib/cloud/data/openstack/latest/network_data.json'
            result = self.ssh_client.exec_command(cmd_md)
            result = jsonutils.loads(result)
            self.assertIn('services', result)
            self.assertIn('links', result)
            self.assertIn('networks', result)
            # TODO(clarkb) construct network_data from known network
            # instance info and do direct comparison.

    @decorators.idempotent_id('7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba')
    @decorators.attr(type='smoke')
    @utils.services('compute', 'network')
    def test_server_basic_ops(self):
        # End-to-end smoke test: boot, ssh, verify metadata (service and
        # config drive), then delete the server.
        keypair = self.create_keypair()
        self.security_group = self._create_security_group()
        security_groups = [{'name': self.security_group['name']}]
        self.md = {'meta1': 'data1', 'meta2': 'data2', 'metaN': 'dataN'}
        self.instance = self.create_server(
            image_id=self.image_ref,
            flavor=self.flavor_ref,
            key_name=keypair['name'],
            security_groups=security_groups,
            config_drive=CONF.compute_feature_enabled.config_drive,
            metadata=self.md,
            wait_until='ACTIVE')
        self.verify_ssh(keypair)
        self.verify_metadata()
        self.verify_metadata_on_config_drive()
        self.verify_networkdata_on_config_drive()
        self.servers_client.delete_server(self.instance['id'])

View File

@ -1,112 +0,0 @@
# Copyright 2013 NEC Corporation
# Copyright 2016 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest import config
from tempest import exceptions
from tempest.lib.common.utils import test_utils
from nova_lxd_tempest_plugin.tests.scenario import manager
CONF = config.CONF
LOG = logging.getLogger(__name__)
class LXDVolumeScenario(manager.ScenarioTest):
    """The test suite for attaching volume to an instance

    The following is the scenario outline:
    1. Boot an instance "instance"
    2. Create a volume "volume1"
    3. Attach volume1 to instance
    4. Create a filesystem on volume1
    5. Mount volume1
    6. Create a file which timestamp is written in volume1
    7. Check for file on instance1
    8. Unmount volume1
    9. Detach volume1 from instance1
    """

    def setUp(self):
        # Cache the config values used throughout the scenario.
        super(LXDVolumeScenario, self).setUp()
        self.image_ref = CONF.compute.image_ref
        self.flavor_ref = CONF.compute.flavor_ref
        self.run_ssh = CONF.validation.run_validation
        self.ssh_user = CONF.validation.image_ssh_user

    @classmethod
    def skip_checks(cls):
        # No skip conditions beyond the base scenario checks.
        super(LXDVolumeScenario, cls).skip_checks()

    def _wait_for_volume_available_on_the_system(self, ip_address,
                                                 private_key):
        # Poll over ssh until the attached volume's device name
        # (CONF.compute.volume_device_name) appears in the guest's
        # partition list; raises TimeoutException on the compute build
        # timeout.
        ssh = self.get_remote_client(ip_address, private_key=private_key)

        def _func():
            part = ssh.get_partitions()
            LOG.debug("Partitions: {}".format(part))
            return CONF.compute.volume_device_name in part

        if not test_utils.call_until_true(_func,
                                          CONF.compute.build_timeout,
                                          CONF.compute.build_interval):
            raise exceptions.TimeoutException

    def test_volume_attach(self):
        # Boot a server, attach a volume, then exercise the volume from
        # inside the guest: mkfs, mount, write a timestamp, verify the
        # file exists, unmount, and finally detach.
        keypair = self.create_keypair()
        self.security_group = self._create_security_group()
        security_groups = [{'name': self.security_group['name']}]
        self.md = {'meta1': 'data1', 'meta2': 'data2', 'metaN': 'dataN'}
        server = self.create_server(
            image_id=self.image_ref,
            flavor=self.flavor_ref,
            key_name=keypair['name'],
            security_groups=security_groups,
            config_drive=CONF.compute_feature_enabled.config_drive,
            metadata=self.md,
            wait_until='ACTIVE')
        volume = self.create_volume()

        # create and add floating IP to server1
        ip_for_server = self.get_server_ip(server)

        self.nova_volume_attach(server, volume)
        self._wait_for_volume_available_on_the_system(ip_for_server,
                                                      keypair['private_key'])
        ssh_client = self.get_remote_client(
            ip_address=ip_for_server,
            username=self.ssh_user,
            private_key=keypair['private_key'])
        ssh_client.exec_command(
            'sudo /sbin/mke2fs -t ext4 /dev/%s'
            % CONF.compute.volume_device_name)
        ssh_client.exec_command(
            'sudo /bin/mount -t ext4 /dev/%s /mnt'
            % CONF.compute.volume_device_name)
        ssh_client.exec_command(
            'sudo sh -c "date > /mnt/timestamp; sync"')
        timestamp = ssh_client.exec_command(
            'test -f /mnt/timestamp && echo ok')
        ssh_client.exec_command(
            'sudo /bin/umount /mnt')
        self.nova_volume_detach(server, volume)
        self.assertEqual(u'ok\n', timestamp)

View File

@ -1,6 +0,0 @@
[DEFAULT]
# The list of modules to copy from oslo-incubator.git
# The base module to hold the copy of openstack.common
base=nova-lxd

View File

@ -1,18 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr!=2.1.0,>=3.1.1 # Apache-2.0
os-brick>=2.3.0 # Apache-2.0
os-vif!=1.8.0,>=1.9.0 # Apache-2.0
oslo.config>=5.2.0 # Apache-2.0
oslo.concurrency>=3.26.0 # Apache-2.0
oslo.utils>=3.36.0 # Apache-2.0
oslo.i18n>=3.20.0 # Apache-2.0
oslo.log>=3.37.0 # Apache-2.0
pylxd>=2.2.6 # Apache-2.0
# XXX: rockstar (17 Feb 2016) - oslo_config imports
# debtcollector, which imports this, but doesn't
# require it in dependencies.
#wrapt>=1.7.0 # BSD License

View File

@ -1,252 +0,0 @@
#!/bin/bash
set -eu
# Print the command-line help for this test-runner script.
# NOTE: this function terminates the process via the bare `exit` at the
# end, so callers never regain control after invoking it.
function usage {
  echo "Usage: $0 [OPTION]..."
  echo "Run Nova's test suite(s)"
  echo ""
  echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
  echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
  echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment"
  echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
  echo " -u, --update Update the virtual environment with any newer package versions"
  echo " -p, --pep8 Just run PEP8 and HACKING compliance check"
  echo " -8, --pep8-only-changed Just run PEP8 and HACKING compliance check on files changed since HEAD~1"
  echo " -P, --no-pep8 Don't run static code checks"
  echo " -c, --coverage Generate coverage report"
  echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger."
  echo " -h, --help Print this usage message"
  echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list"
  echo " --virtual-env-path <path> Location of the virtualenv directory"
  echo " Default: \$(pwd)"
  echo " --virtual-env-name <name> Name of the virtualenv directory"
  echo " Default: .venv"
  echo " --tools-path <dir> Location of the tools directory"
  echo " Default: \$(pwd)"
  echo " --concurrency <concurrency> How many processes to use when running the tests. A value of 0 autodetects concurrency from your CPU count"
  echo " Default: 0"
  echo ""
  echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
  echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
  echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
  exit
}
# Parse the script's command-line arguments into global flag variables.
# Uses bash indirect expansion ${!i} to read the i-th positional
# parameter; options that take a value advance i once more to consume it.
# Any unrecognised option starting with '-' is accumulated in $testropts,
# and every other argument in $testrargs (both forwarded to the runner).
function process_options {
  i=1
  while [ $i -le $# ]; do
    case "${!i}" in
      -h|--help) usage;;
      -V|--virtual-env) always_venv=1; never_venv=0;;
      -N|--no-virtual-env) always_venv=0; never_venv=1;;
      -s|--no-site-packages) no_site_packages=1;;
      -f|--force) force=1;;
      -u|--update) update=1;;
      -p|--pep8) just_pep8=1;;
      -8|--pep8-only-changed) just_pep8_changed=1;;
      -P|--no-pep8) no_pep8=1;;
      -c|--coverage) coverage=1;;
      -d|--debug) debug=1;;
      --virtual-env-path)
        # value-taking option: consume the next positional parameter
        (( i++ ))
        venv_path=${!i}
        ;;
      --virtual-env-name)
        (( i++ ))
        venv_dir=${!i}
        ;;
      --tools-path)
        (( i++ ))
        tools_path=${!i}
        ;;
      --concurrency)
        (( i++ ))
        concurrency=${!i}
        ;;
      -*) testropts="$testropts ${!i}";;
      *) testrargs="$testrargs ${!i}"
    esac
    (( i++ ))
  done
}
# Defaults for all option-controlled globals.  Each may be overridden by
# the corresponding command-line option handled in process_options below
# (defaults are assigned first, then process_options runs).
#
# Fixes: the original assigned `tool_path` (never read anywhere) instead
# of `tools_path` (the variable --tools-path sets and usage documents),
# defaulted venv_dir from the never-set `venv_name`, and exported the
# never-set `tools_dir`/`venv_name` instead of the real variables.
tools_path=${tools_path:-$(pwd)}
venv_path=${venv_path:-$(pwd)}
venv_dir=${venv_dir:-.venv}
with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
force=0
no_site_packages=0
installvenvopts=
testrargs=
testropts=
wrapper=""
just_pep8=0
just_pep8_changed=0
no_pep8=0
coverage=0
debug=0
update=0
concurrency=0

LANG=en_US.UTF-8
LANGUAGE=en_US:en
LC_ALL=C

process_options $@
# Make our paths available to other scripts we call
export venv_path
export venv_dir
export tools_path
export venv=${venv_path}/${venv_dir}

if [ $no_site_packages -eq 1 ]; then
  installvenvopts="--no-site-packages"
fi
# Run the test suite.  In --debug mode runs testtools.run directly (so a
# debugger can be attached) and returns immediately; otherwise runs testr
# through setup.py, pipes the subunit stream through the colorizer, saves
# the stream via copy_subunit_log, and optionally generates an HTML
# coverage report.  Returns the test run's exit status.
function run_tests {
  # Cleanup *pyc
  ${wrapper} find . -type f -name "*.pyc" -delete

  if [ $debug -eq 1 ]; then
    if [ "$testropts" = "" ] && [ "$testrargs" = "" ]; then
      # Default to running all tests if specific test is not
      # provided.
      testrargs="discover ./nova_lxd/tests"
    fi
    ${wrapper} python -m testtools.run $testropts $testrargs

    # Short circuit because all of the testr and coverage stuff
    # below does not make sense when running testtools.run for
    # debugging purposes.
    return $?
  fi

  if [ $coverage -eq 1 ]; then
    TESTRTESTS="$TESTRTESTS --coverage"
  else
    TESTRTESTS="$TESTRTESTS"
  fi

  # Just run the test suites in current environment
  set +e
  # Strip surrounding whitespace from the accumulated test arguments.
  testrargs=`echo "$testrargs" | sed -e's/^\s*\(.*\)\s*$/\1/'`
  TESTRTESTS="$TESTRTESTS --testr-args='--subunit --concurrency $concurrency $testropts $testrargs'"
  # Regenerate egg metadata when setup.cfg is newer than the entry points.
  if [ setup.cfg -nt nova.egg-info/entry_points.txt ]
  then
    ${wrapper} python setup.py egg_info
  fi
  echo "Running \`${wrapper} $TESTRTESTS\`"
  if ${wrapper} which subunit-2to1 2>&1 > /dev/null
  then
    # subunit-2to1 is present, testr subunit stream should be in version 2
    # format. Convert to version one before colorizing.
    bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py"
  else
    bash -c "${wrapper} $TESTRTESTS | ${wrapper} tools/colorizer.py"
  fi
  RESULT=$?
  set -e

  copy_subunit_log

  if [ $coverage -eq 1 ]; then
    echo "Generating coverage report in covhtml/"
    # Don't compute coverage for common code, which is tested elsewhere
    ${wrapper} coverage combine
    ${wrapper} coverage html --include='nova/*' --omit='nova/openstack/common/*' -d covhtml -i
  fi

  return $RESULT
}
# Copy the most recent testrepository subunit stream to ./subunit.log.
# .testrepository/next-stream holds the id of the NEXT run, so the last
# completed run's stream file is next-stream minus one.
function copy_subunit_log {
  LOGNAME=`cat .testrepository/next-stream`
  LOGNAME=$(($LOGNAME - 1))
  LOGNAME=".testrepository/${LOGNAME}"
  cp $LOGNAME subunit.log
}
# Warn when flake8 is about to run outside the virtualenv, where the
# OpenStack HACKING plugin may not be installed and checks can be missed.
function warn_on_flake8_without_venv {
  if [ $never_venv -eq 1 ]; then
    echo "**WARNING**:"
    echo "Running flake8 without virtual env may miss OpenStack HACKING detection"
  fi
}
# Run the flake8 style checks over the whole tree, inside the venv
# wrapper when one is active.
function run_pep8 {
  echo "Running flake8 ..."
  warn_on_flake8_without_venv
  bash -c "${wrapper} flake8"
}
# Main flow: prepare (or interactively offer to create) the virtualenv,
# honour the pep8-only shortcuts, then run the tests and, for full runs,
# the style checks.
TESTRTESTS="python setup.py testr"

if [ $never_venv -eq 0 ]
then
  # Remove the virtual environment if --force used
  if [ $force -eq 1 ]; then
    echo "Cleaning virtualenv..."
    rm -rf ${venv}
  fi
  if [ $update -eq 1 ]; then
    echo "Updating virtualenv..."
    python tools/install_venv.py $installvenvopts
  fi
  if [ -e ${venv} ]; then
    wrapper="${with_venv}"
  else
    if [ $always_venv -eq 1 ]; then
      # Automatically install the virtualenv
      python tools/install_venv.py $installvenvopts
      wrapper="${with_venv}"
    else
      # Interactive prompt; empty answer defaults to "yes".
      echo -e "No virtual environment found...create one? (Y/n) \c"
      read use_ve
      if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
        # Install the virtualenv and run the test suite in it
        python tools/install_venv.py $installvenvopts
        wrapper=${with_venv}
      fi
    fi
  fi
fi

# Delete old coverage data from previous runs
if [ $coverage -eq 1 ]; then
    ${wrapper} coverage erase
fi

if [ $just_pep8 -eq 1 ]; then
    run_pep8
    exit
fi

if [ $just_pep8_changed -eq 1 ]; then
    # NOTE(gilliard) We want use flake8 to check the entirety of every file that has
    # a change in it. Unfortunately the --filenames argument to flake8 only accepts
    # file *names* and there are no files named (eg) "nova/compute/manager.py". The
    # --diff argument behaves surprisingly as well, because although you feed it a
    # diff, it actually checks the file on disk anyway.
    files=$(git diff --name-only HEAD~1 | tr '\n' ' ')
    echo "Running flake8 on ${files}"
    warn_on_flake8_without_venv
    bash -c "diff -u --from-file /dev/null ${files} | ${wrapper} flake8 --diff"
    exit
fi

run_tests

# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
# not when we're running tests individually. To handle this, we need to
# distinguish between options (testropts), which begin with a '-', and
# arguments (testrargs).
if [ -z "$testrargs" ]; then
  if [ $no_pep8 -eq 0 ]; then
    run_pep8
  fi
fi

View File

@ -1,35 +0,0 @@
[metadata]
name = nova-lxd
summary = native lxd driver for openstack
description-file =
README.md
author = OpenStack
author-email = openstack-discuss@lists.openstack.org
home-page = https://www.openstack.org/
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
Intended Audience :: System Administrators
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
[files]
packages =
nova/virt/lxd
nova/tests
nova_lxd_tempest_plugin
[entry_points]
tempest.test_plugins =
nova-lxd-tempest-plugin = nova_lxd_tempest_plugin.plugin:MyPlugin
[build_sphinx]
source-dir = doc/source
build-dir = doc/build
all_files = 1
[upload_sphinx]
upload-dir = doc/build/html

View File

@ -1,36 +0,0 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import os
import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

# Read the project README (sitting next to this setup.py) so it can be
# used as the package's long description; decoded explicitly as UTF-8.
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md'), 'rb') as f:
    long_description = f.read().decode('utf-8')

# All remaining package metadata is supplied by setup.cfg via pbr.
setuptools.setup(
    long_description=long_description,
    long_description_content_type='text/markdown',
    setup_requires=['pbr>=2.0.0'],
    pbr=True)

View File

@ -1,19 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
coverage!=4.4,>=4.5.1 # Apache-2.0
ddt>=1.1.2 # MIT
python-subunit>=1.2.0 # Apache-2.0/BSD
sphinx!=1.6.6,!=1.6.7,>=1.6.5 # BSD
sphinx-feature-classification>=0.1.0 # Apache 2.0
oslosphinx>=4.18.0 # Apache-2.0
oslotest>=3.3.0 # Apache-2.0
testrepository>=0.0.20 # Apache-2.0/BSD
testscenarios>=0.5.0 # Apache-2.0/BSD
testtools>=2.3.0 # MIT
stestr>=1.0.0 # Apache-2.0
nosexcover>=1.0.11 # BSD
wsgi-intercept>=1.6.0 # MIT License

View File

@ -1,81 +0,0 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
#
# before you run this modify your .ssh/config to create a
# review.openstack.org entry:
#
# Host review.openstack.org
# User <yourgerritusername>
# Port 29418
#
# Note: due to gerrit bug somewhere, this double posts messages. :(
# first purge the all reviews that are more than 4w old and blocked by a core -2
set -o errexit
# Abandon a single gerrit review.
#   $1 - the review's current patch set revision (git commit id)
#   $* - (after shift) the message to post with the abandon action
# NOTE(review): the message is later passed to this function unquoted by
# the callers, so it undergoes word splitting — verify that is intended.
function abandon_review {
    local gitid=$1
    shift
    local msg=$@
    echo "Abandoning $gitid"
    ssh review.openstack.org gerrit review $gitid --abandon --message \"$msg\"
}
# Collect reviews >4 weeks old blocked by a core -2: gerrit returns one
# JSON object per review; jq extracts the current patch set revision and
# grep/sed strip nulls and quotes.
blocked_reviews=$(ssh review.openstack.org "gerrit query  --current-patch-set --format json project:openstack/nova status:open age:4w label:Code-Review<=-2" | jq .currentPatchSet.revision | grep -v null | sed 's/"//g')

blocked_msg=$(cat <<EOF

This review is > 4 weeks without comment and currently blocked by a
core reviewer with a -2. We are abandoning this for now.

Feel free to reactivate the review by pressing the restore button and
contacting the reviewer with the -2 on this review to ensure you
address their concerns.

EOF
)

# For testing, put in a git rev of something you own and uncomment
# blocked_reviews="b6c4218ae4d75b86c33fa3d37c27bc23b46b6f0f"

for review in $blocked_reviews; do
    # echo ssh review.openstack.org gerrit review $review --abandon --message \"$msg\"
    echo "Blocked review $review"
    abandon_review $review $blocked_msg
done

# then purge all the reviews that are > 4w with no changes and Jenkins has -1ed
failing_reviews=$(ssh review.openstack.org "gerrit query  --current-patch-set --format json project:openstack/nova status:open age:4w NOT label:Verified>=1,jenkins" | jq .currentPatchSet.revision | grep -v null | sed 's/"//g')

failing_msg=$(cat <<EOF

This review is > 4 weeks without comment, and failed Jenkins the last
time it was checked. We are abandoning this for now.

Feel free to reactivate the review by pressing the restore button and
leaving a 'recheck' comment to get fresh test results.

EOF
)

for review in $failing_reviews; do
    echo "Failing review $review"
    abandon_review $review $failing_msg
done

View File

@ -1,24 +0,0 @@
#!/usr/bin/env bash
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
export LC_ALL=C

# Bring down and delete every interface whose name contains "br"
# (excluding lines containing "bridge"), then every vlan interface.
# NOTE(review): relies on the legacy ifconfig/brctl tools and the
# deprecated `xargs -i` replace-string form; also assumes no unrelated
# interface names match the greps — confirm before reuse on modern hosts.
sudo ifconfig -a | grep br | grep -v bridge | cut -f1 -d" " | xargs -n1 -ifoo ifconfig foo down
sudo ifconfig -a | grep br | grep -v bridge | cut -f1 -d" " | xargs -n1 -ifoo brctl delbr foo
sudo ifconfig -a | grep vlan | cut -f1 -d" " | xargs -n1 -ifoo ifconfig foo down
sudo ifconfig -a | grep vlan | cut -f1 -d" " | xargs -n1 -ifoo ip link del foo

View File

@ -1,326 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013, Nebula, Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Colorizer Code is borrowed from Twisted:
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Display a subunit stream through a colorized unittest test runner."""
import heapq
import sys
import unittest
import subunit
import testtools
class _AnsiColorizer(object):
    """A colorizer is an object that loosely wraps around a stream, allowing
    callers to write text to the stream in a particular color.

    Colorizer classes must implement C{supported()} and C{write(text, color)}.
    """
    # ANSI SGR color codes (30-37 foreground).
    _colors = dict(black=30, red=31, green=32, yellow=33,
                   blue=34, magenta=35, cyan=36, white=37)

    def __init__(self, stream):
        self.stream = stream

    def supported(cls, stream=sys.stdout):
        """A class method that returns True if the current platform supports
        coloring terminal output using this method. Returns False otherwise.
        """
        if not stream.isatty():
            return False  # auto color only on TTYs
        try:
            import curses
        except ImportError:
            return False
        else:
            try:
                try:
                    return curses.tigetnum("colors") > 2
                except curses.error:
                    # terminfo not set up yet for this terminal; initialize
                    # and retry once.
                    curses.setupterm()
                    return curses.tigetnum("colors") > 2
            except Exception:
                # guess false in case of error
                return False
    supported = classmethod(supported)

    def write(self, text, color):
        """Write the given text to the stream in the given color.

        @param text: Text to be written to the stream.

        @param color: A string label for a color. e.g. 'red', 'white'.
        """
        color = self._colors[color]
        # Wrap the text in an ANSI escape: bold + color, then reset.
        self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
class _Win32Colorizer(object):
    """See _AnsiColorizer docstring."""

    def __init__(self, stream):
        # Requires the pywin32 `win32console` module.
        import win32console
        red, green, blue, bold = (win32console.FOREGROUND_RED,
                                  win32console.FOREGROUND_GREEN,
                                  win32console.FOREGROUND_BLUE,
                                  win32console.FOREGROUND_INTENSITY)
        self.stream = stream
        self.screenBuffer = win32console.GetStdHandle(
            win32console.STD_OUT_HANDLE)
        # Each named color is composed from the RGB intensity bits.
        self._colors = {
            'normal': red | green | blue,
            'red': red | bold,
            'green': green | bold,
            'blue': blue | bold,
            'yellow': red | green | bold,
            'magenta': red | blue | bold,
            'cyan': green | blue | bold,
            'white': red | green | blue | bold
        }

    def supported(cls, stream=sys.stdout):
        # Supported only when pywin32 is importable AND the console
        # accepts a text-attribute change.
        try:
            import win32console
            screenBuffer = win32console.GetStdHandle(
                win32console.STD_OUT_HANDLE)
        except ImportError:
            return False
        import pywintypes
        try:
            screenBuffer.SetConsoleTextAttribute(
                win32console.FOREGROUND_RED |
                win32console.FOREGROUND_GREEN |
                win32console.FOREGROUND_BLUE)
        except pywintypes.error:
            return False
        else:
            return True
    supported = classmethod(supported)

    def write(self, text, color):
        # Switch the console attribute, write the text, then restore
        # the normal attribute.
        color = self._colors[color]
        self.screenBuffer.SetConsoleTextAttribute(color)
        self.stream.write(text)
        self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
class _NullColorizer(object):
"""See _AnsiColorizer docstring."""
def __init__(self, stream):
self.stream = stream
def supported(cls, stream=sys.stdout):
return True
supported = classmethod(supported)
def write(self, text, color):
self.stream.write(text)
def get_elapsed_time_color(elapsed_time):
    """Map a test duration in seconds to a severity color name.

    > 1.0s -> 'red', > 0.25s -> 'yellow', otherwise 'green'.
    """
    for threshold, color in ((1.0, 'red'), (0.25, 'yellow')):
        if elapsed_time > threshold:
            return color
    return 'green'
class NovaTestResult(testtools.TestResult):
def __init__(self, stream, descriptions, verbosity):
super(NovaTestResult, self).__init__()
self.stream = stream
self.showAll = verbosity > 1
self.num_slow_tests = 10
self.slow_tests = [] # this is a fixed-sized heap
self.colorizer = None
# NOTE(vish): reset stdout for the terminal check
stdout = sys.stdout
sys.stdout = sys.__stdout__
for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
if colorizer.supported():
self.colorizer = colorizer(self.stream)
break
sys.stdout = stdout
self.start_time = None
self.last_time = {}
self.results = {}
self.last_written = None
def _writeElapsedTime(self, elapsed):
color = get_elapsed_time_color(elapsed)
self.colorizer.write(" %.2f" % elapsed, color)
def _addResult(self, test, *args):
try:
name = test.id()
except AttributeError:
name = 'Unknown.unknown'
test_class, test_name = name.rsplit('.', 1)
elapsed = (self._now() - self.start_time).total_seconds()
item = (elapsed, test_class, test_name)
if len(self.slow_tests) >= self.num_slow_tests:
heapq.heappushpop(self.slow_tests, item)
else:
heapq.heappush(self.slow_tests, item)
self.results.setdefault(test_class, [])
self.results[test_class].append((test_name, elapsed) + args)
self.last_time[test_class] = self._now()
self.writeTests()
def _writeResult(self, test_name, elapsed, long_result, color,
short_result, success):
if self.showAll:
self.stream.write(' %s' % str(test_name).ljust(66))
self.colorizer.write(long_result, color)
if success:
self._writeElapsedTime(elapsed)
self.stream.writeln()
else:
self.colorizer.write(short_result, color)
def addSuccess(self, test):
super(NovaTestResult, self).addSuccess(test)
self._addResult(test, 'OK', 'green', '.', True)
def addFailure(self, test, err):
if test.id() == 'process-returncode':
return
super(NovaTestResult, self).addFailure(test, err)
self._addResult(test, 'FAIL', 'red', 'F', False)
def addError(self, test, err):
super(NovaTestResult, self).addFailure(test, err)
self._addResult(test, 'ERROR', 'red', 'E', False)
def addSkip(self, test, reason=None, details=None):
super(NovaTestResult, self).addSkip(test, reason, details)
self._addResult(test, 'SKIP', 'blue', 'S', True)
def startTest(self, test):
self.start_time = self._now()
super(NovaTestResult, self).startTest(test)
def writeTestCase(self, cls):
if not self.results.get(cls):
return
if cls != self.last_written:
self.colorizer.write(cls, 'white')
self.stream.writeln()
for result in self.results[cls]:
self._writeResult(*result)
del self.results[cls]
self.stream.flush()
self.last_written = cls
def writeTests(self):
time = self.last_time.get(self.last_written, self._now())
if not self.last_written or (self._now() - time).total_seconds() > 2.0:
diff = 3.0
while diff > 2.0:
classes = self.results.keys()
oldest = min(classes, key=lambda x: self.last_time[x])
diff = (self._now() - self.last_time[oldest]).total_seconds()
self.writeTestCase(oldest)
else:
self.writeTestCase(self.last_written)
def done(self):
    # Completion hook: funnel into stopTestRun() so all buffered
    # results are flushed the same way regardless of entry point.
    self.stopTestRun()
def stopTestRun(self):
    """Flush every remaining buffered result, then the slow-test table."""
    # BUG FIX: dict.iterkeys() is Python 2 only and raises
    # AttributeError under Python 3; list(self.results) snapshots the
    # keys so writeTestCase() can delete entries while we iterate.
    for cls in list(self.results):
        self.writeTestCase(cls)
    self.stream.writeln()
    self.writeSlowTests()
def writeSlowTests(self):
    # Pare out 'fast' tests: anything get_elapsed_time_color() deems
    # 'green' is dropped, leaving only tests worth highlighting.
    slow_tests = [item for item in self.slow_tests
                  if get_elapsed_time_color(item[0]) != 'green']
    if slow_tests:
        slow_total_time = sum(item[0] for item in slow_tests)
        slow = ("Slowest %i tests took %.2f secs:"
                % (len(slow_tests), slow_total_time))
        self.colorizer.write(slow, 'yellow')
        self.stream.writeln()
        last_cls = None
        # sort by name (class first, then test name) so entries for
        # the same class are grouped under a single header
        for elapsed, cls, name in sorted(slow_tests,
                                         key=lambda x: x[1] + x[2]):
            if cls != last_cls:
                self.colorizer.write(cls, 'white')
                self.stream.writeln()
                last_cls = cls
            self.stream.write(' %s' % str(name).ljust(68))
            self._writeElapsedTime(elapsed)
            self.stream.writeln()
def printErrors(self):
    # Called by the runner after the run: dump full tracebacks for
    # every error, then every failure.
    if self.showAll:
        self.stream.writeln()
    self.printErrorList('ERROR', self.errors)
    self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavor, errors):
    # Render one banner-delimited traceback per captured (test, err)
    # pair, labelled with *flavor* ('ERROR' or 'FAIL').
    banner = "=" * 70
    divider = "-" * 70
    for failed_test, traceback_text in errors:
        self.colorizer.write(banner, 'red')
        self.stream.writeln()
        self.colorizer.write(flavor, 'red')
        self.stream.writeln(": %s" % failed_test.id())
        self.colorizer.write(divider, 'red')
        self.stream.writeln()
        self.stream.writeln("%s" % traceback_text)
# Replay a subunit stream from stdin through the colorizing result.
test = subunit.ProtocolTestCase(sys.stdin, passthrough=None)

if sys.version_info[0:2] <= (2, 6):
    # Python 2.6's TextTestRunner does not accept resultclass.
    runner = unittest.TextTestRunner(verbosity=2)
else:
    runner = unittest.TextTestRunner(verbosity=2, resultclass=NovaTestResult)

# Exit 0 only when the whole stream parsed and every test passed.
if runner.run(test).wasSuccessful():
    exit_code = 0
else:
    exit_code = 1
sys.exit(exit_code)

View File

@ -1,20 +0,0 @@
This generate_sample.sh tool is used to generate etc/nova/nova.conf.sample
Run it from the top-level working directory i.e.
$> ./tools/config/generate_sample.sh -b ./ -p nova -o etc/nova
Watch out for warnings about modules like libvirt, qpid and zmq not
being found - these warnings are significant because they result
in options not appearing in the generated config file.
The analyze_opts.py tool is used to find options which appear in
/etc/nova/nova.conf but not in etc/nova/nova.conf.sample
This helps identify options in the nova.conf file which are not used by nova.
The tool also identifies any options which are set to the default value.
Run it from the top-level working directory i.e.
$> ./tools/config/analyze_opts.py

View File

@ -1,83 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
find_unused_options.py
Compare the nova.conf file with the nova.conf.sample file to find any unused
options or default values in nova.conf
'''
from __future__ import print_function
import argparse
import os
import sys
from oslo.config import iniparser
sys.path.append(os.getcwd())
class PropertyCollecter(iniparser.BaseParser):
    """INI parser that records every key/value assignment it sees.

    Section headers are ignored, so keys from all sections are
    flattened into a single dict.
    """

    def __init__(self):
        super(PropertyCollecter, self).__init__()
        self.key_value_pairs = {}

    def assignment(self, key, value):
        # Later assignments of the same key overwrite earlier ones.
        self.key_value_pairs[key] = value

    def new_section(self, section):
        # Sections carry no meaning for this comparison; flatten them.
        pass

    @classmethod
    def collect_properties(cls, lineiter, sample_format=False):
        """Parse *lineiter* and return its flattened {key: value} dict.

        When sample_format is True, commented-out option lines (a
        leading '#' with no following space) are uncommented first,
        matching the generated sample-file convention.
        """
        def clean_sample(f):
            for line in f:
                if line.startswith("#") and not line.startswith("# "):
                    line = line[1:]
                yield line

        collector = cls()
        source = clean_sample(lineiter) if sample_format else lineiter
        collector.parse(source)
        return collector.key_value_pairs
if __name__ == '__main__':
    # Compare a deployed nova.conf against the generated sample and
    # report options that are unknown to nova or merely restate the
    # documented default.
    parser = argparse.ArgumentParser(description='''Compare the nova.conf
file with the nova.conf.sample file to find any unused options or
default values in nova.conf''')

    parser.add_argument('-c', action='store',
                        default='/etc/nova/nova.conf',
                        help='path to nova.conf'
                             ' (defaults to /etc/nova/nova.conf)')
    # BUG FIX: the '-s' help text was missing its closing parenthesis.
    parser.add_argument('-s', default='./etc/nova/nova.conf.sample',
                        help='path to nova.conf.sample'
                             ' (defaults to ./etc/nova/nova.conf.sample)')
    options = parser.parse_args()

    # {key: value} maps from the live config and from the sample.
    conf_file_options = PropertyCollecter.collect_properties(open(options.c))
    sample_conf_file_options = PropertyCollecter.collect_properties(
        open(options.s), sample_format=True)

    # Options present in nova.conf but unknown to the sample file.
    for k, v in sorted(conf_file_options.items()):
        if k not in sample_conf_file_options:
            print("Unused:", k)
    # Options explicitly set to the documented default value.
    for k, v in sorted(conf_file_options.items()):
        if k in sample_conf_file_options and v == sample_conf_file_options[k]:
            print("Default valued:", k)

View File

@ -1,25 +0,0 @@
#!/usr/bin/env bash
# Verify that the checked-in ${PROJECT_NAME}.conf.sample is up to date
# by regenerating it into a temp dir and diffing against the tree.
PROJECT_NAME=${PROJECT_NAME:-nova}
CFGFILE_NAME=${PROJECT_NAME}.conf.sample

# Locate the committed sample file (either etc/<project>/ or etc/).
if [ -e etc/${PROJECT_NAME}/${CFGFILE_NAME} ]; then
    CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME}
elif [ -e etc/${CFGFILE_NAME} ]; then
    CFGFILE=etc/${CFGFILE_NAME}
else
    echo "${0##*/}: can not find config file"
    exit 1
fi

# Regenerate into a throwaway directory that is removed on exit.
TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX`
trap "rm -rf $TEMPDIR" EXIT

tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR}

# Any diff means the sample is stale; tell the developer how to fix it.
if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
then
    echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date."
    echo "${0##*/}: Please run ${0%%${0##*/}}generate_sample.sh."
    exit 1
fi

View File

@ -1,119 +0,0 @@
#!/usr/bin/env bash
# Generate <package>.conf.sample by interrogating the project's Python
# modules (plus any extra modules/libraries) for registered oslo options.

print_hint() {
    echo "Try \`${0##*/} --help' for more information." >&2
}

PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:m:l:o: \
                 --long help,base-dir:,package-name:,output-dir:,module:,library: -- "$@")

if [ $? != 0 ] ; then print_hint ; exit 1 ; fi

eval set -- "$PARSED_OPTIONS"

while true; do
    case "$1" in
        -h|--help)
            echo "${0##*/} [options]"
            echo ""
            echo "options:"
            echo "-h, --help show brief help"
            echo "-b, --base-dir=DIR project base directory"
            echo "-p, --package-name=NAME project package name"
            echo "-o, --output-dir=DIR file output directory"
            echo "-m, --module=MOD extra python module to interrogate for options"
            echo "-l, --library=LIB extra library that registers options for discovery"
            exit 0
            ;;
        -b|--base-dir)
            shift
            BASEDIR=`echo $1 | sed -e 's/\/*$//g'`
            shift
            ;;
        -p|--package-name)
            shift
            PACKAGENAME=`echo $1`
            shift
            ;;
        -o|--output-dir)
            shift
            OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'`
            shift
            ;;
        -m|--module)
            shift
            MODULES="$MODULES -m $1"
            shift
            ;;
        -l|--library)
            shift
            LIBRARIES="$LIBRARIES -l $1"
            shift
            ;;
        --)
            break
            ;;
    esac
done

# Default the base dir to the CWD and normalise it to an absolute path.
BASEDIR=${BASEDIR:-`pwd`}
if ! [ -d $BASEDIR ]
then
    echo "${0##*/}: missing project base directory" >&2 ; print_hint ; exit 1
elif [[ $BASEDIR != /* ]]
then
    BASEDIR=$(cd "$BASEDIR" && pwd)
fi

PACKAGENAME=${PACKAGENAME:-${BASEDIR##*/}}
TARGETDIR=$BASEDIR/$PACKAGENAME
if ! [ -d $TARGETDIR ]
then
    echo "${0##*/}: invalid project package name" >&2 ; print_hint ; exit 1
fi

OUTPUTDIR=${OUTPUTDIR:-$BASEDIR/etc}
# NOTE(bnemec): Some projects put their sample config in etc/,
# some in etc/$PACKAGENAME/
if [ -d $OUTPUTDIR/$PACKAGENAME ]
then
    OUTPUTDIR=$OUTPUTDIR/$PACKAGENAME
elif ! [ -d $OUTPUTDIR ]
then
    echo "${0##*/}: cannot access \`$OUTPUTDIR': No such file or directory" >&2
    exit 1
fi

BASEDIRESC=`echo $BASEDIR | sed -e 's/\//\\\\\//g'`
find $TARGETDIR -type f -name "*.pyc" -delete
# Every non-test python file that registers at least one oslo Opt.
FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" \
        -exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)

# Optional per-project rc file adding extra modules/libraries.
RC_FILE="`dirname $0`/oslo.config.generator.rc"
if test -r "$RC_FILE"
then
    source "$RC_FILE"
fi

for mod in ${NOVA_CONFIG_GENERATOR_EXTRA_MODULES}; do
    MODULES="$MODULES -m $mod"
done

for lib in ${NOVA_CONFIG_GENERATOR_EXTRA_LIBRARIES}; do
    LIBRARIES="$LIBRARIES -l $lib"
done

export EVENTLET_NO_GREENDNS=yes

# Strip OS_* credentials from the environment so the generator cannot
# pick them up as option defaults.
OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs)
[ "$OS_VARS" ] && eval "unset \$OS_VARS"
DEFAULT_MODULEPATH=nova.openstack.common.config.generator
MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
python -m $MODULEPATH $MODULES $LIBRARIES $FILES > $OUTPUTFILE

# Hook to allow projects to append custom config file snippets
CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null)
for CONCAT_FILE in $CONCAT_FILES; do
    cat $CONCAT_FILE >> $OUTPUTFILE
done

View File

@ -1,2 +0,0 @@
# Extra oslo libraries/modules the sample-config generator must load
# so their registered options appear in the generated conf.sample.
NOVA_CONFIG_GENERATOR_EXTRA_LIBRARIES="oslo.messaging oslo.db oslo.concurrency"
NOVA_CONFIG_GENERATOR_EXTRA_MODULES=keystonemiddleware.auth_token

View File

@ -1,286 +0,0 @@
#!/usr/bin/env python
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility for diff'ing two versions of the DB schema.
Each release cycle the plan is to compact all of the migrations from that
release into a single file. This is a manual and, unfortunately, error-prone
process. To ensure that the schema doesn't change, this tool can be used to
diff the compacted DB schema to the original, uncompacted form.
The database is specified by providing a SQLAlchemy connection URL WITHOUT the
database-name portion (that will be filled in automatically with a temporary
database name).
The schema versions are specified by providing a git ref (a branch name or
commit hash) and a SQLAlchemy-Migrate version number:
Run like:
MYSQL:
./tools/db/schema_diff.py mysql://root@localhost \
master:latest my_branch:82
POSTGRESQL:
./tools/db/schema_diff.py postgresql://localhost \
master:latest my_branch:82
"""
from __future__ import print_function
import datetime
import glob
from nova import i18n
import os
import subprocess
import sys
_ = i18n._
# Dump
def dump_db(db_driver, db_name, db_url, migration_version, dump_filename):
    """Create *db_name*, migrate it to *migration_version*, and dump it.

    The database is always dropped again, even when the migration or
    the dump fails.
    """
    if not db_url.endswith('/'):
        db_url += '/'
    db_url += db_name

    db_driver.create(db_name)
    try:
        _migrate(db_url, migration_version)
        db_driver.dump(db_name, dump_filename)
    finally:
        db_driver.drop(db_name)
# Diff
def diff_files(filename1, filename2):
    """Show a unified diff of the two dump files in a pager."""
    pipeline = ["diff -U 3 {filename1} {filename2}"
                .format(filename1=filename1, filename2=filename2)]

    # Use colordiff if available
    if subprocess.call(['which', 'colordiff']) == 0:
        pipeline.append('colordiff')

    pipeline.append('less -R')

    cmd = ' | '.join(pipeline)
    subprocess.check_call(cmd, shell=True)
# Database
class Mysql(object):
    # Thin wrapper around the mysqladmin/mysqldump command-line tools;
    # connects as root with no password.
    def create(self, name):
        subprocess.check_call(['mysqladmin', '-u', 'root', 'create', name])

    def drop(self, name):
        subprocess.check_call(['mysqladmin', '-f', '-u', 'root', 'drop', name])

    def dump(self, name, dump_filename):
        subprocess.check_call(
            "mysqldump -u root {name} > {dump_filename}"
            .format(name=name, dump_filename=dump_filename),
            shell=True)


class Postgresql(object):
    # Thin wrapper around the createdb/dropdb/pg_dump command-line
    # tools.
    def create(self, name):
        subprocess.check_call(['createdb', name])

    def drop(self, name):
        subprocess.check_call(['dropdb', name])

    def dump(self, name, dump_filename):
        subprocess.check_call(
            "pg_dump {name} > {dump_filename}"
            .format(name=name, dump_filename=dump_filename),
            shell=True)
def _get_db_driver_class(db_url):
    """Map a SQLAlchemy URL scheme to its driver class.

    'mysql://...' -> Mysql, 'postgresql://...' -> Postgresql.
    """
    try:
        return globals()[db_url.split('://')[0].capitalize()]
    except KeyError:
        # BUG FIX: the placeholder was mistyped as '{]', which made
        # .format() raise ValueError instead of producing the intended
        # "database ... not supported" message.
        raise Exception(_("database {} not supported").format(db_url))
# Migrate
# Path to nova's sqlalchemy-migrate repository, resolved relative to
# the current working directory (the tool must be run from repo root).
MIGRATE_REPO = os.path.join(os.getcwd(), "nova/db/sqlalchemy/migrate_repo")
def _migrate(db_url, migration_version):
    """Version-control the database, then upgrade it to *migration_version*.

    The special value 'latest' upgrades all the way to the newest
    migration in the repo.
    """
    earliest_version = _migrate_get_earliest_version()

    # NOTE(sirp): sqlalchemy-migrate currently cannot handle the skipping of
    # migration numbers.
    _migrate_cmd(
        db_url, 'version_control', str(earliest_version - 1))

    upgrade_cmd = ['upgrade']
    if migration_version != 'latest':
        upgrade_cmd.append(str(migration_version))

    _migrate_cmd(db_url, *upgrade_cmd)
def _migrate_cmd(db_url, *cmd):
    """Run a sqlalchemy-migrate manage.py subcommand against *db_url*."""
    manage_py = os.path.join(MIGRATE_REPO, 'manage.py')
    argv = ['python', manage_py]
    argv.extend(cmd)
    argv.extend(['--repository=%s' % MIGRATE_REPO,
                 '--url=%s' % db_url])
    subprocess.check_call(argv)
def _migrate_get_earliest_version():
versions_glob = os.path.join(MIGRATE_REPO, 'versions', '???_*.py')
versions = []
for path in glob.iglob(versions_glob):
filename = os.path.basename(path)
prefix = filename.split('_', 1)[0]
try:
version = int(prefix)
except ValueError:
pass
versions.append(version)
versions.sort()
return versions[0]
# Git
def git_current_branch_name():
    """Return the name of the currently checked-out branch."""
    ref_name = git_symbolic_ref('HEAD', quiet=True)
    # ref_name looks like 'refs/heads/<branch>'; strip the prefix.
    current_branch_name = ref_name.replace('refs/heads/', '')
    return current_branch_name


def git_symbolic_ref(ref, quiet=False):
    """Return the output of `git symbolic-ref <ref>`, whitespace-stripped."""
    args = ['git', 'symbolic-ref', ref]
    if quiet:
        args.append('-q')
    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    return stdout.strip()


def git_checkout(branch_name):
    """Check out *branch_name*; raises CalledProcessError on failure."""
    subprocess.check_call(['git', 'checkout', branch_name])


def git_has_uncommited_changes():
    """Return True when the working tree differs from HEAD."""
    return subprocess.call(['git', 'diff', '--quiet', '--exit-code']) == 1
# Command
def die(msg):
    """Print *msg* to stderr and exit with status 1."""
    print("ERROR: %s" % msg, file=sys.stderr)
    sys.exit(1)


def usage(msg=None):
    """Print an optional error plus the usage text to stderr, then exit 1."""
    if msg:
        print("ERROR: %s" % msg, file=sys.stderr)

    prog = "schema_diff.py"
    args = ["<db-url>", "<orig-branch:orig-version>",
            "<new-branch:new-version>"]

    print("usage: %s %s" % (prog, ' '.join(args)), file=sys.stderr)
    sys.exit(1)
def parse_options():
    """Parse sys.argv into (db_url, orig_branch, orig_version,
    new_branch, new_version), exiting via usage() on bad input.
    """
    try:
        db_url = sys.argv[1]
    except IndexError:
        usage("must specify DB connection url")

    # BUG FIX: a branch spec without a ':' makes the 2-tuple unpacking
    # raise ValueError, not IndexError; catch both so the friendly
    # usage message is shown instead of a traceback.
    try:
        orig_branch, orig_version = sys.argv[2].split(':')
    except (IndexError, ValueError):
        usage('original branch and version required (e.g. master:82)')

    try:
        new_branch, new_version = sys.argv[3].split(':')
    except (IndexError, ValueError):
        usage('new branch and version required (e.g. master:82)')

    return db_url, orig_branch, orig_version, new_branch, new_version
def main():
    """Dump the schema at two (branch, version) points and diff them."""
    # Timestamp the temp database/dump names so runs cannot collide.
    timestamp = datetime.datetime.utcnow().strftime("%Y%m%d_%H%M%S")
    ORIG_DB = 'orig_db_%s' % timestamp
    NEW_DB = 'new_db_%s' % timestamp

    ORIG_DUMP = ORIG_DB + ".dump"
    NEW_DUMP = NEW_DB + ".dump"

    options = parse_options()
    db_url, orig_branch, orig_version, new_branch, new_version = options

    # Since we're going to be switching branches, ensure user doesn't have any
    # uncommited changes
    if git_has_uncommited_changes():
        die("You have uncommited changes. Please commit them before running "
            "this command.")

    db_driver = _get_db_driver_class(db_url)()

    users_branch = git_current_branch_name()
    git_checkout(orig_branch)
    try:
        # Dump Original Schema
        dump_db(db_driver, ORIG_DB, db_url, orig_version, ORIG_DUMP)

        # Dump New Schema
        git_checkout(new_branch)
        dump_db(db_driver, NEW_DB, db_url, new_version, NEW_DUMP)

        diff_files(ORIG_DUMP, NEW_DUMP)
    finally:
        # Always restore the user's branch and remove the dump files.
        git_checkout(users_branch)

        if os.path.exists(ORIG_DUMP):
            os.unlink(ORIG_DUMP)

        if os.path.exists(NEW_DUMP):
            os.unlink(NEW_DUMP)


if __name__ == "__main__":
    main()

View File

@ -1,42 +0,0 @@
#!/bin/sh
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Install a git pre-commit hook that runs the fast pep8 checks
# (./run_tests.sh -N -p) before every commit.
PRE_COMMIT_SCRIPT=.git/hooks/pre-commit

make_hook() {
    # NOTE(review): '>>' appends, so running this script twice leaves
    # duplicate lines in the hook -- presumably accepted for
    # simplicity; verify before reusing elsewhere.
    echo "exec ./run_tests.sh -N -p" >> $PRE_COMMIT_SCRIPT
    chmod +x $PRE_COMMIT_SCRIPT

    if [ -w $PRE_COMMIT_SCRIPT -a -x $PRE_COMMIT_SCRIPT ]; then
        echo "pre-commit hook was created successfully"
    else
        echo "unable to create pre-commit hook"
    fi
}

# NOTE(jk0): Make sure we are in nova's root directory before adding the hook.
if [ ! -d ".git" ]; then
    echo "unable to find .git; moving up a directory"
    cd ..
    if [ -d ".git" ]; then
        make_hook
    else
        echo "still unable to find .git; hook not created"
    fi
else
    make_hook
fi

View File

@ -1,73 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
import install_venv_common as install_venv
def print_help(venv, root):
    """Print post-setup instructions for using the new virtualenv."""
    message = """
Nova development environment setup is complete.
Nova development uses virtualenv to track and manage Python dependencies
while in development and testing.
To activate the Nova virtualenv for the extent of your current shell
session you can run:
$ source %s/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ %s/tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
    print(message % (venv, root))
def main(argv):
    """Create or refresh the Nova development virtualenv.

    The 'tools_path' and 'venv' environment variables override the
    repository root and virtualenv location respectively.
    """
    root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

    if os.environ.get('tools_path'):
        root = os.environ['tools_path']
    venv = os.path.join(root, '.venv')
    if os.environ.get('venv'):
        venv = os.environ['venv']

    pip_requires = os.path.join(root, 'requirements.txt')
    test_requires = os.path.join(root, 'test-requirements.txt')
    py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
    project = 'Nova'
    install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
                                       py_version, project)
    options = install.parse_args(argv)
    install.check_python_version()
    install.check_dependencies()
    install.create_virtualenv(no_site_packages=options.no_site_packages)
    install.install_dependencies()
    print_help(venv, root)


if __name__ == '__main__':
    main(sys.argv)

View File

@ -1,172 +0,0 @@
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides methods needed by installation script for OpenStack development
virtual environments.
Since this script is used to bootstrap a virtualenv from the system's Python
environment, it should be kept strictly compatible with Python 2.6.
Synced in from openstack-common
"""
from __future__ import print_function
import optparse
import os
import subprocess
import sys
class InstallVenv(object):
    """Driver for creating and provisioning a project virtualenv."""

    def __init__(self, root, venv, requirements,
                 test_requirements, py_version,
                 project):
        self.root = root
        self.venv = venv
        self.requirements = requirements
        self.test_requirements = test_requirements
        self.py_version = py_version
        self.project = project

    def die(self, message, *args):
        # Print the %-formatted message to stderr and abort.
        print(message % args, file=sys.stderr)
        sys.exit(1)

    def check_python_version(self):
        if sys.version_info < (2, 6):
            self.die("Need Python Version >= 2.6")

    def run_command_with_code(self, cmd, redirect_output=True,
                              check_exit_code=True):
        """Runs a command in an out-of-process shell.

        Returns the output of that command. Working directory is self.root.
        """
        if redirect_output:
            stdout = subprocess.PIPE
        else:
            stdout = None

        proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
        output = proc.communicate()[0]
        if check_exit_code and proc.returncode != 0:
            self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
        return (output, proc.returncode)

    def run_command(self, cmd, redirect_output=True, check_exit_code=True):
        # Convenience wrapper that drops the return code.
        return self.run_command_with_code(cmd, redirect_output,
                                          check_exit_code)[0]

    def get_distro(self):
        # Fedora-family distros package virtualenv differently; pick
        # the matching strategy object.
        if (os.path.exists('/etc/fedora-release') or
                os.path.exists('/etc/redhat-release')):
            return Fedora(
                self.root, self.venv, self.requirements,
                self.test_requirements, self.py_version, self.project)
        else:
            return Distro(
                self.root, self.venv, self.requirements,
                self.test_requirements, self.py_version, self.project)

    def check_dependencies(self):
        self.get_distro().install_virtualenv()

    def create_virtualenv(self, no_site_packages=True):
        """Creates the virtual environment and installs PIP.

        Creates the virtual environment and installs PIP only into the
        virtual environment.
        """
        if not os.path.isdir(self.venv):
            print('Creating venv...', end=' ')
            if no_site_packages:
                self.run_command(['virtualenv', '-q', '--no-site-packages',
                                  self.venv])
            else:
                self.run_command(['virtualenv', '-q', self.venv])
            print('done.')
        else:
            print("venv already exists...")
            pass

    def pip_install(self, *args):
        self.run_command(['tools/with_venv.sh',
                          'pip', 'install', '--upgrade'] + list(args),
                         redirect_output=False)

    def install_dependencies(self):
        print('Installing dependencies with pip (this can take a while)...')

        # First things first, make sure our venv has the latest pip and
        # setuptools and pbr
        self.pip_install('pip>=1.4')
        self.pip_install('setuptools')
        self.pip_install('pbr')

        self.pip_install('-r', self.requirements, '-r', self.test_requirements)

    def parse_args(self, argv):
        """Parses command-line arguments."""
        parser = optparse.OptionParser()
        parser.add_option('-n', '--no-site-packages',
                          action='store_true',
                          help="Do not inherit packages from global Python "
                               "install.")
        return parser.parse_args(argv[1:])[0]
class Distro(InstallVenv):
    """Generic distro: installs virtualenv via easy_install if missing."""

    def check_cmd(self, cmd):
        # True when *cmd* resolves on PATH.
        return bool(self.run_command(['which', cmd],
                                     check_exit_code=False).strip())

    def install_virtualenv(self):
        if self.check_cmd('virtualenv'):
            return

        if self.check_cmd('easy_install'):
            print('Installing virtualenv via easy_install...', end=' ')
            if self.run_command(['easy_install', 'virtualenv']):
                print('Succeeded')
                return
            else:
                print('Failed')

        self.die('ERROR: virtualenv not found.\n\n%s development'
                 ' requires virtualenv, please install it using your'
                 ' favorite package management tool' % self.project)
class Fedora(Distro):
    """This covers all Fedora-based distributions.

    Includes: Fedora, RHEL, CentOS, Scientific Linux
    """

    def check_pkg(self, pkg):
        # True when the rpm database reports *pkg* as installed.
        return self.run_command_with_code(['rpm', '-q', pkg],
                                          check_exit_code=False)[1] == 0

    def install_virtualenv(self):
        if self.check_cmd('virtualenv'):
            return

        if not self.check_pkg('python-virtualenv'):
            self.die("Please install 'python-virtualenv'.")

        super(Fedora, self).install_virtualenv()

View File

@ -1,37 +0,0 @@
# bash completion for openstack nova-manage
_nova_manage_opts="" # lazy init
_nova_manage_opts_exp="" # lazy init

# dict hack for bash 3
_set_nova_manage_subopts () {
    eval _nova_manage_subopts_"$1"='$2'
}
_get_nova_manage_subopts () {
    eval echo '${_nova_manage_subopts_'"$1"'#_nova_manage_subopts_}'
}

_nova_manage()
{
    local cur prev subopts
    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    prev="${COMP_WORDS[COMP_CWORD-1]}"

    # Ask nova-manage itself for its top-level commands, once per shell.
    if [ "x$_nova_manage_opts" == "x" ] ; then
        _nova_manage_opts="`nova-manage bash-completion 2>/dev/null`"
        _nova_manage_opts_exp="`echo $_nova_manage_opts | sed -e "s/\s/|/g"`"
    fi

    # Completing after a known subcommand: fetch (and cache) its options.
    if [[ " `echo $_nova_manage_opts` " =~ " $prev " ]] ; then
        if [ "x$(_get_nova_manage_subopts "$prev")" == "x" ] ; then
            subopts="`nova-manage bash-completion $prev 2>/dev/null`"
            _set_nova_manage_subopts "$prev" "$subopts"
        fi
        COMPREPLY=($(compgen -W "$(_get_nova_manage_subopts "$prev")" -- ${cur}))
    elif [[ ! " ${COMP_WORDS[@]} " =~ " "($_nova_manage_opts_exp)" " ]] ; then
        # No subcommand typed yet: complete the top-level command list.
        COMPREPLY=($(compgen -W "${_nova_manage_opts}" -- ${cur}))
    fi
    return 0
}
complete -F _nova_manage nova-manage

View File

@ -1,6 +0,0 @@
#!/usr/bin/env bash
# Run the test suite through testr and pretty-print the subunit stream
# with subunit-trace; the first argument is passed through to testr.
set -o pipefail

TESTRARGS=$1
python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f

View File

@ -1,109 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tool for checking if patch contains a regression test.
By default runs against current patch but can be set to use any gerrit review
as specified by change number (uses 'git review -d').
Idea: take tests from patch to check, and run against code from previous patch.
If new tests pass, then no regression test, if new tests fails against old code
then either
* new tests depend on new code and cannot confirm regression test is valid
(false positive)
* new tests detects the bug being fixed (detect valid regression test)
Due to the risk of false positives, the results from this need some human
interpretation.
"""
from __future__ import print_function
import optparse
import string
import subprocess
import sys
def run(cmd, fail_ok=False):
    """Run *cmd* in a shell and return its captured stdout.

    Exits the process with the command's return code on failure,
    unless fail_ok is True.
    """
    print("running: %s" % cmd)
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            shell=True)
    proc.wait()
    if not fail_ok and proc.returncode != 0:
        print("The above command terminated with an error.")
        sys.exit(proc.returncode)
    return proc.stdout.read()
def main():
    """Check whether the current patch (or a gerrit review) carries a
    regression test.

    Runs the patch's changed tests against the parent commit's code; a
    failure there suggests the tests really exercise the bug being
    fixed (with the false-positive caveats described in the module
    docstring).
    """
    usage = """
Tool for checking if a patch includes a regression test.
Usage: %prog [options]"""
    parser = optparse.OptionParser(usage)
    parser.add_option("-r", "--review", dest="review",
                      help="gerrit review number to test")
    (options, args) = parser.parse_args()
    if options.review:
        original_branch = run("git rev-parse --abbrev-ref HEAD")
        run("git review -d %s" % options.review)
    else:
        # BUG FIX: the two adjacent literals joined without a space,
        # printing "...latest commiton current branch."
        print("no gerrit review number specified, running on latest commit "
              "on current branch.")

    test_works = False

    # run new tests with old code
    run("git checkout HEAD^ nova")
    run("git checkout HEAD nova/tests")

    # identify which tests have changed
    tests = run("git whatchanged --format=oneline -1 | grep \"nova/tests\" "
                "| cut -f2").split()

    # BUG FIX: string.replace()/string.join() were removed in Python 3;
    # use the equivalent str methods (identical behaviour on Python 2).
    # NOTE(review): the rest of this script still assumes run() yields
    # text, which holds on Python 2 only -- verify before porting.
    test_list = []
    for test in tests:
        test_list.append(test[0:-3].replace('/', '.'))

    if test_list == []:
        test_works = False
        expect_failure = ""
    else:
        # run new tests, expect them to fail
        expect_failure = run(("tox -epy27 %s 2>&1" % ' '.join(test_list)),
                             fail_ok=True)
        if "FAILED (id=" in expect_failure:
            test_works = True

    # cleanup
    run("git checkout HEAD nova")
    if options.review:
        new_branch = run("git status | head -1 | cut -d ' ' -f 4")
        run("git checkout %s" % original_branch)
        run("git branch -D %s" % new_branch)

    print(expect_failure)
    print("")
    print("*******************************")
    if test_works:
        print("FOUND a regression test")
    else:
        print("NO regression test")
        sys.exit(1)


if __name__ == "__main__":
    main()

View File

@ -1,7 +0,0 @@
#!/bin/bash
# Run the given command inside the project's virtualenv. The
# tools_path / venv_path / venv_name / venv environment variables
# override the default locations.
tools_path=${tools_path:-$(dirname $0)}
venv_path=${venv_path:-${tools_path}}
# NOTE(review): the default's leading '/' yields 'tools//../.venv'
# below, which resolves correctly but looks odd -- verify if changed.
venv_dir=${venv_name:-/../.venv}
TOOLS=${tools_path}
VENV=${venv:-${venv_path}/${venv_dir}}
source ${VENV}/bin/activate && "$@"

80
tox.ini
View File

@ -1,80 +0,0 @@
# tox.ini for nova-lxd -- read by tox (Python configparser dialect).
[tox]
minversion = 2.0
# Unit tests on python 3 and 2.7, plus the pep8 style checks.
envlist = py{3,27},pep8
skipsdist = True

[testenv]
usedevelop = True
# Pin transitive dependencies with OpenStack's upper-constraints file.
install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
setenv =
    VIRTUAL_ENV={envdir}
    EVENTLET_NO_GREENDNS=yes
    PYTHONDONTWRITEBYTECODE=1
    LANGUAGE=en_US
    LC_ALL=en_US.utf-8
# nova itself is installed from git because nova-lxd plugs into its tree.
deps = -r{toxinidir}/requirements.txt
    -r{toxinidir}/test-requirements.txt
    -egit+https://github.com/openstack/nova.git#egg=nova
whitelist_externals =
    bash
    find
    rm
    env
# Remove stale bytecode and test-timing data before every run.
commands =
    find . -type f -name "*.pyc" -delete
    rm -Rf .testrepository/times.dbm
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY OS_DEBUG GENERATE_HASHES

# Copy the lxd driver into the nova checkout, then run the suite.
[testenv:py27]
commands =
    {[testenv]commands}
    /bin/cp -r {toxinidir}/nova/virt/lxd/ {toxinidir}/.tox/py27/src/nova/nova/virt/
    stestr run {posargs}

[testenv:py3]
basepython = python3
commands =
    {[testenv]commands}
    /bin/cp -r {toxinidir}/nova/virt/lxd/ {toxinidir}/.tox/py3/src/nova/nova/virt/
    stestr run {posargs}

[testenv:pep8]
basepython = python3
deps = {[testenv]deps}
commands =
    flake8 {toxinidir}/nova
    flake8 {toxinidir}/nova_lxd_tempest_plugin

[testenv:venv]
basepython = python3
commands = {posargs}

[testenv:cover]
basepython = python3
# Also do not run test_coverage_ext tests while gathering coverage as those
# tests conflict with coverage.
commands =
    coverage erase
    find . -type f -name "*.pyc" -delete
    python setup.py testr --coverage --testr-args='{posargs}'
    coverage report

[testenv:docs]
basepython = python3
commands = python setup.py build_sphinx

[flake8]
# H803 skipped on purpose per list discussion.
# E123, E125 skipped as they are invalid PEP-8.
show-source = True
ignore = E123,E125,H803,H904,H405,H404,H305,H306,H307
builtins = _
exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools/colorizer.py

[testenv:lower-constraints]
basepython = python3
deps =
    -c{toxinidir}/lower-constraints.txt
    -r{toxinidir}/test-requirements.txt
    -r{toxinidir}/requirements.txt