initial commit

Jesse Andrews 2010-05-27 23:05:26 -07:00
commit 0f4873cfca
1855 changed files with 427896 additions and 0 deletions

11
CA/.gitignore vendored Normal file
@@ -0,0 +1,11 @@
index.txt
index.txt.old
index.txt.attr
index.txt.attr.old
cacert.pem
serial
serial.old
openssl.cnf
private/*
newcerts/*

1
CA/INTER/.gitignore vendored Normal file
@@ -0,0 +1 @@
*

30
CA/geninter.sh Executable file
@@ -0,0 +1,30 @@
#!/bin/bash
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ARG is the id of the user
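# Creates a per-user intermediate CA: a working directory is set up under
# INTER/$1 from openssl.cnf.tmpl, a key and CSR are generated there, and the
# CSR is signed by the root CA in this directory (see genrootca.sh).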
mkdir INTER/$1
cd INTER/$1
cp ../../openssl.cnf.tmpl openssl.cnf
sed -i -e s/%USERNAME%/$1/g openssl.cnf
mkdir certs crl newcerts private
echo "10" > serial
touch index.txt
openssl genrsa -out private/cakey.pem 1024 -config ./openssl.cnf -batch -nodes
openssl req -new -sha1 -key private/cakey.pem -out ../../reqs/inter$1.csr -batch -subj "/C=US/ST=California/L=Mountain View/O=Anso Labs/OU=Nova Dev/CN=customer-intCA-$1"
cd ../../
openssl ca -extensions v3_ca -days 365 -out INTER/$1/cacert.pem -in reqs/inter$1.csr -config openssl.cnf -batch

26
CA/genrootca.sh Executable file
@@ -0,0 +1,26 @@
#!/bin/bash
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
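# Generates the self-signed root CA (key, certificate, serial and index files)
# from openssl.cnf.tmpl. If cacert.pem already exists, nothing is regenerated.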
if [ -f "cacert.pem" ];
then
echo "Not installing, it's already done."
else
cp openssl.cnf.tmpl openssl.cnf
sed -i -e s/%USERNAME%/ROOT/g openssl.cnf
openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes
touch index.txt
echo "10" > serial
fi

0
CA/newcerts/.placeholder Normal file

87
CA/openssl.cnf.tmpl Normal file
@@ -0,0 +1,87 @@
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# OpenSSL configuration file.
#
# Establish working directory.
dir = .
[ ca ]
default_ca = CA_default
unique_subject = no
[ CA_default ]
serial = $dir/serial
database = $dir/index.txt
new_certs_dir = $dir/newcerts
certificate = $dir/cacert.pem
private_key = $dir/private/cakey.pem
default_days = 365
default_md = md5
preserve = no
email_in_dn = no
nameopt = default_ca
certopt = default_ca
policy = policy_match
[ policy_match ]
countryName = match
stateOrProvinceName = match
organizationName = optional
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
default_bits = 1024 # Size of keys
default_keyfile = key.pem # name of generated keys
default_md = md5 # message digest algorithm
string_mask = nombstr # permitted characters
distinguished_name = req_distinguished_name
[ req_distinguished_name ]
# Variable name Prompt string
#---------------------- ----------------------------------
0.organizationName = Organization Name (company)
organizationalUnitName = Organizational Unit Name (department, division)
emailAddress = Email Address
emailAddress_max = 40
localityName = Locality Name (city, district)
stateOrProvinceName = State or Province Name (full name)
countryName = Country Name (2 letter code)
countryName_min = 2
countryName_max = 2
commonName = Common Name (hostname, IP, or your name)
commonName_max = 64
# Default values for the above, for consistency and less typing.
# Variable name Value
#------------------------------ ------------------------------
0.organizationName_default = NOVA %USERNAME%
localityName_default = Mountain View
stateOrProvinceName_default = California
countryName_default = US
[ v3_ca ]
basicConstraints = CA:TRUE
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer:always
[ v3_req ]
basicConstraints = CA:FALSE
subjectKeyIdentifier = hash

0
CA/private/.placeholder Normal file

1
CA/reqs/.gitignore vendored Normal file
@@ -0,0 +1 @@
*

53
HACKING Normal file
@@ -0,0 +1,53 @@
Nova Style Commandments
=======================
Step 1: Read http://www.python.org/dev/peps/pep-0008/
Step 2: Read http://www.python.org/dev/peps/pep-0008/ again
Step 3: Read on
Imports
-------
- thou shalt not import objects, only modules
- thou shalt not import more than one module per line
- thou shalt not make relative imports
- thou shalt "from nova import vendor" before importing third party code
- thou shalt organize your imports according to the following template
::
# vim: tabstop=4 shiftwidth=4 softtabstop=4
{{stdlib imports in human alphabetical order}}
\n
from nova import vendor
{{vendor imports in human alphabetical order}}
\n
{{nova imports in human alphabetical order}}
\n
\n
{{begin your code}}
General
-------
- thou shalt put two newlines twixt toplevel code (funcs, classes, etc)
- thou shalt put one newline twixt methods in classes and anywhere else
- thou shalt not write "except:", use "except Exception:" at the very least
- thou shalt include your name with TODOs as in "TODO(termie)"
- thou shalt not name anything the same name as a builtin or reserved word
- thou shalt not violate causality in our time cone, or else
Human Alphabetical Order Examples
---------------------------------
::
import httplib
import logging
import random
import StringIO
import time
import unittest
from nova import flags
from nova import test
from nova.auth import users
from nova.endpoint import api
from nova.endpoint import cloud
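Putting the General commandments together, a module skeleton might look like
the following (an illustrative sketch only; the names below are hypothetical,
not part of nova):
::
    # vim: tabstop=4 shiftwidth=4 softtabstop=4
    import logging

    from nova import vendor

    from nova import flags


    FLAGS = flags.FLAGS


    def parse_state(raw):
        """parses a state string into an integer, defaulting to zero"""
        # TODO(yourname): surface bad input to the caller instead of hiding it
        try:
            return int(raw)
        except Exception:
            logging.exception('could not parse state %r', raw)
            return 0


    class StateReporter(object):
        """illustrative class: one newline twixt methods"""

        def check(self):
            return parse_state('0') == 0

        def report(self):
            return 'ok'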

176
LICENSE Normal file
@@ -0,0 +1,176 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

63
bin/nova-api Executable file
@@ -0,0 +1,63 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tornado daemon for the main API endpoint.
"""
import logging
from nova import vendor
from tornado import httpserver
from tornado import ioloop
from nova import flags
from nova import rpc
from nova import server
from nova import utils
from nova.auth import users
from nova.endpoint import admin
from nova.endpoint import api
from nova.endpoint import cloud
FLAGS = flags.FLAGS
def main(_argv):
user_manager = users.UserManager()
controllers = {
'Cloud': cloud.CloudController(),
'Admin': admin.AdminController(user_manager)
}
_app = api.APIServerApplication(user_manager, controllers)
conn = rpc.Connection.instance()
consumer = rpc.AdapterConsumer(connection=conn,
topic=FLAGS.cloud_topic,
proxy=controllers['Cloud'])
io_inst = ioloop.IOLoop.instance()
_injected = consumer.attach_to_tornado(io_inst)
http_server = httpserver.HTTPServer(_app)
http_server.listen(FLAGS.cc_port)
logging.debug('Started HTTP server on %s', FLAGS.cc_port)
io_inst.start()
if __name__ == '__main__':
utils.default_flagfile()
server.serve('nova-api', main)

97
bin/nova-compute Executable file
@@ -0,0 +1,97 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Twistd daemon for the nova compute nodes.
Receives messages via AMQP, manages pool of worker threads
for async tasks.
"""
import logging
import os
import sys
# NOTE(termie): kludge so that we can run this from the bin directory in the
# checkout without having to screw with paths
NOVA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'nova')
if os.path.exists(NOVA_PATH):
sys.path.insert(0, os.path.dirname(NOVA_PATH))
from nova import vendor
from carrot import connection
from carrot import messaging
from twisted.internet import task
from twisted.application import service
from nova import flags
from nova import rpc
from nova import twistd
from nova.compute import node
FLAGS = flags.FLAGS
# NOTE(termie): This file will necessarily be re-imported under different
# context when the twistd.serve() call is made below so any
# flags we define here will have to be conditionally defined,
# flags defined by imported modules are safe.
if 'node_report_state_interval' not in FLAGS:
flags.DEFINE_integer('node_report_state_interval', 10,
'seconds between nodes reporting state to cloud',
lower_bound=1)
logging.getLogger().setLevel(logging.DEBUG)
def main():
logging.warn('Starting compute node')
n = node.NetworkNode()
d = n.adopt_instances()
d.addCallback(lambda x: logging.info('Adopted %d instances', x))
conn = rpc.Connection.instance()
consumer_all = rpc.AdapterConsumer(
connection=conn,
topic='%s' % FLAGS.compute_topic,
proxy=n)
consumer_node = rpc.AdapterConsumer(
connection=conn,
topic='%s.%s' % (FLAGS.compute_topic, FLAGS.node_name),
proxy=n)
# heartbeat = task.LoopingCall(n.report_state)
# heartbeat.start(interval=FLAGS.node_report_state_interval, now=False)
injected = consumer_all.attach_to_twisted()
injected = consumer_node.attach_to_twisted()
# This is the parent service that twistd will be looking for when it
# parses this file, return it so that we can get it into globals below
application = service.Application('nova-compute')
n.setServiceParent(application)
return application
# NOTE(termie): When this script is executed from the commandline what it will
# actually do is tell the twistd application runner that it
# should run this file as a twistd application (see below).
if __name__ == '__main__':
twistd.serve(__file__)
# NOTE(termie): When this script is loaded by the twistd application runner
# this code path will be executed and twistd will expect a
# variable named 'application' to be available, it will then
# handle starting it and stopping it.
if __name__ == '__builtin__':
application = main()

158
bin/nova-manage Executable file
@@ -0,0 +1,158 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
CLI interface for nova management.
Connects to the running ADMIN api in the api daemon.
"""
import sys
from nova import flags
from nova import utils
from nova.auth import users
from nova.compute import model
from nova.endpoint import cloud
import time
FLAGS = flags.FLAGS
class UserCommands(object):
def __init__(self):
self.manager = users.UserManager.instance()
def __print_export(self, user):
print 'export EC2_ACCESS_KEY=%s' % user.access
print 'export EC2_SECRET_KEY=%s' % user.secret
def admin(self, name, access=None, secret=None):
"""creates a new admin and prints exports
arguments: name [access] [secret]"""
user = self.manager.create_user(name, access, secret, True)
self.__print_export(user)
def create(self, name, access=None, secret=None):
"""creates a new user and prints exports
arguments: name [access] [secret]"""
user = self.manager.create_user(name, access, secret, False)
self.__print_export(user)
def delete(self, name):
"""deletes an existing user
arguments: name"""
self.manager.delete_user(name)
def exports(self, name):
"""prints access and secrets for user in export format
arguments: name"""
user = self.manager.get_user(name)
if user:
self.__print_export(user)
else:
print "User %s doesn't exist" % name
def list(self):
"""lists all users
arguments: <none>"""
for user in self.manager.get_users():
print user.name
def zip(self, name, filename='nova.zip'):
"""exports credentials for user to a zip file
arguments: name [filename='nova.zip']"""
user = self.manager.get_user(name)
if user:
with open(filename, 'w') as f:
f.write(user.get_credentials())
else:
print "User %s doesn't exist" % name
def usage(script_name):
print script_name + " category action [<args>]"
categories = [
('user', UserCommands),
]
def lazy_match(name, key_value_tuples):
"""finds all objects that have a key that case insensitively contains [name]
key_value_tuples is a list of tuples of the form (key, value)
returns a list of tuples of the form (key, value)"""
return [(k, v) for (k, v) in key_value_tuples if k.lower().find(name.lower()) == 0]
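# Example (illustrative): with categories = [('user', UserCommands)], both
# lazy_match('u', categories) and lazy_match('user', categories) return
# [('user', UserCommands)], while lazy_match('volume', categories) returns [].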
def methods_of(obj):
"""get all callable methods of an object that don't start with underscore
returns a list of tuples of the form (method_name, method)"""
return [(i, getattr(obj, i)) for i in dir(obj) if callable(getattr(obj, i)) and not i.startswith('_')]
if __name__ == '__main__':
utils.default_flagfile()
argv = FLAGS(sys.argv)
script_name = argv.pop(0)
if len(argv) < 1:
usage(script_name)
print "Available categories:"
for k, v in categories:
print "\t%s" % k
sys.exit(2)
category = argv.pop(0)
matches = lazy_match(category, categories)
if len(matches) == 0:
print "%s does not match any categories:" % category
for k, v in categories:
print "\t%s" % k
sys.exit(2)
if len(matches) > 1:
print "%s matched multiple categories:" % category
for k, v in matches:
print "\t%s" % k
sys.exit(2)
# instantiate the command group object
category, fn = matches[0]
command_object = fn()
actions = methods_of(command_object)
if len(argv) < 1:
usage(script_name)
print "Available actions for %s category:" % category
for k, v in actions:
print "\t%s" % k
sys.exit(2)
action = argv.pop(0)
matches = lazy_match(action, actions)
if len(matches) == 0:
print "%s does not match any actions" % action
for k, v in actions:
print "\t%s" % k
sys.exit(2)
if len(matches) > 1:
print "%s matched multiple actions:" % action
for k, v in matches:
print "\t%s" % k
sys.exit(2)
action, fn = matches[0]
# call the action with the remaining arguments
try:
fn(*argv)
except TypeError:
print "Wrong number of arguments supplied"
print "%s %s: %s" % (category, action, fn.__doc__)

49
bin/nova-objectstore Executable file
@@ -0,0 +1,49 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tornado daemon for nova objectstore. Supports S3 API.
"""
import logging
from nova import vendor
from tornado import httpserver
from tornado import ioloop
from nova import flags
from nova import server
from nova import utils
from nova.auth import users
from nova.objectstore import handler
FLAGS = flags.FLAGS
def main(argv):
# FIXME: if this log statement isn't here, no logging
# appears from other files and app won't start daemonized
logging.debug('Started HTTP server on %s' % (FLAGS.s3_internal_port))
app = handler.Application(users.UserManager())
server = httpserver.HTTPServer(app)
server.listen(FLAGS.s3_internal_port)
ioloop.IOLoop.instance().start()
if __name__ == '__main__':
utils.default_flagfile()
server.serve('nova-objectstore', main)

68
bin/nova-volume Executable file
@@ -0,0 +1,68 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tornado Storage daemon manages AoE volumes via AMQP messaging.
"""
import logging
from nova import vendor
from tornado import ioloop
from nova import flags
from nova import rpc
from nova import server
from nova import utils
from nova.volume import storage
FLAGS = flags.FLAGS
flags.DEFINE_integer('storage_report_state_interval', 10,
'seconds between broadcasting state to cloud',
lower_bound=1)
def main(argv):
bs = storage.BlockStore()
conn = rpc.Connection.instance()
consumer_all = rpc.AdapterConsumer(
connection=conn,
topic='%s' % FLAGS.storage_topic,
proxy=bs)
consumer_node = rpc.AdapterConsumer(
connection=conn,
topic='%s.%s' % (FLAGS.storage_topic, FLAGS.node_name),
proxy=bs)
io_inst = ioloop.IOLoop.instance()
scheduler = ioloop.PeriodicCallback(
lambda: bs.report_state(),
FLAGS.storage_report_state_interval * 1000,
io_loop=io_inst)
injected = consumer_all.attach_to_tornado(io_inst)
injected = consumer_node.attach_to_tornado(io_inst)
scheduler.start()
io_inst.start()
if __name__ == '__main__':
utils.default_flagfile()
server.serve('nova-volume', main)

6
debian/changelog vendored Normal file
@@ -0,0 +1,6 @@
nova (0.3.0-1) UNRELEASED; urgency=low
* initial release
-- Jesse Andrews <jesse@ansolabs.com> Thu, 27 May 2010 12:28:00 -0700

1
debian/compat vendored Normal file
@@ -0,0 +1 @@
7

40
debian/control vendored Normal file
@@ -0,0 +1,40 @@
Source: nova
Section: net
Priority: extra
Maintainer: Jesse Andrews <jesse@ansolabs.com>
Build-Depends: debhelper (>= 7)
Build-Depends-Indep: python-support
Standards-Version: 3.8.4
XS-Python-Version: 2.6
Package: nova-common
Architecture: all
Depends: ${python:Depends}, aoetools, vlan, python-ipy, python-boto, python-m2crypto, python-pycurl, python-twisted, python-daemon, python-redis, python-carrot, python-lockfile, python-gflags, python-tornado, ${misc:Depends}
Provides: ${python:Provides}
Conflicts: nova
Description: Nova is a cloud
Package: nova-compute
Architecture: all
Depends: nova-common (= ${binary:Version}), kpartx, kvm, python-libvirt, libvirt-bin (>= 0.8.1), ${python:Depends}, ${misc:Depends}
Description: Nova compute
Package: nova-volume
Architecture: all
Depends: nova-common (= ${binary:Version}), vblade, vblade-persist, ${python:Depends}, ${misc:Depends}
Description: Nova volume
Package: nova-api
Architecture: all
Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends}
Description: Nova api
Package: nova-objectstore
Architecture: all
Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends}
Description: Nova object store
Package: nova-tools
Architecture: all
Depends: python-boto, ${python:Depends}, ${misc:Depends}
Description: CLI tools to access nova

69
debian/nova-api.init vendored Normal file
@@ -0,0 +1,69 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: nova-api
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: nova-api
# Description: nova-api
### END INIT INFO
set -e
DAEMON=/usr/bin/nova-api
DAEMON_ARGS="--flagfile=/etc/nova.conf"
PIDFILE=/var/run/nova-api.pid
ENABLED=false
if test -f /etc/default/nova-api; then
. /etc/default/nova-api
fi
. /lib/lsb/init-functions
export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
case "$1" in
start)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Starting nova api" "nova-api"
cd /var/run
if $DAEMON $DAEMON_ARGS start; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Stopping nova api" "nova-api"
cd /var/run
if $DAEMON $DAEMON_ARGS stop; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart|force-reload)
test "$ENABLED" = "true" || exit 1
cd /var/run
if $DAEMON $DAEMON_ARGS restart; then
log_end_msg 0
else
log_end_msg 1
fi
;;
status)
test "$ENABLED" = "true" || exit 0
status_of_proc -p $PIDFILE $DAEMON nova-api && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/nova-api {start|stop|restart|force-reload|status}"
exit 1
;;
esac
exit 0

1
debian/nova-api.install vendored Normal file
@@ -0,0 +1 @@
bin/nova-api usr/bin

4
debian/nova-common.install vendored Normal file
@@ -0,0 +1,4 @@
bin/nova-manage usr/bin
nova/auth/novarc.template usr/lib/pymodules/python2.6/nova/auth
nova/compute/libvirt.xml.template usr/lib/pymodules/python2.6/nova/compute
usr/lib/python*/*-packages/nova/*

69
debian/nova-compute.init vendored Normal file
@@ -0,0 +1,69 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: nova-compute
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: nova-compute
# Description: nova-compute
### END INIT INFO
set -e
DAEMON=/usr/bin/nova-compute
DAEMON_ARGS="--flagfile=/etc/nova.conf"
PIDFILE=/var/run/nova-compute.pid
ENABLED=false
if test -f /etc/default/nova-compute; then
. /etc/default/nova-compute
fi
. /lib/lsb/init-functions
export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
case "$1" in
start)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Starting nova compute" "nova-compute"
cd /var/run
if $DAEMON $DAEMON_ARGS start; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Stopping nova compute" "nova-compute"
cd /var/run
if $DAEMON $DAEMON_ARGS stop; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart|force-reload)
test "$ENABLED" = "true" || exit 1
cd /var/run
if $DAEMON $DAEMON_ARGS restart; then
log_end_msg 0
else
log_end_msg 1
fi
;;
status)
test "$ENABLED" = "true" || exit 0
status_of_proc -p $PIDFILE $DAEMON nova-compute && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/nova-compute {start|stop|restart|force-reload|status}"
exit 1
;;
esac
exit 0

1
debian/nova-compute.install vendored Normal file
@@ -0,0 +1 @@
bin/nova-compute usr/bin

69
debian/nova-objectstore.init vendored Normal file
@@ -0,0 +1,69 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: nova-objectstore
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: nova-objectstore
# Description: nova-objectstore
### END INIT INFO
set -e
DAEMON=/usr/bin/nova-objectstore
DAEMON_ARGS="--flagfile=/etc/nova.conf"
PIDFILE=/var/run/nova-objectstore.pid
ENABLED=false
if test -f /etc/default/nova-objectstore; then
. /etc/default/nova-objectstore
fi
. /lib/lsb/init-functions
export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
case "$1" in
start)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Starting nova objectstore" "nova-objectstore"
cd /var/run
if $DAEMON $DAEMON_ARGS start; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Stopping nova objectstore" "nova-objectstore"
cd /var/run
if $DAEMON $DAEMON_ARGS stop; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart|force-reload)
test "$ENABLED" = "true" || exit 1
cd /var/run
if $DAEMON $DAEMON_ARGS restart; then
log_end_msg 0
else
log_end_msg 1
fi
;;
status)
test "$ENABLED" = "true" || exit 0
status_of_proc -p $PIDFILE $DAEMON nova-objectstore && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/nova-objectstore {start|stop|restart|force-reload|status}"
exit 1
;;
esac
exit 0

1
debian/nova-objectstore.install vendored Normal file
@@ -0,0 +1 @@
bin/nova-objectstore usr/bin

69
debian/nova-volume.init vendored Normal file
@@ -0,0 +1,69 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: nova-volume
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: nova-volume
# Description: nova-volume
### END INIT INFO
set -e
DAEMON=/usr/bin/nova-volume
DAEMON_ARGS="--flagfile=/etc/nova.conf"
PIDFILE=/var/run/nova-volume.pid
ENABLED=false
if test -f /etc/default/nova-volume; then
. /etc/default/nova-volume
fi
. /lib/lsb/init-functions
export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
case "$1" in
start)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Starting nova volume" "nova-volume"
cd /var/run
if $DAEMON $DAEMON_ARGS start; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Stopping nova volume" "nova-volume"
cd /var/run
if $DAEMON $DAEMON_ARGS stop; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart|force-reload)
test "$ENABLED" = "true" || exit 1
cd /var/run
if $DAEMON $DAEMON_ARGS restart; then
log_end_msg 0
else
log_end_msg 1
fi
;;
status)
test "$ENABLED" = "true" || exit 0
status_of_proc -p $PIDFILE $DAEMON nova-volume && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/nova-volume {start|stop|restart|force-reload|status}"
exit 1
;;
esac
exit 0

1
debian/nova-volume.install vendored Normal file
@@ -0,0 +1 @@
bin/nova-volume usr/bin

1
debian/pycompat vendored Normal file
@@ -0,0 +1 @@
2

1
debian/pyversions vendored Normal file
@@ -0,0 +1 @@
2.6-

4
debian/rules vendored Executable file
@@ -0,0 +1,4 @@
#!/usr/bin/make -f
%:
dh $@

1
docs/.gitignore vendored Normal file
@@ -0,0 +1 @@
_build/*

89
docs/Makefile Normal file
@@ -0,0 +1,89 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
-rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/nova.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/nova.qhc"
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
"run these through (pdf)latex."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."

1
docs/_build/.gitignore vendored Normal file
@@ -0,0 +1 @@
*

0
docs/_static/.gitignore vendored Normal file

0
docs/_templates/.gitignore vendored Normal file

46
docs/architecture.rst Normal file
@@ -0,0 +1,46 @@
..
Copyright [2010] [Anso Labs, LLC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
nova System Architecture
========================
Nova is built on a shared-nothing, messaging-based architecture. All of the major nova components can be run on multiple servers. This means that most component to component communication must go via message queue. In order to avoid blocking each component while waiting for a response, we use deferred objects, with a callback that gets triggered when a response is received.
In order to achieve shared-nothing with multiple copies of the same component (especially when the component is an API server that needs to reply with state information in a timely fashion), we need to keep all of our system state in a distributed data system. Updates to system state are written into this system, using atomic transactions when necessary. Requests for state are read out of this system. In limited cases, these read calls are memoized within controllers for short periods of time. (Such a limited case would be, for instance, the current list of system users.)
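For example, the compute worker kicks off instance adoption and attaches a
callback rather than blocking on the result. A minimal sketch of the pattern
(illustrative only; the function below is hypothetical, not Nova's actual
interface)::

    import logging

    from twisted.internet import defer


    def adopt_instances():
        """Return a Deferred immediately; the result arrives later via the queue."""
        d = defer.Deferred()
        # ...an AMQP consumer will eventually call d.callback(count)...
        return d


    d = adopt_instances()
    d.addCallback(lambda count: logging.info('Adopted %d instances', count))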
Components
----------
The diagram and list below show how the major components fit together.
::
[ User Manager ] ---- ( LDAP )
|
| / [ Storage ] - ( ATAoE )
[ API server ] -> [ Cloud ] < AMQP >
| \ [ Nodes ] - ( libvirt/kvm )
< HTTP >
|
[ S3 ]
* API: receives HTTP requests from boto, converts commands to/from the API format, and sends requests to the cloud controller
* Cloud Controller: global state of system, talks to ldap, s3, and node/storage workers through a queue
* Nodes: worker that spawns instances
* S3: tornado based http/s3 server
* User Manager: create/manage users, which are stored in ldap
* Network Controller: allocate and deallocate IPs and VLANs

213
docs/auth.rst Normal file
@@ -0,0 +1,213 @@
..
Copyright [2010] [Anso Labs, LLC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Auth Documentation
==================
Nova provides RBAC (role-based access control) for the AWS-style APIs. We define the following roles:
Roles-Based Access Control of AWS-style APIs using SAML Assertions
“Achieving FIPS 199 Moderate certification of a hybrid cloud environment using CloudAudit and declarative C.I.A. classifications”
Introduction
--------------
We will investigate one method for integrating an AWS-style API with US eAuthentication-compatible federated authentication systems, to achieve access controls and limits based on traditional operational roles.
Additionally, we will look at how combining this approach, with an implementation of the CloudAudit APIs, will allow us to achieve a certification under FIPS 199 Moderate classification for a hybrid cloud environment.
Relationship of US eAuth to RBAC
--------------------------------
Typical implementations of US eAuth authentication systems are structured as follows::
[ MS Active Directory or other federated LDAP user store ]
--> backends to…
[ SUN Identity Manager or other SAML Policy Controller ]
--> maps URLs to groups…
[ Apache Policy Agent in front of eAuth-secured Web Application ]
In more ideal implementations, the remainder of the application-specific account information is stored either in extended schema on the LDAP server itself, via the use of a translucent LDAP proxy, or in an independent datastore keyed off of the UID provided via SAML assertion.
Basic AWS API call structure
----------------------------
AWS API calls are traditionally secured via Access and Secret Keys, which are used to sign API calls, along with timestamps to prevent replay attacks (a minimal signing sketch follows the role lists below). The APIs can be logically grouped into sets that align with five typical roles:
* System User
* System Administrator
* Network Administrator
* Project Manager
* Cloud Administrator
* (IT-Sec?)
There is an additional, conceptual end-user that may or may not have API access:
* (EXTERNAL) End-user / Third-party User
Basic operations are available to any System User:
* Launch Instance
* Terminate Instance (their own)
* Create keypair
* Delete keypair
* Create, Upload, Delete: Buckets and Keys (Object Store) their own
* Create, Attach, Delete Volume (Block Store) their own
System Administrators:
* Register/Unregister Machine Image (project-wide)
* Change Machine Image properties (public / private)
* Request / Review CloudAudit Scans
Network Administrator:
* Change Firewall Rules, define Security Groups
* Allocate, Associate, Deassociate Public IP addresses
Project Manager:
* Launch and Terminate Instances (project-wide)
* CRUD of Object and Block store (project-wide)
Cloud Administrator:
* Register / Unregister Kernel and Ramdisk Images
* Register / Unregister Machine Image (any)
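A minimal, generic sketch of this signing scheme (illustrative only; Nova's
actual implementation lives in the signer module documented below, and the
helper name used here is hypothetical)::

    import base64
    import hashlib
    import hmac
    import time
    import urllib


    def sign_request(params, secret_key, host, path):
        """Sign a query AWS-style: sorted params, HMAC over a canonical string."""
        params = dict(params)
        params['Timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
        canonical = '&'.join('%s=%s' % (k, urllib.quote(str(params[k]), safe='-_~'))
                             for k in sorted(params))
        to_sign = '\n'.join(['GET', host, path, canonical])
        digest = hmac.new(secret_key, to_sign, hashlib.sha256).digest()
        return base64.b64encode(digest)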
Enhancements
------------
* SAML Token passing
* REST interfaces
* SOAP interfaces
Wrapping the SAML token into the API calls.
Then store the UID (fetched via backchannel) into the instance metadata, providing end-to-end auditability of ownership and responsibility, without PII.
CloudAudit APIs
---------------
* Request formats
* Response formats
* Stateless asynchronous queries
CloudAudit queries may spawn long-running processes (similar to launching instances, etc.) They need to return a ReservationId in the same fashion, which can be returned in further queries for updates.
RBAC of CloudAudit API calls is critical, since detailed system information is a system vulnerability.
Type declarations
---------------------
* Data declarations Volumes and Objects
* System declarations Instances
Existing API calls to launch instances specify a single, combined “type” flag. We propose to extend this with three additional type declarations, mapping to the “Confidentiality, Integrity, Availability” classifications of FIPS 199. An example API call would look like::
RunInstances type=m1.large number=1 secgroup=default key=mykey confidentiality=low integrity=low availability=low
These additional parameters would also apply to creation of block storage volumes (along with the existing parameter of size), and creation of object storage buckets. (C.I.A. classifications on a bucket would be inherited by the keys within this bucket.)
Request Brokering
-----------------
* Cloud Interop
* IMF Registration / PubSub
* Digital C&A
Establishing declarative semantics for individual API calls will allow the cloud environment to seamlessly proxy these API calls to external, third-party vendors when the requested CIA levels match.
See related work within the Infrastructure 2.0 working group for more information on how the IMF Metadata specification could be utilized to manage registration of these vendors and their C&A credentials.
Dirty Cloud Hybrid Data Centers
---------------------------------
* CloudAudit bridge interfaces
* Anything in the ARP table
A hybrid cloud environment provides dedicated, potentially co-located physical hardware with a network interconnect to the project or user's cloud virtual network.
This interconnect is typically a bridged VPN connection. Any machines that can be bridged into a hybrid environment in this fashion (at Layer 2) must implement a minimum version of the CloudAudit spec, such that they can be queried to provide a complete picture of the IT-sec runtime environment.
Network discovery protocols (ARP, CDP) can be applied in this case, and existing protocols (SNMP location data, DNS LOC records) overloaded to provide CloudAudit information.
The Details
-----------
* Preliminary Roles Definitions
* Categorization of available API calls
* SAML assertion vocabulary
System limits
-------------
The following limits need to be defined and enforced:
* Total number of instances allowed (user / project)
* Total number of instances, per instance type (user / project)
* Total number of volumes (user / project)
* Maximum size of volume
* Cumulative size of all volumes
* Total use of object storage (GB)
* Total number of Public IPs
Further Challenges
------------------
* Prioritization of users / jobs in shared computing environments
* Incident response planning
* Limit launch of instances to specific security groups based on AMI
* Store AMIs in LDAP for added property control
The :mod:`access` Module
--------------------------
.. automodule:: nova.auth.access
:members:
:undoc-members:
:show-inheritance:
The :mod:`signer` Module
------------------------
.. automodule:: nova.auth.signer
:members:
:undoc-members:
:show-inheritance:
The :mod:`users` Module
-----------------------
.. automodule:: nova.auth.users
:members:
:undoc-members:
:show-inheritance:
The :mod:`users_unittest` Module
--------------------------------
.. automodule:: nova.tests.users_unittest
:members:
:undoc-members:
:show-inheritance:
The :mod:`access_unittest` Module
---------------------------------
.. automodule:: nova.tests.access_unittest
:members:
:undoc-members:
:show-inheritance:

29
docs/binaries.rst Normal file
@@ -0,0 +1,29 @@
..
Copyright [2010] [Anso Labs, LLC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Nova Binaries
===============
* nova-api
* nova-compute
* nova-manage
* nova-objectstore
* nova-volume
The configuration of these binaries relies on "flagfiles" using the Google
gflags package. If present, the nova.conf file will be used as the flagfile
- otherwise, it must be specified on the command line::
$ python node_worker.py --flagfile flagfile
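A flagfile simply lists long-form flags, one per line. A hypothetical nova.conf
might contain (the flag names appear in the daemons above; the values shown are
illustrative only)::

    --cc_port=8773
    --s3_internal_port=3333
    --node_report_state_interval=10
    --storage_report_state_interval=10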

72
docs/compute.rst Normal file
@@ -0,0 +1,72 @@
..
Copyright [2010] [Anso Labs, LLC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Compute Documentation
=====================
This page contains the Compute Package documentation.
The :mod:`disk` Module
----------------------
.. automodule:: nova.compute.disk
:members:
:undoc-members:
:show-inheritance:
The :mod:`exception` Module
---------------------------
.. automodule:: nova.compute.exception
:members:
:undoc-members:
:show-inheritance:
The :mod:`model` Module
-------------------------
.. automodule:: nova.compute.model
:members:
:undoc-members:
:show-inheritance:
The :mod:`network` Module
-------------------------
.. automodule:: nova.compute.network
:members:
:undoc-members:
:show-inheritance:
The :mod:`node` Module
----------------------
.. automodule:: nova.compute.node
:members:
:undoc-members:
:show-inheritance:
RELATED TESTS
---------------
The :mod:`node_unittest` Module
-------------------------------
.. automodule:: nova.tests.node_unittest
:members:
:undoc-members:
:show-inheritance:

202
docs/conf.py Normal file
@@ -0,0 +1,202 @@
# -*- coding: utf-8 -*-
#
# nova documentation build configuration file, created by
# sphinx-quickstart on Sat May 1 15:17:47 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
sys.path.extend([os.path.abspath('../nova'), os.path.abspath('../'), os.path.abspath('../vendor')])
from nova import vendor
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig']
#sphinx_to_github = False
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nova'
copyright = u'2010, Anso Labs, LLC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.42'
# The full version, including alpha/beta/rc tags.
release = '0.42'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['nova.']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'novadoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'nova.tex', u'nova Documentation',
u'Anso Labs, LLC', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}

89
docs/endpoint.rst Normal file
View File

@ -0,0 +1,89 @@
..
Copyright [2010] [Anso Labs, LLC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Endpoint Documentation
======================
This page contains the Endpoint Package documentation.
The :mod:`admin` Module
-----------------------
.. automodule:: nova.endpoint.admin
:members:
:undoc-members:
:show-inheritance:
The :mod:`api` Module
---------------------
.. automodule:: nova.endpoint.api
:members:
:undoc-members:
:show-inheritance:
The :mod:`cloud` Module
-----------------------
.. automodule:: nova.endpoint.cloud
:members:
:undoc-members:
:show-inheritance:
The :mod:`images` Module
------------------------
.. automodule:: nova.endpoint.images
:members:
:undoc-members:
:show-inheritance:
RELATED TESTS
--------------
The :mod:`api_unittest` Module
------------------------------
.. automodule:: nova.tests.api_unittest
:members:
:undoc-members:
:show-inheritance:
The :mod:`api_integration` Module
---------------------------------
.. automodule:: nova.tests.api_integration
:members:
:undoc-members:
:show-inheritance:
The :mod:`cloud_unittest` Module
--------------------------------
.. automodule:: nova.tests.cloud_unittest
:members:
:undoc-members:
:show-inheritance:
The :mod:`network_unittest` Module
----------------------------------
.. automodule:: nova.tests.network_unittest
:members:
:undoc-members:
:show-inheritance:

41
docs/fakes.rst Normal file
View File

@ -0,0 +1,41 @@
..
Copyright [2010] [Anso Labs, LLC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Nova Fakes
==========
The :mod:`fakevirt` Module
--------------------------
.. automodule:: nova.fakevirt
:members:
:undoc-members:
:show-inheritance:
The :mod:`fakeldap` Module
--------------------------
.. automodule:: nova.auth.fakeldap
:members:
:undoc-members:
:show-inheritance:
The :mod:`fakerabbit` Module
----------------------------
.. automodule:: nova.fakerabbit
:members:
:undoc-members:
:show-inheritance:

70
docs/getting.started.rst Normal file
View File

@ -0,0 +1,70 @@
..
Copyright [2010] [Anso Labs, LLC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Getting Started with Nova
=========================
GOTTA HAVE a nova.pth file added or it WON'T WORK (will write a setup.py file soon)
DEPENDENCIES
------------
* RabbitMQ: messaging queue, used for all communication between components
* OpenLDAP: users, groups (maybe cut)
* Tornado: scalable non-blocking web server for API requests
* Twisted: just for the twisted.internet.defer package
* boto: Python interface to the AWS APIs
* M2Crypto: Python interface to OpenSSL
* IPy: library for managing IP addresses
* Redis: remote dictionary store (for fast, shared state data)
Recommended
-----------------
* euca2ools: Python implementation of the AWS ec2-tools and AMI tools
* build Tornado to use its C module for the evented section
Installation
--------------
::
# ON ALL SYSTEMS
apt-get install -y python-libvirt libvirt-bin python-setuptools python-dev python-pycurl python-m2crypto python-twisted
apt-get install -y aoetools vlan
modprobe aoe
# ON THE CLOUD CONTROLLER
apt-get install -y rabbitmq-server dnsmasq
# fix ec2 metadata/userdata uri - where $IP is the IP of the cloud
iptables -t nat -A PREROUTING -s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination $IP:8773
iptables --table nat --append POSTROUTING --out-interface $PUBLICIFACE -j MASQUERADE
# setup ldap (slap.sh as root will remove ldap and reinstall it)
auth/slap.sh
/etc/init.d/rabbitmq-server start
# ON VOLUME NODE:
apt-get install -y vblade-persist
# ON THE COMPUTE NODE:
apt-get install -y kpartx kvm
# optional packages
apt-get install -y euca2ools
# Set up flagfiles with the appropriate hostnames, etc.
# start api_worker, s3_worker, node_worker, storage_worker
# Add yourself to the libvirtd group, log out, and log back in
# Make sure the user who will launch the workers has sudo privileges w/o pass (will fix later)

53
docs/index.rst Normal file
View File

@ -0,0 +1,53 @@
..
Copyright [2010] [Anso Labs, LLC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Welcome to nova's documentation!
================================
Nova is a cloud computing fabric controller (the main part of an IaaS system) built to match the popular AWS EC2 and S3 APIs.
It is written in Python, using the Tornado and Twisted frameworks, and relies on the standard AMQP messaging protocol
and the Redis distributed key-value store.
Nova is intended to be easy to extend and adapt. For example, it currently uses
an LDAP server for users and groups, but also includes a fake LDAP server
that stores data in Redis. It has extensive test coverage and uses the
Sphinx toolkit (the same as Python itself) for code and user documentation.
While Nova is currently in Beta use within several organizations, the codebase
is very much under active development - there are bugs!
Contents:
.. toctree::
:maxdepth: 2
getting.started
architecture
network
storage
auth
compute
endpoint
nova
fakes
binaries
todo
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

32
docs/modules.rst Normal file
View File

@ -0,0 +1,32 @@
..
Copyright [2010] [Anso Labs, LLC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Nova Documentation
==================
This page contains the Nova Modules documentation.
Modules:
--------
.. toctree::
:maxdepth: 4
auth
compute
endpoint
fakes
nova
volume

86
docs/network.rst Normal file
View File

@ -0,0 +1,86 @@
..
Copyright [2010] [Anso Labs, LLC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
nova Networking
================
The nova networking components manage private networks, public IP addressing, VPN connectivity, and firewall rules.
Components
----------
There are several key components:
* NetworkController (Manages address and vlan allocation)
* RoutingNode (NATs public IPs to private IPs, and enforces firewall rules)
* AddressingNode (runs DHCP services for private networks)
* BridgingNode (a subclass of the basic nova ComputeNode)
* TunnelingNode (provides VPN connectivity)
Component Diagram
-----------------
Overview::
(PUBLIC INTERNET)
| \
/ \ / \
[RoutingNode] ... [RN] [TunnelingNode] ... [TN]
| \ / | |
| < AMQP > | |
[AddressingNode]-- (VLAN) ... | (VLAN)... (VLAN) --- [AddressingNode]
\ | \ /
/ \ / \ / \ / \
[BridgingNode] ... [BridgingNode]
[NetworkController] ... [NetworkController]
\ /
< AMQP >
|
/ \
[CloudController]...[CloudController]
While this diagram may not make this entirely clear, nodes and controllers communicate exclusively across the message bus (AMQP, currently).
State Model
-----------
Network State consists of the following facts:
* VLAN assignment (to a project)
* Private Subnet assignment (to a security group) in a VLAN
* Private IP assignments (to running instances)
* Public IP allocations (to a project)
* Public IP associations (to a private IP / running instance)
While copies of this state exist in many places (expressed in iptables rule chains, DHCP hosts files, etc.), the controllers rely only on the distributed "fact engine" for state, queried over RPC (currently AMQP). The NetworkController inserts most records into this datastore (allocating addresses, etc.); however, individual nodes update state as well, e.g. when running instances crash.
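
As a minimal sketch of how such a fact might be recorded (assuming the Redis-backed ``RedisModel`` helper from ``nova.datastore``, parsed flags, and a reachable Redis server; the ``PublicAddress`` model name is purely illustrative)::

    from nova import datastore

    class PublicAddress(datastore.RedisModel):
        # hypothetical model; keyed by the public IP itself
        object_type = 'address'

    addr = PublicAddress('10.0.4.17')
    addr['project_id'] = 'myproject'      # allocation to a project
    addr['private_ip'] = '192.168.0.3'    # association to a running instance
    addr.save()

Queries for current state then go against the same datastore, rather than reading iptables chains or DHCP hosts files directly.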
The Public Traffic Path
-----------------------
Public Traffic::
(PUBLIC INTERNET)
|
<NAT> <-- [RoutingNode]
|
[AddressingNode] --> |
( VLAN )
| <-- [BridgingNode]
|
<RUNNING INSTANCE>
The RoutingNode is currently implemented using iptables rules, which implement both NATing of public IP addresses and the appropriate firewall chains. We are also looking at using Netomata / Clusto to manage NATing within a switch or router, and/or to manage firewall rules within a hardware firewall appliance.
Similarly, the AddressingNode currently manages running dnsmasq instances for DHCP services. However, we could run an internal DHCP server (using Scapy, a la Clusto), or even switch to static addressing by inserting the private address into the disk image the same way we insert the SSH keys. (See compute for more details.)

89
docs/nova.rst Normal file
View File

@ -0,0 +1,89 @@
..
Copyright [2010] [Anso Labs, LLC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
NOVA Libraries
===============
The :mod:`crypto` Module
------------------------
.. automodule:: nova.crypto
:members:
:undoc-members:
:show-inheritance:
The :mod:`adminclient` Module
-----------------------------
.. automodule:: nova.adminclient
:members:
:undoc-members:
:show-inheritance:
The :mod:`datastore` Module
---------------------------
.. automodule:: nova.datastore
:members:
:undoc-members:
:show-inheritance:
The :mod:`exception` Module
---------------------------
.. automodule:: nova.exception
:members:
:undoc-members:
:show-inheritance:
The :mod:`flags` Module
---------------------------
.. automodule:: nova.flags
:members:
:undoc-members:
:show-inheritance:
The :mod:`rpc` Module
---------------------------
.. automodule:: nova.rpc
:members:
:undoc-members:
:show-inheritance:
The :mod:`server` Module
---------------------------
.. automodule:: nova.server
:members:
:undoc-members:
:show-inheritance:
The :mod:`test` Module
---------------------------
.. automodule:: nova.test
:members:
:undoc-members:
:show-inheritance:
The :mod:`utils` Module
---------------------------
.. automodule:: nova.utils
:members:
:undoc-members:
:show-inheritance:

64
docs/objectstore.rst Normal file
View File

@ -0,0 +1,64 @@
..
Copyright [2010] [Anso Labs, LLC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Objectstore Documentation
=========================
This page contains the Objectstore Package documentation.
The :mod:`bucket` Module
------------------------
.. automodule:: nova.objectstore.bucket
:members:
:undoc-members:
:show-inheritance:
The :mod:`handler` Module
-------------------------
.. automodule:: nova.objectstore.handler
:members:
:undoc-members:
:show-inheritance:
The :mod:`image` Module
-----------------------
.. automodule:: nova.objectstore.image
:members:
:undoc-members:
:show-inheritance:
The :mod:`stored` Module
------------------------
.. automodule:: nova.objectstore.stored
:members:
:undoc-members:
:show-inheritance:
RELATED TESTS
-------------
The :mod:`objectstore_unittest` Module
--------------------------------------
.. automodule:: nova.tests.objectstore_unittest
:members:
:undoc-members:
:show-inheritance:

27
docs/packages.rst Normal file
View File

@ -0,0 +1,27 @@
..
Copyright [2010] [Anso Labs, LLC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
nova Packages & Dependencies
============================
Nova is being built on Ubuntu Lucid.
The following packages are required:
apt-get install python-ipy python-libvirt python-boto python-pycurl python-twisted python-daemon python-redis python-carrot python-lockfile
In addition you need to install the following Python package manually:
* python-gflags - http://code.google.com/p/python-gflags/

29
docs/storage.rst Normal file
View File

@ -0,0 +1,29 @@
..
Copyright [2010] [Anso Labs, LLC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Storage in the Nova Cloud
=========================
There are three primary classes of storage in a nova cloud environment:
* Ephemeral Storage (local disk within an instance)
* Volume Storage (network-attached FS)
* Object Storage (redundant KVS with locality and MR)
.. toctree::
:maxdepth: 2
volume
objectstore

43
docs/volume.rst Normal file
View File

@ -0,0 +1,43 @@
..
Copyright [2010] [Anso Labs, LLC]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Volume Documentation
====================
Nova uses ATA over Ethernet (AoE) to export storage volumes from multiple storage nodes. These AoE exports are attached (using libvirt) directly to running instances.
Nova volumes are exported over the primary system VLAN (usually VLAN 1), and not over individual VLANs.
AoE exports are numbered according to a "shelf and blade" syntax. In order to avoid collisions, we currently perform an AoE-discover of existing exports, and then grab the next unused number. (This obviously has race condition problems, and should be replaced by allocating a shelf-id to each storage node.)
The underlying volumes are LVM logical volumes, created on demand within a single large volume group.
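
A rough sketch of the export-numbering step described above (the function name and blade limit are illustrative; the real logic lives in :mod:`nova.volume.storage`)::

    def next_free_blade(existing_exports, shelf_id=0, max_blades=16):
        # existing_exports is a set of (shelf, blade) tuples gathered from
        # an aoe-discover pass; as noted above, this approach is racy.
        used = set(blade for shelf, blade in existing_exports
                   if shelf == shelf_id)
        for blade in range(max_blades):
            if blade not in used:
                return blade
        raise RuntimeError("no free blades on shelf %d" % shelf_id)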
The :mod:`storage` Module
-------------------------
.. automodule:: nova.volume.storage
:members:
:undoc-members:
:show-inheritance:
The :mod:`storage_unittest` Module
----------------------------------
.. automodule:: nova.tests.storage_unittest
:members:
:undoc-members:
:show-inheritance:

30
nova/__init__.py Normal file
View File

@ -0,0 +1,30 @@
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`nova` -- Cloud IaaS Platform
===================================
.. automodule:: nova
:platform: Unix
:synopsis: Infrastructure-as-a-Service Cloud platform.
.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
from exception import *

113
nova/adminclient.py Normal file
View File

@ -0,0 +1,113 @@
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Nova User API client library.
"""
import boto
from boto.ec2.regioninfo import RegionInfo
import base64
class UserInfo(object):
""" Information about a Nova user
fields include:
username
accesskey
secretkey
and an optional field containing a zip with X509 cert & rc
file
"""
def __init__(self, connection=None, username=None, endpoint=None):
self.connection = connection
self.username = username
self.endpoint = endpoint
def __repr__(self):
return 'UserInfo:%s' % self.username
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'username':
self.username = str(value)
elif name == 'file':
self.file = base64.b64decode(str(value))
elif name == 'accesskey':
self.accesskey = str(value)
elif name == 'secretkey':
self.secretkey = str(value)
class NovaAdminClient(object):
def __init__(self, clc_ip='127.0.0.1', region='nova', access_key='admin',
secret_key='admin', **kwargs):
self.clc_ip = clc_ip
self.region = region
self.access = access_key
self.secret = secret_key
self.apiconn = boto.connect_ec2(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
is_secure=False,
region=RegionInfo(None, region, clc_ip),
port=8773,
path='/services/Admin',
**kwargs)
self.apiconn.APIVersion = 'nova'
def connection_for(self, username, **kwargs):
"""
Returns a boto ec2 connection for the given username.
"""
user = self.get_user(username)
return boto.connect_ec2(
aws_access_key_id=user.accesskey,
aws_secret_access_key=user.secretkey,
is_secure=False,
region=RegionInfo(None, self.region, self.clc_ip),
port=8773,
path='/services/Cloud',
**kwargs
)
def get_users(self):
""" grabs the list of all users """
return self.apiconn.get_list('DescribeUsers', {}, (['item', UserInfo]))
def get_user(self, name):
""" grab a single user by name """
user = self.apiconn.get_object('DescribeUser', {'Name': name}, UserInfo)
if user.username != None:
return user
def has_user(self, username):
""" determine if user exists """
return self.get_user(username) != None
def create_user(self, username):
""" creates a new user, returning the userinfo object with access/secret """
return self.apiconn.get_object('RegisterUser', {'Name': username}, UserInfo)
def delete_user(self, username):
""" deletes a user """
return self.apiconn.get_object('DeregisterUser', {'Name': username}, UserInfo)
def get_zip(self, username):
""" returns the content of a zip file containing novarc and access credentials. """
return self.apiconn.get_object('GenerateX509ForUser', {'Name': username}, UserInfo).file
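# A minimal usage sketch (not part of the original module). It assumes a nova
# API is listening on 127.0.0.1:8773 with the default admin keys configured
# above; the 'demo' user name is purely illustrative.
if __name__ == '__main__':
    admin = NovaAdminClient()
    if not admin.has_user('demo'):
        admin.create_user('demo')
    # boto EC2 connection that acts as the newly created user
    ec2 = admin.connection_for('demo')
    print [user.username for user in admin.get_users()]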

25
nova/auth/__init__.py Normal file
View File

@ -0,0 +1,25 @@
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`nova.auth` -- Authentication and Access Control
=====================================================
.. automodule:: nova.auth
:platform: Unix
:synopsis: User-and-Project based RBAC using LDAP, SAML.
.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
"""

69
nova/auth/access.py Normal file
View File

@ -0,0 +1,69 @@
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple base set of RBAC rules which map API endpoints to LDAP groups.
For testing accounts, users will always have PM privileges.
"""
# This is logically a RuleSet or some such.
def allow_describe_images(user, project, target_object):
return True
def allow_describe_instances(user, project, target_object):
return True
def allow_describe_addresses(user, project, target_object):
return True
def allow_run_instances(user, project, target_object):
# target_object is a reservation, not an instance
# it needs to include count, type, image, etc.
# First, is the project allowed to use this image
# Second, is this user allowed to launch within this project
# Third, is the count or type within project quota
return True
def allow_terminate_instances(user, project, target_object):
# In a project, the PMs and Sysadmins can terminate
return True
def allow_get_console_output(user, project, target_object):
# If the user launched the instance,
# Or is a sysadmin in the project,
return True
def allow_allocate_address(user, project, target_object):
# There's no security concern in allocation,
# but it can get expensive. Limit to PM and NE.
return True
def allow_associate_address(user, project, target_object):
# project NE only
# In future, will perform a CloudAudit scan first
# (Pass / Fail gate)
return True
def allow_register(user, project, target_object):
return False
def is_allowed(action, user, project, target_object):
return globals()['allow_%s' % action](user, project, target_object)
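# Hedged usage sketch (not part of the original file): an API handler would
# consult these rules before acting, roughly like the following, where the
# user and project objects come from nova.auth.users:
#
#     if not is_allowed('run_instances', user, project, reservation):
#         raise exception.ApiError('Not authorized')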

81
nova/auth/fakeldap.py Normal file
View File

@ -0,0 +1,81 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fake LDAP server for test harnesses.
"""
import logging
from nova import datastore
SCOPE_SUBTREE = 1
class NO_SUCH_OBJECT(Exception):
pass
def initialize(uri):
return FakeLDAP(uri)
class FakeLDAP(object):
def __init__(self, _uri):
self.keeper = datastore.Keeper('fakeldap')
if self.keeper['objects'] is None:
self.keeper['objects'] = {}
def simple_bind_s(self, dn, password):
pass
def unbind_s(self):
pass
def search_s(self, dn, scope, query=None, fields=None):
logging.debug("searching for %s" % dn)
filtered = {}
d = self.keeper['objects'] or {}
for cn, attrs in d.iteritems():
if cn[-len(dn):] == dn:
filtered[cn] = attrs
if query:
k,v = query[1:-1].split('=')
objects = {}
for cn, attrs in filtered.iteritems():
if attrs.has_key(k) and (v in attrs[k] or
v == attrs[k]):
objects[cn] = attrs
if objects == {}:
raise NO_SUCH_OBJECT()
return objects.items()
def add_s(self, cn, attr):
logging.debug("adding %s" % cn)
stored = {}
for k, v in attr:
if type(v) is list:
stored[k] = v
else:
stored[k] = [v]
d = self.keeper['objects']
d[cn] = stored
self.keeper['objects'] = d
def delete_s(self, cn):
logging.debug("creating for %s" % cn)
d = self.keeper['objects'] or {}
del d[cn]
self.keeper['objects'] = d
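# Minimal usage sketch (not part of the original module): drives the fake the
# same way nova.auth.users drives a real LDAP connection. It assumes flags
# have been parsed and the Redis-backed Keeper from nova.datastore is
# reachable; the DNs below are purely illustrative.
if __name__ == '__main__':
    conn = initialize('fake://localhost')
    conn.add_s('cn=demo,ou=Users,dc=example,dc=com',
               [('objectclass', ['novaUser']), ('cn', ['demo'])])
    print conn.search_s('ou=Users,dc=example,dc=com', SCOPE_SUBTREE, '(cn=demo)')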

26
nova/auth/novarc.template Normal file
View File

@ -0,0 +1,26 @@
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NOVA_KEY_DIR=$(pushd $(dirname $BASH_SOURCE)>/dev/null; pwd; popd>/dev/null)
export EC2_ACCESS_KEY="%(access)s"
export EC2_SECRET_KEY="%(secret)s"
export EC2_URL="%(ec2)s"
export S3_URL="%(s3)s"
export EC2_USER_ID=42 # nova does not use user id, but bundling requires it
export EC2_PRIVATE_KEY=${NOVA_KEY_DIR}/%(key)s
export EC2_CERT=${NOVA_KEY_DIR}/%(cert)s
export NOVA_CERT=${NOVA_KEY_DIR}/%(nova)s
export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set
alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}"
alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"

60
nova/auth/rbac.ldif Normal file
View File

@ -0,0 +1,60 @@
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# LDIF fragment to create group branch under root
#dn: ou=Groups,dc=example,dc=com
#objectclass:organizationalunit
#ou: groups
#description: generic groups branch
# create the sysadmins entry
dn: cn=sysadmins,ou=Groups,dc=example,dc=com
objectclass: groupofnames
cn: sysadmins
description: IT admin group
# add the group members all of which are
# assumed to exist under Users
#member: cn=micky mouse,ou=people,dc=example,dc=com
member: cn=admin,ou=Users,dc=example,dc=com
dn: cn=netadmins,ou=Groups,dc=example,dc=com
objectclass: groupofnames
cn: netadmins
description: Network admin group
member: cn=admin,ou=Users,dc=example,dc=com
dn: cn=cloudadmins,ou=Groups,dc=example,dc=com
objectclass: groupofnames
cn: cloudadmins
description: Cloud admin group
member: cn=admin,ou=Users,dc=example,dc=com
dn: cn=itsec,ou=Groups,dc=example,dc=com
objectclass: groupofnames
cn: itsec
description: IT security users group
member: cn=admin,ou=Users,dc=example,dc=com
# Example Project Group to demonstrate members
# and project members
dn: cn=myproject,ou=Groups,dc=example,dc=com
objectclass: groupofnames
objectclass: novaProject
cn: myproject
description: My Project Group
member: cn=admin,ou=Users,dc=example,dc=com
projectManager: cn=admin,ou=Users,dc=example,dc=com

127
nova/auth/signer.py Normal file
View File

@ -0,0 +1,127 @@
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# PORTIONS OF THIS FILE ARE FROM:
# http://code.google.com/p/boto
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Utility class for computing EC2 API request signatures.
"""
import logging
import hashlib
import hmac
import urllib
import base64
from nova.exception import Error
_log = logging.getLogger('signer')
logging.getLogger('signer').setLevel(logging.WARN)
class Signer(object):
""" hacked up code from boto/connection.py """
def __init__(self, secret_key):
self.hmac = hmac.new(secret_key, digestmod=hashlib.sha1)
if hashlib.sha256:
self.hmac_256 = hmac.new(secret_key, digestmod=hashlib.sha256)
def generate(self, params, verb, server_string, path):
if params['SignatureVersion'] == '0':
t = self._calc_signature_0(params)
elif params['SignatureVersion'] == '1':
t = self._calc_signature_1(params)
elif params['SignatureVersion'] == '2':
t = self._calc_signature_2(params, verb, server_string, path)
else:
raise Error('Unknown Signature Version: %s' % params['SignatureVersion'])
return t
def _get_utf8_value(self, value):
if not isinstance(value, str) and not isinstance(value, unicode):
value = str(value)
if isinstance(value, unicode):
return value.encode('utf-8')
else:
return value
def _calc_signature_0(self, params):
s = params['Action'] + params['Timestamp']
self.hmac.update(s)
keys = params.keys()
keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower()))
pairs = []
for key in keys:
val = self._get_utf8_value(params[key])
pairs.append(key + '=' + urllib.quote(val))
return base64.b64encode(self.hmac.digest())
def _calc_signature_1(self, params):
keys = params.keys()
keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower()))
pairs = []
for key in keys:
self.hmac.update(key)
val = self._get_utf8_value(params[key])
self.hmac.update(val)
pairs.append(key + '=' + urllib.quote(val))
return base64.b64encode(self.hmac.digest())
def _calc_signature_2(self, params, verb, server_string, path):
_log.debug('using _calc_signature_2')
string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path)
if self.hmac_256:
hmac = self.hmac_256
params['SignatureMethod'] = 'HmacSHA256'
else:
hmac = self.hmac
params['SignatureMethod'] = 'HmacSHA1'
keys = params.keys()
keys.sort()
pairs = []
for key in keys:
val = self._get_utf8_value(params[key])
pairs.append(urllib.quote(key, safe='') + '=' + urllib.quote(val, safe='-_~'))
qs = '&'.join(pairs)
_log.debug('query string: %s' % qs)
string_to_sign += qs
_log.debug('string_to_sign: %s' % string_to_sign)
hmac.update(string_to_sign)
b64 = base64.b64encode(hmac.digest())
_log.debug('len(b64)=%d' % len(b64))
_log.debug('base64 encoded digest: %s' % b64)
return b64
if __name__ == '__main__':
print Signer('foo').generate({"SignatureMethod": 'HmacSHA256', 'SignatureVersion': '2'}, "get", "server", "/foo")

226
nova/auth/slap.sh Executable file
View File

@ -0,0 +1,226 @@
#!/usr/bin/env bash
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# LDAP INSTALL SCRIPT - SHOULD BE IDEMPOTENT, but it SCRUBS all USERS
apt-get install -y slapd ldap-utils python-ldap
cat >/etc/ldap/schema/openssh-lpk_openldap.schema <<LPK_SCHEMA_EOF
#
# LDAP Public Key Patch schema for use with openssh-ldappubkey
# Author: Eric AUGE <eau@phear.org>
#
# Based on the proposal of : Mark Ruijter
#
# octetString SYNTAX
attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey'
DESC 'MANDATORY: OpenSSH Public key'
EQUALITY octetStringMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )
# printableString SYNTAX yes|no
objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY
DESC 'MANDATORY: OpenSSH LPK objectclass'
MAY ( sshPublicKey $ uid )
)
LPK_SCHEMA_EOF
cat >/etc/ldap/schema/nova.schema <<NOVA_SCHEMA_EOF
#
# Person object for Nova
# inetorgperson with extra attributes
# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
#
#
# using internet experimental oid arc as per BP64 3.1
objectidentifier novaSchema 1.3.6.1.3.1.666.666
objectidentifier novaAttrs novaSchema:3
objectidentifier novaOCs novaSchema:4
attributetype (
novaAttrs:1
NAME 'accessKey'
DESC 'Key for accessing data'
EQUALITY caseIgnoreMatch
SUBSTR caseIgnoreSubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)
attributetype (
novaAttrs:2
NAME 'secretKey'
DESC 'Secret key'
EQUALITY caseIgnoreMatch
SUBSTR caseIgnoreSubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)
attributetype (
novaAttrs:3
NAME 'keyFingerprint'
DESC 'Fingerprint of private key'
EQUALITY caseIgnoreMatch
SUBSTR caseIgnoreSubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)
attributetype (
novaAttrs:4
NAME 'isAdmin'
DESC 'Is user an administrator?'
EQUALITY booleanMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
SINGLE-VALUE
)
attributetype (
novaAttrs:5
NAME 'projectManager'
DESC 'Project Managers of a project'
SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
)
objectClass (
novaOCs:1
NAME 'novaUser'
DESC 'access and secret keys'
AUXILIARY
MUST ( uid )
MAY ( accessKey $ secretKey $ isAdmin )
)
objectClass (
novaOCs:2
NAME 'novaKeyPair'
DESC 'Key pair for User'
SUP top
STRUCTURAL
MUST ( cn $ sshPublicKey $ keyFingerprint )
)
objectClass (
novaOCs:3
NAME 'novaProject'
DESC 'Container for project'
SUP groupofnames
STRUCTURAL
MUST ( cn $ projectManager )
)
NOVA_SCHEMA_EOF
mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig
cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF
# slapd.conf - Configuration file for LDAP SLAPD
##########
# Basics #
##########
include /etc/ldap/schema/core.schema
include /etc/ldap/schema/cosine.schema
include /etc/ldap/schema/inetorgperson.schema
include /etc/ldap/schema/openssh-lpk_openldap.schema
include /etc/ldap/schema/nova.schema
pidfile /var/run/slapd/slapd.pid
argsfile /var/run/slapd/slapd.args
loglevel none
modulepath /usr/lib/ldap
# modulepath /usr/local/libexec/openldap
moduleload back_hdb
##########################
# Database Configuration #
##########################
database hdb
suffix "dc=example,dc=com"
rootdn "cn=Manager,dc=example,dc=com"
rootpw changeme
directory /var/lib/ldap
# directory /usr/local/var/openldap-data
index objectClass,cn eq
########
# ACLs #
########
access to attrs=userPassword
by anonymous auth
by self write
by * none
access to *
by self write
by * none
SLAPD_CONF_EOF
mv /etc/ldap/ldap.conf /etc/ldap/ldap.conf.orig
cat >/etc/ldap/ldap.conf <<LDAP_CONF_EOF
# LDAP Client Settings
URI ldap://localhost
BASE dc=example,dc=com
BINDDN cn=Manager,dc=example,dc=com
SIZELIMIT 0
TIMELIMIT 0
LDAP_CONF_EOF
cat >/etc/ldap/base.ldif <<BASE_LDIF_EOF
# This is the root of the directory tree
dn: dc=example,dc=com
description: Example.Com, your trusted non-existent corporation.
dc: example
o: Example.Com
objectClass: top
objectClass: dcObject
objectClass: organization
# Subtree for users
dn: ou=Users,dc=example,dc=com
ou: Users
description: Users
objectClass: organizationalUnit
# Subtree for groups
dn: ou=Groups,dc=example,dc=com
ou: Groups
description: Groups
objectClass: organizationalUnit
# Subtree for system accounts
dn: ou=System,dc=example,dc=com
ou: System
description: Special accounts used by software applications.
objectClass: organizationalUnit
# Special Account for Authentication:
dn: uid=authenticate,ou=System,dc=example,dc=com
uid: authenticate
ou: System
description: Special account for authenticating users
userPassword: {MD5}TLnIqASP0CKUR3/LGkEZGg==
objectClass: account
objectClass: simpleSecurityObject
BASE_LDIF_EOF
/etc/init.d/slapd stop
rm -rf /var/lib/ldap/*
rm -rf /etc/ldap/slapd.d/*
slaptest -f /etc/ldap/slapd.conf -F /etc/ldap/slapd.d
cp /usr/share/slapd/DB_CONFIG /var/lib/ldap/DB_CONFIG
slapadd -v -l /etc/ldap/base.ldif
chown -R openldap:openldap /etc/ldap/slapd.d
chown -R openldap:openldap /var/lib/ldap
/etc/init.d/slapd start

454
nova/auth/users.py Executable file
View File

@ -0,0 +1,454 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Nova users and user management, including RBAC hooks.
"""
import datetime
import logging
import os
import shutil
import tempfile
import uuid
import zipfile
try:
import ldap
except Exception, e:
import fakeldap as ldap
import fakeldap
from nova import datastore
# TODO(termie): clean up these imports
import signer
from nova import exception
from nova import flags
from nova import crypto
from nova import utils
import access as simplerbac
from nova import objectstore # for flags
FLAGS = flags.FLAGS
flags.DEFINE_string('ldap_url', 'ldap://localhost', 'Point this at your ldap server')
flags.DEFINE_string('ldap_password', 'changeme', 'LDAP password')
flags.DEFINE_string('user_dn', 'cn=Manager,dc=example,dc=com', 'DN of admin user')
flags.DEFINE_string('user_unit', 'Users', 'OID for Users')
flags.DEFINE_string('ldap_subtree', 'ou=Users,dc=example,dc=com', 'OU for Users')
flags.DEFINE_string('ldap_sysadmin',
'cn=sysadmins,ou=Groups,dc=example,dc=com', 'OU for Sysadmins')
flags.DEFINE_string('ldap_netadmin',
'cn=netadmins,ou=Groups,dc=example,dc=com', 'OU for NetAdmins')
flags.DEFINE_string('ldap_cloudadmin',
'cn=cloudadmins,ou=Groups,dc=example,dc=com', 'OU for Cloud Admins')
flags.DEFINE_string('ldap_itsec',
'cn=itsec,ou=Groups,dc=example,dc=com', 'OU for ItSec')
flags.DEFINE_string('credentials_template',
utils.abspath('auth/novarc.template'),
'Template for creating users rc file')
flags.DEFINE_string('credential_key_file', 'pk.pem',
'Filename of private key in credentials zip')
flags.DEFINE_string('credential_cert_file', 'cert.pem',
'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_rc_file', 'novarc',
'Filename of rc in credentials zip')
_log = logging.getLogger('auth')
_log.setLevel(logging.WARN)
class UserError(exception.ApiError):
pass
class InvalidKeyPair(exception.ApiError):
pass
class User(object):
def __init__(self, id, name, access, secret, admin):
self.manager = UserManager.instance()
self.id = id
self.name = name
self.access = access
self.secret = secret
self.admin = admin
self.keeper = datastore.Keeper(prefix="user")
def is_admin(self):
return self.admin
def has_role(self, role_type):
return self.manager.has_role(self.id, role_type)
def is_authorized(self, owner_id, action=None):
if self.is_admin() or owner_id == self.id:
return True
if action == None:
return False
project = None #(Fixme)
target_object = None # (Fixme, should be passed in)
return simplerbac.is_allowed(action, self, project, target_object)
def get_credentials(self):
rc = self.generate_rc()
private_key, signed_cert = self.generate_x509_cert()
tmpdir = tempfile.mkdtemp()
zf = os.path.join(tmpdir, "temp.zip")
zippy = zipfile.ZipFile(zf, 'w')
zippy.writestr(FLAGS.credential_rc_file, rc)
zippy.writestr(FLAGS.credential_key_file, private_key)
zippy.writestr(FLAGS.credential_cert_file, signed_cert)
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(self.id))
zippy.close()
with open(zf, 'rb') as f:
buffer = f.read()
shutil.rmtree(tmpdir)
return buffer
def generate_rc(self):
rc = open(FLAGS.credentials_template).read()
rc = rc % { 'access': self.access,
'secret': self.secret,
'ec2': FLAGS.ec2_url,
's3': 'http://%s:%s' % (FLAGS.s3_host, FLAGS.s3_port),
'nova': FLAGS.ca_file,
'cert': FLAGS.credential_cert_file,
'key': FLAGS.credential_key_file,
}
return rc
def generate_key_pair(self, name):
return self.manager.generate_key_pair(self.id, name)
def generate_x509_cert(self):
return self.manager.generate_x509_cert(self.id)
def create_key_pair(self, name, public_key, fingerprint):
return self.manager.create_key_pair(self.id,
name,
public_key,
fingerprint)
def get_key_pair(self, name):
return self.manager.get_key_pair(self.id, name)
def delete_key_pair(self, name):
return self.manager.delete_key_pair(self.id, name)
def get_key_pairs(self):
return self.manager.get_key_pairs(self.id)
class KeyPair(object):
def __init__(self, name, owner, public_key, fingerprint):
self.manager = UserManager.instance()
self.owner = owner
self.name = name
self.public_key = public_key
self.fingerprint = fingerprint
def delete(self):
return self.manager.delete_key_pair(self.owner, self.name)
class UserManager(object):
def __init__(self):
if hasattr(self.__class__, '_instance'):
raise Exception('Attempted to instantiate singleton')
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
inst = UserManager()
cls._instance = inst
if FLAGS.fake_users:
try:
inst.create_user('fake', 'fake', 'fake')
except: pass
try:
inst.create_user('user', 'user', 'user')
except: pass
try:
inst.create_user('admin', 'admin', 'admin', True)
except: pass
return cls._instance
def authenticate(self, params, signature, verb='GET', server_string='127.0.0.1:8773', path='/'):
# TODO: Check for valid timestamp
access_key = params['AWSAccessKeyId']
user = self.get_user_from_access_key(access_key)
if user == None:
return None
# hmac can't handle unicode, so encode ensures that secret isn't unicode
expected_signature = signer.Signer(user.secret.encode()).generate(params, verb, server_string, path)
_log.debug('user.secret: %s', user.secret)
_log.debug('expected_signature: %s', expected_signature)
_log.debug('signature: %s', signature)
if signature == expected_signature:
return user
def has_role(self, user, role, project=None):
# Map role to ldap group
group = FLAGS.__getitem__("ldap_%s" % role)
with LDAPWrapper() as conn:
return conn.is_member_of(user, group)
def add_role(self, user, role, project=None):
# TODO: Project-specific roles
group = FLAGS.__getitem__("ldap_%s" % role)
with LDAPWrapper() as conn:
return conn.add_to_group(user, group)
def get_user(self, uid):
with LDAPWrapper() as conn:
return conn.find_user(uid)
def get_user_from_access_key(self, access_key):
with LDAPWrapper() as conn:
return conn.find_user_by_access_key(access_key)
def get_users(self):
with LDAPWrapper() as conn:
return conn.find_users()
def create_user(self, uid, access=None, secret=None, admin=False):
if access == None: access = str(uuid.uuid4())
if secret == None: secret = str(uuid.uuid4())
with LDAPWrapper() as conn:
u = conn.create_user(uid, access, secret, admin)
return u
def delete_user(self, uid):
with LDAPWrapper() as conn:
conn.delete_user(uid)
def generate_key_pair(self, uid, key_name):
# generating key pair is slow so delay generation
# until after check
with LDAPWrapper() as conn:
if not conn.user_exists(uid):
raise UserError("User " + uid + " doesn't exist")
if conn.key_pair_exists(uid, key_name):
raise InvalidKeyPair("The keypair '" +
key_name +
"' already exists.",
"Duplicate")
private_key, public_key, fingerprint = crypto.generate_key_pair()
self.create_key_pair(uid, key_name, public_key, fingerprint)
return private_key, fingerprint
def create_key_pair(self, uid, key_name, public_key, fingerprint):
with LDAPWrapper() as conn:
return conn.create_key_pair(uid, key_name, public_key, fingerprint)
def get_key_pair(self, uid, key_name):
with LDAPWrapper() as conn:
return conn.find_key_pair(uid, key_name)
def get_key_pairs(self, uid):
with LDAPWrapper() as conn:
return conn.find_key_pairs(uid)
def delete_key_pair(self, uid, key_name):
with LDAPWrapper() as conn:
conn.delete_key_pair(uid, key_name)
def get_signed_zip(self, uid):
user = self.get_user(uid)
return user.get_credentials()
def generate_x509_cert(self, uid):
(private_key, csr) = crypto.generate_x509_cert(self.__cert_subject(uid))
# TODO - This should be async call back to the cloud controller
signed_cert = crypto.sign_csr(csr, uid)
return (private_key, signed_cert)
def sign_cert(self, csr, uid):
return crypto.sign_csr(csr, uid)
def __cert_subject(self, uid):
return "/C=US/ST=California/L=The_Mission/O=AnsoLabs/OU=Nova/CN=%s-%s" % (uid, str(datetime.datetime.utcnow().isoformat()))
class LDAPWrapper(object):
def __init__(self):
self.user = FLAGS.user_dn
self.passwd = FLAGS.ldap_password
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
#logging.info('type, value, traceback: %s, %s, %s', type, value, traceback)
self.conn.unbind_s()
return False
def connect(self):
""" connect to ldap as admin user """
if FLAGS.fake_users:
self.conn = fakeldap.initialize(FLAGS.ldap_url)
else:
assert(ldap.__name__ != 'fakeldap')
self.conn = ldap.initialize(FLAGS.ldap_url)
self.conn.simple_bind_s(self.user, self.passwd)
def find_object(self, dn, query = None):
objects = self.find_objects(dn, query)
if len(objects) == 0:
return None
return objects[0]
def find_objects(self, dn, query = None):
try:
res = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, query)
except Exception:
return []
# just return the attributes
return [x[1] for x in res]
def find_users(self):
attrs = self.find_objects(FLAGS.ldap_subtree, '(objectclass=novaUser)')
return [self.__to_user(attr) for attr in attrs]
def find_key_pairs(self, uid):
dn = 'uid=%s,%s' % (uid, FLAGS.ldap_subtree)
attrs = self.find_objects(dn, '(objectclass=novaKeyPair)')
return [self.__to_key_pair(uid, attr) for attr in attrs]
def find_user(self, name):
dn = 'uid=%s,%s' % (name, FLAGS.ldap_subtree)
attr = self.find_object(dn, '(objectclass=novaUser)')
return self.__to_user(attr)
def user_exists(self, name):
return self.find_user(name) != None
def find_key_pair(self, uid, key_name):
dn = 'cn=%s,uid=%s,%s' % (key_name,
uid,
FLAGS.ldap_subtree)
attr = self.find_object(dn, '(objectclass=novaKeyPair)')
return self.__to_key_pair(uid, attr)
def delete_key_pairs(self, uid):
keys = self.find_key_pairs(uid)
if keys != None:
for key in keys:
self.delete_key_pair(uid, key.name)
def key_pair_exists(self, uid, key_name):
return self.find_key_pair(uid, key_name) != None
def create_user(self, name, access_key, secret_key, is_admin):
if self.user_exists(name):
raise UserError("LDAP user " + name + " already exists")
attr = [
('objectclass', ['person',
'organizationalPerson',
'inetOrgPerson',
'novaUser']),
('ou', [FLAGS.user_unit]),
('uid', [name]),
('sn', [name]),
('cn', [name]),
('secretKey', [secret_key]),
('accessKey', [access_key]),
('isAdmin', [str(is_admin).upper()]),
]
self.conn.add_s('uid=%s,%s' % (name, FLAGS.ldap_subtree),
attr)
return self.__to_user(dict(attr))
def create_project(self, name, project_manager):
# PM can be user object or string containing DN
pass
def is_member_of(self, name, group):
return True
def add_to_group(self, name, group):
pass
def remove_from_group(self, name, group):
pass
def create_key_pair(self, uid, key_name, public_key, fingerprint):
"""create's a public key in the directory underneath the user"""
# TODO(vish): possibly refactor this to store keys in their own ou
# and put dn reference in the user object
attr = [
('objectclass', ['novaKeyPair']),
('cn', [key_name]),
('sshPublicKey', [public_key]),
('keyFingerprint', [fingerprint]),
]
self.conn.add_s('cn=%s,uid=%s,%s' % (key_name,
uid,
FLAGS.ldap_subtree),
attr)
return self.__to_key_pair(uid, dict(attr))
def find_user_by_access_key(self, access):
query = '(' + 'accessKey' + '=' + access + ')'
dn = FLAGS.ldap_subtree
return self.__to_user(self.find_object(dn, query))
def delete_key_pair(self, uid, key_name):
if not self.key_pair_exists(uid, key_name):
raise UserError("Key Pair " +
key_name +
" doesn't exist for user " +
uid)
self.conn.delete_s('cn=%s,uid=%s,%s' % (key_name, uid,
FLAGS.ldap_subtree))
def delete_user(self, name):
if not self.user_exists(name):
raise UserError("User " +
name +
" doesn't exist")
self.delete_key_pairs(name)
self.conn.delete_s('uid=%s,%s' % (name,
FLAGS.ldap_subtree))
def __to_user(self, attr):
if attr == None:
return None
return User(
id = attr['uid'][0],
name = attr['uid'][0],
access = attr['accessKey'][0],
secret = attr['secretKey'][0],
admin = (attr['isAdmin'][0] == 'TRUE')
)
def __to_key_pair(self, owner, attr):
if attr == None:
return None
return KeyPair(
owner = owner,
name = attr['cn'][0],
public_key = attr['sshPublicKey'][0],
fingerprint = attr['keyFingerprint'][0],
)
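# Hedged usage sketch (not part of the original module): with --fake_users set
# the manager runs against nova.auth.fakeldap, so no slapd is required. The
# 'demo' user and 'demo-key' names are purely illustrative.
#
#     manager = UserManager.instance()
#     user = manager.create_user('demo')
#     private_key, fingerprint = manager.generate_key_pair('demo', 'demo-key')
#     zip_blob = manager.get_signed_zip('demo')   # novarc + x509 credentials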

367
nova/datastore.py Normal file
View File

@ -0,0 +1,367 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Datastore:
Provides the Keeper class, a simple pseudo-dictionary that
persists on disk.
Make sure that Redis is running, and your flags are set properly,
before trying to run this.
"""
import json
import logging
import os
import sqlite3
from nova import vendor
import redis
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('datastore_path', utils.abspath('../keeper'),
'where keys are stored on disk')
flags.DEFINE_string('redis_host', '127.0.0.1',
'Host that redis is running on.')
flags.DEFINE_integer('redis_port', 6379,
'Port that redis is running on.')
flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away')
flags.DEFINE_string('keeper_backend', 'redis',
'which backend to use for keeper')
class Redis(object):
def __init__(self):
if hasattr(self.__class__, '_instance'):
raise Exception('Attempted to instantiate singleton')
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
inst = redis.Redis(host=FLAGS.redis_host, port=FLAGS.redis_port, db=FLAGS.redis_db)
cls._instance = inst
return cls._instance
class RedisModel(object):
""" Wrapper around redis-backed properties """
object_type = 'generic'
def __init__(self, object_id):
""" loads an object from the datastore if exists """
self.object_id = object_id
self.initial_state = {}
self.state = Redis.instance().hgetall(self.__redis_key)
if self.state:
self.initial_state = self.state
else:
self.set_default_state()
def set_default_state(self):
self.state = {'state' : 'pending'}
self.state[self.object_type+"_id"] = self.object_id
@property
def __redis_key(self):
""" Magic string for instance keys """
return '%s:%s' % (self.object_type, self.object_id)
def __repr__(self):
return "<%s:%s>" % (self.object_type, self.object_id)
def __str__(self):
return str(self.state)
def keys(self):
return self.state.keys()
def copy(self):
copyDict = {}
for item in self.keys():
copyDict[item] = self[item]
return copyDict
def get(self, item, default):
return self.state.get(item, default)
def __getitem__(self, item):
return self.state[item]
def __setitem__(self, item, val):
self.state[item] = val
return self.state[item]
def __delitem__(self, item):
""" We don't support this """
raise Exception("Silly monkey, we NEED all our properties.")
def save(self):
""" update the directory with the state from this instance """
# TODO(ja): implement hmset in redis-py and use it
# instead of multiple calls to hset
for key, val in self.state.iteritems():
# if (not self.initial_state.has_key(key)
# or self.initial_state[key] != val):
Redis.instance().hset(self.__redis_key, key, val)
if self.initial_state == {}:
self.first_save()
self.initial_state = self.state
return True
def first_save(self):
pass
def destroy(self):
""" deletes all related records from datastore.
does NOT do anything to running state.
"""
Redis.instance().delete(self.__redis_key)
return True
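# Illustrative usage sketch for RedisModel (not part of the original module).
# It assumes a reachable redis server configured by the flags above; the
# 'host' object type and id below are made up.
#
#     from nova import datastore
#
#     class HostModel(datastore.RedisModel):
#         object_type = 'host'
#
#     h = HostModel('node-1')        # loads existing state, or starts 'pending'
#     h['load_average'] = '0.15'     # redis hashes store values as strings
#     h.save()                       # hset for each field in self.state
#     print HostModel('node-1')['load_average']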
def slugify(key, prefix=None):
"""
Key has to be a valid filename. Slugify solves that.
"""
return "%s%s" % (prefix, key)
class SqliteKeeper(object):
""" Keeper implementation in SQLite, mostly for in-memory testing """
_conn = {} # class variable
def __init__(self, prefix):
self.prefix = prefix
@property
def conn(self):
if self.prefix not in self.__class__._conn:
logging.debug('no sqlite connection (%s), making new', self.prefix)
if FLAGS.datastore_path != ':memory:':
try:
os.mkdir(FLAGS.datastore_path)
except Exception:
pass
conn = sqlite3.connect(os.path.join(
FLAGS.datastore_path, '%s.sqlite' % self.prefix))
else:
conn = sqlite3.connect(':memory:')
c = conn.cursor()
try:
c.execute('''CREATE TABLE data (item text, value text)''')
conn.commit()
except Exception:
logging.exception('create table failed')
finally:
c.close()
self.__class__._conn[self.prefix] = conn
return self.__class__._conn[self.prefix]
def __delitem__(self, item):
#logging.debug('sqlite deleting %s', item)
c = self.conn.cursor()
try:
c.execute('DELETE FROM data WHERE item = ?', (item, ))
self.conn.commit()
except Exception:
logging.exception('delete failed: %s', item)
finally:
c.close()
def __getitem__(self, item):
#logging.debug('sqlite getting %s', item)
result = None
c = self.conn.cursor()
try:
c.execute('SELECT value FROM data WHERE item = ?', (item, ))
row = c.fetchone()
if row:
result = json.loads(row[0])
else:
result = None
except Exception:
logging.exception('select failed: %s', item)
finally:
c.close()
#logging.debug('sqlite got %s: %s', item, result)
return result
def __setitem__(self, item, value):
serialized_value = json.dumps(value)
insert = True
if self[item] is not None:
insert = False
#logging.debug('sqlite insert %s: %s', item, value)
c = self.conn.cursor()
try:
if insert:
c.execute('INSERT INTO data VALUES (?, ?)',
(item, serialized_value))
else:
c.execute('UPDATE data SET item=?, value=? WHERE item = ?',
(item, serialized_value, item))
self.conn.commit()
except Exception:
logging.exception('insert/update failed: %s', item)
finally:
c.close()
def clear(self):
if self.prefix not in self.__class__._conn:
return
self.conn.close()
if FLAGS.datastore_path != ':memory:':
os.unlink(os.path.join(FLAGS.datastore_path, '%s.sqlite' % self.prefix))
del self.__class__._conn[self.prefix]
def clear_all(self):
for k, conn in self.__class__._conn.iteritems():
conn.close()
if FLAGS.datastore_path != ':memory:':
os.unlink(os.path.join(FLAGS.datastore_path,
'%s.sqlite' % self.prefix))
self.__class__._conn = {}
def set_add(self, item, value):
group = self[item]
if not group:
group = []
group.append(value)
self[item] = group
def set_is_member(self, item, value):
group = self[item]
if not group:
return False
return value in group
def set_remove(self, item, value):
group = self[item]
if not group:
group = []
if value in group:
group.remove(value)
self[item] = group
def set_fetch(self, item):
# TODO(termie): I don't really know what set_fetch is supposed to do
group = self[item]
if not group:
group = []
return iter(group)
class JsonKeeper(object):
"""
Simple dictionary class that persists using
JSON in files saved to disk.
"""
def __init__(self, prefix):
self.prefix = prefix
def __delitem__(self, item):
"""
Removing a key means deleting a file from disk.
"""
item = slugify(item, self.prefix)
path = "%s/%s" % (FLAGS.datastore_path, item)
if os.path.isfile(path):
os.remove(path)
def __getitem__(self, item):
"""
Fetch file contents and dejsonify them.
"""
item = slugify(item, self.prefix)
path = "%s/%s" % (FLAGS.datastore_path, item)
if os.path.isfile(path):
return json.load(open(path, 'r'))
return None
def __setitem__(self, item, value):
"""
JSON encode value and save to file.
"""
item = slugify(item, self.prefix)
path = "%s/%s" % (FLAGS.datastore_path, item)
with open(path, "w") as blobfile:
blobfile.write(json.dumps(value))
return value
class RedisKeeper(object):
"""
Simple dictionary class that persists using
ReDIS.
"""
def __init__(self, prefix="redis-"):
self.prefix = prefix
Redis.instance().ping()
def __setitem__(self, item, value):
"""
JSON encode value and save to file.
"""
item = slugify(item, self.prefix)
Redis.instance().set(item, json.dumps(value))
return value
def __getitem__(self, item):
item = slugify(item, self.prefix)
value = Redis.instance().get(item)
if value:
return json.loads(value)
def __delitem__(self, item):
item = slugify(item, self.prefix)
return Redis.instance().delete(item)
def clear(self):
raise NotImplementedError()
def clear_all(self):
raise NotImplementedError()
def set_add(self, item, value):
item = slugify(item, self.prefix)
return Redis.instance().sadd(item, json.dumps(value))
def set_is_member(self, item, value):
item = slugify(item, self.prefix)
return Redis.instance().sismember(item, json.dumps(value))
def set_remove(self, item, value):
item = slugify(item, self.prefix)
return Redis.instance().srem(item, json.dumps(value))
def set_fetch(self, item):
item = slugify(item, self.prefix)
for obj in Redis.instance().sinter([item]):
yield json.loads(obj)
def Keeper(prefix=''):
KEEPERS = {'redis': RedisKeeper,
'sqlite': SqliteKeeper}
return KEEPERS[FLAGS.keeper_backend](prefix)
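# Illustrative usage sketch for Keeper (not part of the original module).
# With --keeper_backend=sqlite and --datastore_path=:memory: this needs no
# redis server; the prefix and keys below are made up.
#
#     from nova import datastore
#
#     keeper = datastore.Keeper('test-')
#     keeper['greeting'] = {'hello': 'world'}      # values are JSON-serialized
#     print keeper['greeting']['hello']
#     keeper.set_add('colors', 'blue')
#     print keeper.set_is_member('colors', 'blue')
#     del keeper['greeting']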

28
nova/endpoint/__init__.py Normal file
View File

@ -0,0 +1,28 @@
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`nova.endpoint` -- Main Nova API endpoints
=====================================================
.. automodule:: nova.endpoint
:platform: Unix
:synopsis: REST APIs for all nova functions
.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""

131
nova/endpoint/admin.py Normal file
View File

@ -0,0 +1,131 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Admin API controller, exposed through http via the api worker.
"""
import base64
def user_dict(user, base64_file=None):
"""Convert the user object to a result dict"""
if user:
return {
'username': user.id,
'accesskey': user.access,
'secretkey': user.secret,
'file': base64_file,
}
else:
return {}
def node_dict(node):
"""Convert a node object to a result dict"""
if node:
return {
'node_id': node.id,
'workers': ", ".join(node.workers),
'disks': ", ".join(node.disks),
'ram': node.memory,
'load_average' : node.load_average,
}
else:
return {}
def admin_only(target):
"""Decorator for admin-only API calls"""
def wrapper(*args, **kwargs):
"""Internal wrapper method for admin-only API calls"""
context = args[1]
if context.user.is_admin():
return target(*args, **kwargs)
else:
return {}
return wrapper
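# Illustrative sketch of the admin_only decorator (not part of the original
# module). The fake user/context below only mimic the is_admin() interface of
# the real request context.
#
#     class FakeUser(object):
#         def is_admin(self):
#             return False
#
#     class FakeContext(object):
#         user = FakeUser()
#
#     class Controller(object):
#         @admin_only
#         def secret(self, _context):
#             return {'data': 42}
#
#     print Controller().secret(FakeContext())    # -> {} for non-admin callers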
class AdminController(object):
"""
API Controller for users, node status, and worker mgmt.
Trivial admin_only wrapper will be replaced with RBAC,
allowing project managers to administer project users.
"""
def __init__(self, user_manager, node_manager=None):
self.user_manager = user_manager
self.node_manager = node_manager
def __str__(self):
return 'AdminController'
@admin_only
def describe_user(self, _context, name, **_kwargs):
"""Returns user data, including access and secret keys.
"""
return user_dict(self.user_manager.get_user(name))
@admin_only
def describe_users(self, _context, **_kwargs):
"""Returns all users - should be changed to deal with a list.
"""
return {'userSet':
[user_dict(u) for u in self.user_manager.get_users()] }
@admin_only
def register_user(self, _context, name, **_kwargs):
""" Creates a new user, and returns generated credentials.
"""
self.user_manager.create_user(name)
return user_dict(self.user_manager.get_user(name))
@admin_only
def deregister_user(self, _context, name, **_kwargs):
"""Deletes a single user (NOT undoable.)
Should throw an exception if the user has instances,
volumes, or buckets remaining.
"""
self.user_manager.delete_user(name)
return True
@admin_only
def generate_x509_for_user(self, _context, name, **_kwargs):
"""Generates and returns an x509 certificate for a single user.
Is usually called from a client that will wrap this with
access and secret key info, and return a zip file.
"""
user = self.user_manager.get_user(name)
return user_dict(user, base64.b64encode(user.get_credentials()))
@admin_only
def describe_nodes(self, _context, **_kwargs):
"""Returns status info for all nodes. Includes:
* Disk Space
* Instance List
* RAM used
* CPU used
* DHCP servers running
* Iptables / bridges
"""
return {'nodeSet':
[node_dict(n) for n in self.node_manager.get_nodes()] }
@admin_only
def describe_node(self, _context, name, **_kwargs):
"""Returns status info for single node.
"""
return node_dict(self.node_manager.get_node(name))

337
nova/endpoint/api.py Executable file
View File

@ -0,0 +1,337 @@
#!/usr/bin/python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tornado REST API Request Handlers for Nova functions
Most calls are proxied into the responsible controller.
"""
import logging
import multiprocessing
import random
import re
import urllib
# TODO(termie): replace minidom with etree
from xml.dom import minidom
from nova import vendor
import tornado.web
from twisted.internet import defer
from nova import crypto
from nova import exception
from nova import flags
from nova import utils
from nova.endpoint import cloud
from nova.auth import users
FLAGS = flags.FLAGS
flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')
_log = logging.getLogger("api")
_log.setLevel(logging.DEBUG)
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
def _camelcase_to_underscore(str):
return _c2u.sub(r'_\1', str).lower().strip('_')
def _underscore_to_camelcase(str):
return ''.join([x[:1].upper() + x[1:] for x in str.split('_')])
def _underscore_to_xmlcase(str):
res = _underscore_to_camelcase(str)
return res[:1].lower() + res[1:]
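# Illustrative conversions for the helpers above (not part of the original
# module); the sample strings are made up.
#
#     _camelcase_to_underscore('DescribeInstances')    # -> 'describe_instances'
#     _underscore_to_camelcase('describe_instances')   # -> 'DescribeInstances'
#     _underscore_to_xmlcase('instance_id')            # -> 'instanceId'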
class APIRequestContext(object):
def __init__(self, handler, user):
self.handler = handler
self.user = user
self.request_id = ''.join(
[random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-')
for x in xrange(20)]
)
class APIRequest(object):
def __init__(self, handler, controller, action):
self.handler = handler
self.controller = controller
self.action = action
def send(self, user, **kwargs):
context = APIRequestContext(self.handler, user)
try:
method = getattr(self.controller,
_camelcase_to_underscore(self.action))
except AttributeError:
_error = ('Unsupported API request: controller = %s,'
'action = %s') % (self.controller, self.action)
_log.warning(_error)
# TODO: Raise custom exception, trap in apiserver,
# and reraise as 400 error.
raise Exception(_error)
args = {}
for key, value in kwargs.items():
parts = key.split(".")
key = _camelcase_to_underscore(parts[0])
if len(parts) > 1:
d = args.get(key, {})
d[parts[1]] = value[0]
value = d
else:
value = value[0]
args[key] = value
for key in args.keys():
if isinstance(args[key], dict):
if args[key] != {} and args[key].keys()[0].isdigit():
s = args[key].items()
s.sort()
args[key] = [v for k, v in s]
d = defer.maybeDeferred(method, context, **args)
d.addCallback(self._render_response, context.request_id)
return d
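# Illustrative trace of the argument unpacking above (not part of the original
# module); the EC2-style parameters are made up. Tornado supplies each value
# as a single-element list.
#
#     kwargs = {'InstanceId.1': ['i-00000001'],
#               'InstanceId.2': ['i-00000002'],
#               'MaxCount': ['1']}
#
#     # after the two loops, the controller method is invoked with:
#     #     instance_id=['i-00000001', 'i-00000002'], max_count='1'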
def _render_response(self, response_data, request_id):
xml = minidom.Document()
response_el = xml.createElement(self.action + 'Response')
response_el.setAttribute('xmlns',
'http://ec2.amazonaws.com/doc/2009-11-30/')
request_id_el = xml.createElement('requestId')
request_id_el.appendChild(xml.createTextNode(request_id))
response_el.appendChild(request_id_el)
if(response_data == True):
self._render_dict(xml, response_el, {'return': 'true'})
else:
self._render_dict(xml, response_el, response_data)
xml.appendChild(response_el)
response = xml.toxml()
xml.unlink()
_log.debug(response)
return response
def _render_dict(self, xml, el, data):
try:
for key in data.keys():
val = data[key]
el.appendChild(self._render_data(xml, key, val))
except:
_log.debug(data)
raise
def _render_data(self, xml, el_name, data):
el_name = _underscore_to_xmlcase(el_name)
data_el = xml.createElement(el_name)
if isinstance(data, list):
for item in data:
data_el.appendChild(self._render_data(xml, 'item', item))
elif isinstance(data, dict):
self._render_dict(xml, data_el, data)
elif hasattr(data, '__dict__'):
self._render_dict(xml, data_el, data.__dict__)
elif isinstance(data, bool):
data_el.appendChild(xml.createTextNode(str(data).lower()))
elif data != None:
data_el.appendChild(xml.createTextNode(str(data)))
return data_el
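# Illustrative output of the rendering helpers above (not part of the original
# module); the action name and response dict are made up, and whitespace is
# added here for readability (minidom's toxml() emits no indentation).
#
#     action = 'DescribeKeyPairs'
#     response_data = {'keypairsSet': [{'keyName': 'test',
#                                       'keyFingerprint': 'aa:bb'}]}
#
#     # _render_response(response_data, request_id) then produces roughly:
#     #
#     #   <DescribeKeyPairsResponse xmlns="http://ec2.amazonaws.com/doc/2009-11-30/">
#     #     <requestId>...</requestId>
#     #     <keypairsSet>
#     #       <item>
#     #         <keyName>test</keyName>
#     #         <keyFingerprint>aa:bb</keyFingerprint>
#     #       </item>
#     #     </keypairsSet>
#     #   </DescribeKeyPairsResponse>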
class RootRequestHandler(tornado.web.RequestHandler):
def get(self):
# available api versions
versions = [
'1.0',
'2007-01-19',
'2007-03-01',
'2007-08-29',
'2007-10-10',
'2007-12-15',
'2008-02-01',
'2008-09-01',
'2009-04-04',
]
for version in versions:
self.write('%s\n' % version)
self.finish()
class MetadataRequestHandler(tornado.web.RequestHandler):
def print_data(self, data):
if isinstance(data, dict):
output = ''
for key in data:
if key == '_name':
continue
output += key
if isinstance(data[key], dict):
if '_name' in data[key]:
output += '=' + str(data[key]['_name'])
else:
output += '/'
output += '\n'
self.write(output[:-1]) # cut off last \n
elif isinstance(data, list):
self.write('\n'.join(data))
else:
self.write(str(data))
def lookup(self, path, data):
items = path.split('/')
for item in items:
if item:
if not isinstance(data, dict):
return data
if not item in data:
return None
data = data[item]
return data
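# Illustrative trace of lookup() above (not part of the original module); the
# nested dict mirrors the shape built by CloudController.get_metadata and the
# handler instance is hypothetical.
#
#     md = {'meta-data': {'instance-id': 'i-00000001',
#                         'placement': {'availability-zone': 'nova'}}}
#     handler.lookup('meta-data/instance-id', md)   # -> 'i-00000001'
#     handler.lookup('meta-data/placement/', md)    # -> {'availability-zone': 'nova'}
#     handler.lookup('meta-data/missing', md)       # -> None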
def get(self, path):
cc = self.application.controllers['Cloud']
meta_data = cc.get_metadata(self.request.remote_ip)
if meta_data is None:
_log.error('Failed to get metadata for ip: %s' %
self.request.remote_ip)
raise tornado.web.HTTPError(404)
data = self.lookup(path, meta_data)
if data is None:
raise tornado.web.HTTPError(404)
self.print_data(data)
self.finish()
class APIRequestHandler(tornado.web.RequestHandler):
def get(self, controller_name):
self.execute(controller_name)
@tornado.web.asynchronous
def execute(self, controller_name):
# Obtain the appropriate controller for this request.
try:
controller = self.application.controllers[controller_name]
except KeyError:
self._error('unhandled', 'no controller named %s' % controller_name)
return
args = self.request.arguments
# Read request signature.
try:
signature = args.pop('Signature')[0]
except:
raise tornado.web.HTTPError(400)
# Make a copy of args for authentication and signature verification.
auth_params = {}
for key, value in args.items():
auth_params[key] = value[0]
# Get requested action and remove authentication args for final request.
try:
action = args.pop('Action')[0]
args.pop('AWSAccessKeyId')
args.pop('SignatureMethod')
args.pop('SignatureVersion')
args.pop('Version')
args.pop('Timestamp')
except:
raise tornado.web.HTTPError(400)
# Authenticate the request.
user = self.application.user_manager.authenticate(
auth_params,
signature,
self.request.method,
self.request.host,
self.request.path
)
if not user:
raise tornado.web.HTTPError(403)
_log.debug('action: %s' % action)
for key, value in args.items():
_log.debug('arg: %s\t\tval: %s' % (key, value))
request = APIRequest(self, controller, action)
d = request.send(user, **args)
# d.addCallback(utils.debug)
# TODO: Wrap response in AWS XML format
d.addCallbacks(self._write_callback, self._error_callback)
def _write_callback(self, data):
self.set_header('Content-Type', 'text/xml')
self.write(data)
self.finish()
def _error_callback(self, failure):
try:
failure.raiseException()
except exception.ApiError as ex:
self._error(type(ex).__name__ + "." + ex.code, ex.message)
# TODO(vish): do something more useful with unknown exceptions
except Exception as ex:
self._error(type(ex).__name__, str(ex))
raise
def post(self, controller_name):
self.execute(controller_name)
def _error(self, code, message):
self._status_code = 400
self.set_header('Content-Type', 'text/xml')
self.write('<?xml version="1.0"?>\n')
self.write('<Response><Errors><Error><Code>%s</Code>'
'<Message>%s</Message></Error></Errors>'
'<RequestID>?</RequestID></Response>' % (code, message))
self.finish()
class APIServerApplication(tornado.web.Application):
def __init__(self, user_manager, controllers):
tornado.web.Application.__init__(self, [
(r'/', RootRequestHandler),
(r'/services/([A-Za-z0-9]+)/', APIRequestHandler),
(r'/latest/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2009-04-04/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2008-09-01/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2008-02-01/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2007-12-15/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2007-10-10/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2007-08-29/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2007-03-01/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2007-01-19/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/1.0/([-A-Za-z0-9/]*)', MetadataRequestHandler),
], pool=multiprocessing.Pool(4))
self.user_manager = user_manager
self.controllers = controllers

572
nova/endpoint/cloud.py Normal file
View File

@ -0,0 +1,572 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cloud Controller: Implementation of EC2 REST API calls, which are
dispatched to other nodes via AMQP RPC. State is via distributed
datastore.
"""
import base64
import json
import logging
import os
import time
from nova import vendor
from twisted.internet import defer
from nova import datastore
from nova import flags
from nova import rpc
from nova import utils
from nova import exception
from nova.auth import users
from nova.compute import model
from nova.compute import network
from nova.endpoint import images
from nova.volume import storage
FLAGS = flags.FLAGS
flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
def _gen_key(user_id, key_name):
""" Tuck this into UserManager """
try:
manager = users.UserManager.instance()
private_key, fingerprint = manager.generate_key_pair(user_id, key_name)
except Exception as ex:
return {'exception': ex}
return {'private_key': private_key, 'fingerprint': fingerprint}
class CloudController(object):
""" CloudController provides the critical dispatch between
inbound API calls through the endpoint and messages
sent to the other nodes.
"""
def __init__(self):
self._instances = datastore.Keeper(FLAGS.instances_prefix)
self.instdir = model.InstanceDirectory()
self.network = network.NetworkController()
self.setup()
@property
def instances(self):
""" All instances in the system, as dicts """
for instance in self.instdir.all:
yield {instance['instance_id']: instance}
@property
def volumes(self):
""" returns a list of all volumes """
for volume_id in datastore.Redis.instance().smembers("volumes"):
volume = storage.Volume(volume_id=volume_id)
yield volume
def __str__(self):
return 'CloudController'
def setup(self):
""" Ensure the keychains and folders exist. """
# Create keys folder, if it doesn't exist
if not os.path.exists(FLAGS.keys_path):
os.makedirs(os.path.abspath(FLAGS.keys_path))
# Gen root CA, if we don't have one
root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file)
if not os.path.exists(root_ca_path):
start = os.getcwd()
os.chdir(FLAGS.ca_path)
utils.runthis("Generating root CA: %s", "sh genrootca.sh")
os.chdir(start)
# TODO: Do this with M2Crypto instead
def get_instance_by_ip(self, ip):
return self.instdir.by_ip(ip)
def get_metadata(self, ip):
i = self.instdir.by_ip(ip)
if i is None:
return None
if i['key_name']:
keys = {
'0': {
'_name': i['key_name'],
'openssh-key': i['key_data']
}
}
else:
keys = ''
data = {
'user-data': base64.b64decode(i['user_data']),
'meta-data': {
'ami-id': i['image_id'],
'ami-launch-index': i['ami_launch_index'],
'ami-manifest-path': 'FIXME', # image property
'block-device-mapping': { # TODO: replace with real data
'ami': 'sda1',
'ephemeral0': 'sda2',
'root': '/dev/sda1',
'swap': 'sda3'
},
'hostname': i['private_dns_name'], # is this public sometimes?
'instance-action': 'none',
'instance-id': i['instance_id'],
'instance-type': i.get('instance_type', ''),
'local-hostname': i['private_dns_name'],
'local-ipv4': i['private_dns_name'], # TODO: switch to IP
'kernel-id': i.get('kernel_id', ''),
'placement': {
'availability-zone': i.get('availability_zone', 'nova'),
},
'public-hostname': i.get('dns_name', ''),
'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP
'public-keys' : keys,
'ramdisk-id': i.get('ramdisk_id', ''),
'reservation-id': i['reservation_id'],
'security-groups': i.get('groups', '')
}
}
if False: # TODO: store ancestor ids
data['ancestor-ami-ids'] = []
if i.get('product_codes', None):
data['product-codes'] = i['product_codes']
return data
def describe_availability_zones(self, context, **kwargs):
return {'availabilityZoneInfo': [{'zoneName': 'nova',
'zoneState': 'available'}]}
def describe_key_pairs(self, context, key_name=None, **kwargs):
key_pairs = []
key_names = key_name and key_name or []
if len(key_names) > 0:
for key_name in key_names:
key_pair = context.user.get_key_pair(key_name)
if key_pair != None:
key_pairs.append({
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint,
})
else:
for key_pair in context.user.get_key_pairs():
key_pairs.append({
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint,
})
return { 'keypairsSet': key_pairs }
def create_key_pair(self, context, key_name, **kwargs):
try:
d = defer.Deferred()
p = context.handler.application.settings.get('pool')
def _complete(kwargs):
if 'exception' in kwargs:
d.errback(kwargs['exception'])
return
d.callback({'keyName': key_name,
'keyFingerprint': kwargs['fingerprint'],
'keyMaterial': kwargs['private_key']})
p.apply_async(_gen_key, [context.user.id, key_name],
callback=_complete)
return d
except users.UserError, e:
raise
def delete_key_pair(self, context, key_name, **kwargs):
context.user.delete_key_pair(key_name)
# aws returns true even if the key doesn't exist
return True
def describe_security_groups(self, context, group_names, **kwargs):
groups = { 'securityGroupSet': [] }
# Stubbed for now to unblock other things.
return groups
def create_security_group(self, context, group_name, **kwargs):
return True
def delete_security_group(self, context, group_name, **kwargs):
return True
def get_console_output(self, context, instance_id, **kwargs):
# instance_id is passed in as a list of instances
instance = self.instdir.get(instance_id[0])
if instance['state'] == 'pending':
raise exception.ApiError('Cannot get output for pending instance')
if not context.user.is_authorized(instance.get('owner_id', None)):
raise exception.ApiError('Not authorized to view output')
return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "get_console_output",
"args" : {"instance_id": instance_id[0]}})
def _get_user_id(self, context):
if context and context.user:
return context.user.id
else:
return None
def describe_volumes(self, context, **kwargs):
volumes = []
for volume in self.volumes:
if context.user.is_authorized(volume.get('user_id', None)):
v = self.format_volume(context, volume)
volumes.append(v)
return defer.succeed({'volumeSet': volumes})
def format_volume(self, context, volume):
v = {}
v['volumeId'] = volume['volume_id']
v['status'] = volume['status']
v['size'] = volume['size']
v['availabilityZone'] = volume['availability_zone']
v['createTime'] = volume['create_time']
if context.user.is_admin():
v['status'] = '%s (%s, %s, %s, %s)' % (
volume.get('status', None),
volume.get('user_id', None),
volume.get('node_name', None),
volume.get('instance_id', ''),
volume.get('mountpoint', ''))
return v
def create_volume(self, context, size, **kwargs):
# TODO(vish): refactor this to create the volume object here and tell storage to create it
res = rpc.call(FLAGS.storage_topic, {"method": "create_volume",
"args" : {"size": size,
"user_id": context.user.id}})
def _format_result(result):
volume = self._get_volume(result['result'])
return {'volumeSet': [self.format_volume(context, volume)]}
res.addCallback(_format_result)
return res
def _get_by_id(self, nodes, id):
if nodes == {}:
raise exception.NotFound("%s not found" % id)
for node_name, node in nodes.iteritems():
if node.has_key(id):
return node_name, node[id]
raise exception.NotFound("%s not found" % id)
def _get_volume(self, volume_id):
for volume in self.volumes:
if volume['volume_id'] == volume_id:
return volume
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
volume = self._get_volume(volume_id)
storage_node = volume['node_name']
# TODO: (joshua) Fix volumes to store creator id
if not context.user.is_authorized(volume.get('user_id', None)):
raise exception.ApiError("%s not authorized for %s" %
(context.user.id, volume_id))
instance = self.instdir.get(instance_id)
compute_node = instance['node_name']
if not context.user.is_authorized(instance.get('owner_id', None)):
raise exception.ApiError(message="%s not authorized for %s" %
(context.user.id, instance_id))
aoe_device = volume['aoe_device']
# Needs to get right node controller for attaching to
# TODO: Maybe have another exchange that goes to everyone?
rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node),
{"method": "attach_volume",
"args" : {"aoe_device": aoe_device,
"instance_id" : instance_id,
"mountpoint" : device}})
rpc.cast('%s.%s' % (FLAGS.storage_topic, storage_node),
{"method": "attach_volume",
"args" : {"volume_id": volume_id,
"instance_id" : instance_id,
"mountpoint" : device}})
return defer.succeed(True)
def detach_volume(self, context, volume_id, **kwargs):
# TODO(joshua): Make sure the updated state has been received first
volume = self._get_volume(volume_id)
storage_node = volume['node_name']
if not context.user.is_authorized(volume.get('user_id', None)):
raise exception.ApiError("%s not authorized for %s" %
(context.user.id, volume_id))
if 'instance_id' in volume.keys():
instance_id = volume['instance_id']
try:
instance = self.instdir.get(instance_id)
compute_node = instance['node_name']
mountpoint = volume['mountpoint']
if not context.user.is_authorized(
instance.get('owner_id', None)):
raise exception.ApiError(
"%s not authorized for %s" %
(context.user.id, instance_id))
rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node),
{"method": "detach_volume",
"args" : {"instance_id": instance_id,
"mountpoint": mountpoint}})
except exception.NotFound:
pass
rpc.cast('%s.%s' % (FLAGS.storage_topic, storage_node),
{"method": "detach_volume",
"args" : {"volume_id": volume_id}})
return defer.succeed(True)
def _convert_to_set(self, lst, str):
if lst == None or lst == []:
return None
return [{str: x} for x in lst]
def describe_instances(self, context, **kwargs):
return defer.succeed(self.format_instances(context.user))
def format_instances(self, user, reservation_id = None):
if self.instances == {}:
return {'reservationSet': []}
reservations = {}
for inst in self.instances:
instance = inst.values()[0]
res_id = instance.get('reservation_id', 'Unknown')
if (user.is_authorized(instance.get('owner_id', None))
and (reservation_id == None or reservation_id == res_id)):
i = {}
i['instance_id'] = instance.get('instance_id', None)
i['image_id'] = instance.get('image_id', None)
i['instance_state'] = {
'code': 42,
'name': instance.get('state', 'pending')
}
i['public_dns_name'] = self.network.get_public_ip_for_instance(
i['instance_id'])
i['private_dns_name'] = instance.get('private_dns_name', None)
if not i['public_dns_name']:
i['public_dns_name'] = i['private_dns_name']
i['dns_name'] = instance.get('dns_name', None)
i['key_name'] = instance.get('key_name', None)
if user.is_admin():
i['key_name'] = '%s (%s, %s)' % (i['key_name'],
instance.get('owner_id', None), instance.get('node_name',''))
i['product_codes_set'] = self._convert_to_set(
instance.get('product_codes', None), 'product_code')
i['instance_type'] = instance.get('instance_type', None)
i['launch_time'] = instance.get('launch_time', None)
i['ami_launch_index'] = instance.get('ami_launch_index',
None)
if not reservations.has_key(res_id):
r = {}
r['reservation_id'] = res_id
r['owner_id'] = instance.get('owner_id', None)
r['group_set'] = self._convert_to_set(
instance.get('groups', None), 'group_id')
r['instances_set'] = []
reservations[res_id] = r
reservations[res_id]['instances_set'].append(i)
instance_response = {'reservationSet' : list(reservations.values()) }
return instance_response
def describe_addresses(self, context, **kwargs):
return self.format_addresses(context.user)
def format_addresses(self, user):
addresses = []
# TODO(vish): move authorization checking into network.py
for address_record in self.network.describe_addresses(
type=network.PublicNetwork):
#logging.debug(address_record)
if user.is_authorized(address_record[u'user_id']):
address = {
'public_ip': address_record[u'address'],
'instance_id' : address_record.get(u'instance_id', 'free')
}
# FIXME: add another field for user id
if user.is_admin():
address['instance_id'] = "%s (%s)" % (
address['instance_id'],
address_record[u'user_id'],
)
addresses.append(address)
# logging.debug(addresses)
return {'addressesSet': addresses}
def allocate_address(self, context, **kwargs):
# TODO: Verify user is valid?
kwargs['owner_id'] = context.user.id
(address,network_name) = self.network.allocate_address(
context.user.id, type=network.PublicNetwork)
return defer.succeed({'addressSet': [{'publicIp' : address}]})
def release_address(self, context, **kwargs):
self.network.deallocate_address(kwargs.get('public_ip', None))
return defer.succeed({'releaseResponse': ["Address released."]})
def associate_address(self, context, instance_id, **kwargs):
instance = self.instdir.get(instance_id)
rv = self.network.associate_address(
kwargs['public_ip'],
instance['private_dns_name'],
instance_id)
return defer.succeed({'associateResponse': ["Address associated."]})
def disassociate_address(self, context, **kwargs):
rv = self.network.disassociate_address(kwargs['public_ip'])
# TODO - Strip the IP from the instance
return rv
def run_instances(self, context, **kwargs):
logging.debug("Going to run instances...")
reservation_id = utils.generate_uid('r')
launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
key_data = None
if kwargs.has_key('key_name'):
key_pair = context.user.get_key_pair(kwargs['key_name'])
if not key_pair:
raise exception.ApiError('Key Pair %s not found' %
kwargs['key_name'])
key_data = key_pair.public_key
for num in range(int(kwargs['max_count'])):
inst = self.instdir.new()
# TODO(ja): add ari, aki
inst['image_id'] = kwargs['image_id']
inst['user_data'] = kwargs.get('user_data', '')
inst['instance_type'] = kwargs.get('instance_type', '')
inst['reservation_id'] = reservation_id
inst['launch_time'] = launch_time
inst['key_data'] = key_data or ''
inst['key_name'] = kwargs.get('key_name', '')
inst['owner_id'] = context.user.id
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = num
address, _netname = self.network.allocate_address(
inst['owner_id'], mac=inst['mac_address'])
network = self.network.get_users_network(str(context.user.id))
inst['network_str'] = json.dumps(network.to_dict())
inst['bridge_name'] = network.bridge_name
inst['private_dns_name'] = str(address)
# TODO: allocate expresses on the router node
inst.save()
rpc.cast(FLAGS.compute_topic,
{"method": "run_instance",
"args": {"instance_id" : inst.instance_id}})
logging.debug("Casting to node for %s's instance with IP of %s" %
(context.user.name, inst['private_dns_name']))
# TODO: Make the NetworkComputeNode figure out the network name from ip.
return defer.succeed(self.format_instances(
context.user, reservation_id))
def terminate_instances(self, context, instance_id, **kwargs):
logging.debug("Going to start terminating instances")
# TODO: return error if not authorized
for i in instance_id:
logging.debug("Going to try and terminate %s" % i)
instance = self.instdir.get(i)
#if instance['state'] == 'pending':
# raise exception.ApiError('Cannot terminate pending instance')
if context.user.is_authorized(instance.get('owner_id', None)):
try:
self.network.disassociate_address(
instance.get('public_dns_name', 'bork'))
except:
pass
if instance.get('private_dns_name', None):
logging.debug("Deallocating address %s" % instance.get('private_dns_name', None))
try:
self.network.deallocate_address(instance.get('private_dns_name', None))
except Exception, _err:
pass
if instance.get('node_name', 'unassigned') != 'unassigned': #It's also internal default
rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
{"method": "terminate_instance",
"args" : {"instance_id": i}})
else:
instance.destroy()
return defer.succeed(True)
def reboot_instances(self, context, instance_id, **kwargs):
# TODO: return error if not authorized
for i in instance_id:
instance = self.instdir.get(i)
if instance['state'] == 'pending':
raise exception.ApiError('Cannot reboot pending instance')
if context.user.is_authorized(instance.get('owner_id', None)):
rpc.cast('%s.%s' % (FLAGS.node_topic, instance['node_name']),
{"method": "reboot_instance",
"args" : {"instance_id": i}})
return defer.succeed(True)
def delete_volume(self, context, volume_id, **kwargs):
# TODO: return error if not authorized
volume = self._get_volume(volume_id)
storage_node = volume['node_name']
if context.user.is_authorized(volume.get('user_id', None)):
rpc.cast('%s.%s' % (FLAGS.storage_topic, storage_node),
{"method": "delete_volume",
"args" : {"volume_id": volume_id}})
return defer.succeed(True)
def describe_images(self, context, image_id=None, **kwargs):
imageSet = images.list(context.user)
if not image_id is None:
imageSet = [i for i in imageSet if i['imageId'] in image_id]
return defer.succeed({'imagesSet': imageSet})
def deregister_image(self, context, image_id, **kwargs):
images.deregister(context.user, image_id)
return defer.succeed({'imageId': image_id})
def register_image(self, context, image_location=None, **kwargs):
if image_location is None and kwargs.has_key('name'):
image_location = kwargs['name']
image_id = images.register(context.user, image_location)
logging.debug("Registered %s as %s" % (image_location, image_id))
return defer.succeed({'imageId': image_id})
def modify_image_attribute(self, context, image_id,
attribute, operation_type, **kwargs):
if attribute != 'launchPermission':
raise exception.ApiError('only launchPermission is supported')
if len(kwargs['user_group']) != 1 or kwargs['user_group'][0] != 'all':
raise exception.ApiError('only group "all" is supported')
if not operation_type in ['add', 'delete']:
raise exception.ApiError('operation_type must be add or delete')
result = images.modify(context.user, image_id, operation_type)
return defer.succeed(result)
def update_state(self, topic, value):
""" accepts status reports from the queue and consolidates them """
# TODO(jmc): if an instance has disappeared from
# the node, call instance_death
if topic == "instances":
return defer.succeed(True)
aggregate_state = getattr(self, topic)
node_name = value.keys()[0]
items = value[node_name]
logging.debug("Updating %s state for %s" % (topic, node_name))
for item_id in items.keys():
if (aggregate_state.has_key('pending') and
aggregate_state['pending'].has_key(item_id)):
del aggregate_state['pending'][item_id]
aggregate_state[node_name] = items
return defer.succeed(True)

92
nova/endpoint/images.py Normal file
View File

@ -0,0 +1,92 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Proxy AMI-related calls from the cloud controller, to the running
objectstore daemon.
"""
import json
import random
import urllib
from nova import vendor
import boto
import boto.s3
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
def modify(user, image_id, operation):
conn(user).make_request(
method='POST',
bucket='_images',
query_args=qs({'image_id': image_id, 'operation': operation}))
return True
def register(user, image_location):
""" rpc call to register a new image based from a manifest """
image_id = utils.generate_uid('ami')
conn(user).make_request(
method='PUT',
bucket='_images',
query_args=qs({'image_location': image_location,
'image_id': image_id}))
return image_id
def list(user, filter_list=[]):
""" return a list of all images that a user can see
optionally filtered by a list of image_id """
# FIXME: send along the list of only_images to check for
response = conn(user).make_request(
method='GET',
bucket='_images')
return json.loads(response.read())
def deregister(user, image_id):
""" unregister an image """
conn(user).make_request(
method='DELETE',
bucket='_images',
query_args=qs({'image_id': image_id}))
def conn(user):
return boto.s3.connection.S3Connection (
aws_access_key_id=user.access,
aws_secret_access_key=user.secret,
is_secure=False,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
port=FLAGS.s3_port,
host=FLAGS.s3_host)
def qs(params):
pairs = []
for key in params.keys():
pairs.append(key + '=' + urllib.quote(params[key]))
return '&'.join(pairs)
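# Illustrative output of qs() above (not part of the original module); the
# parameters are made up. Key order follows Python 2 dict ordering.
#
#     qs({'image_id': 'ami-11111', 'operation': 'add'})
#     # -> 'image_id=ami-11111&operation=add'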

53
nova/exception.py Normal file
View File

@ -0,0 +1,53 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Nova base exception handling, including decorator for re-raising
Nova-type exceptions. SHOULD include dedicated exception logging.
"""
import logging
import traceback
import sys
class Error(Exception):
pass
class ApiError(Error):
def __init__(self, message='Unknown', code='Unknown'):
self.message = message
self.code = code
class NotFound(Error):
pass
class NotAuthorized(Error):
pass
def wrap_exception(f):
def _wrap(*args, **kw):
try:
return f(*args, **kw)
except Exception, e:
if not isinstance(e, Error):
# exc_type, exc_value, exc_traceback = sys.exc_info()
logging.exception('Uncaught exception')
# logging.debug(traceback.extract_stack(exc_traceback))
raise Error(str(e))
raise
_wrap.func_name = f.func_name
return _wrap
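# Illustrative usage sketch for wrap_exception (not part of the original
# module); the failing function below is made up.
#
#     from nova import exception
#
#     @exception.wrap_exception
#     def fragile():
#         raise ValueError('boom')
#
#     try:
#         fragile()
#     except exception.Error, e:
#         print e          # the ValueError comes back re-wrapped as Error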

131
nova/fakerabbit.py Normal file
View File

@ -0,0 +1,131 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Based a bit on the carrot.backeds.queue backend... but a lot better """
import logging
import Queue as queue
from carrot.backends import base
class Message(base.BaseMessage):
pass
class Exchange(object):
def __init__(self, name, exchange_type):
self.name = name
self.exchange_type = exchange_type
self._queue = queue.Queue()
self._routes = {}
def publish(self, message, routing_key=None):
logging.debug('(%s) publish (key: %s) %s',
self.name, routing_key, message)
if routing_key in self._routes:
for f in self._routes[routing_key]:
logging.debug('Publishing to route %s', f)
f(message, routing_key=routing_key)
def bind(self, callback, routing_key):
self._routes.setdefault(routing_key, [])
self._routes[routing_key].append(callback)
class Queue(object):
def __init__(self, name):
self.name = name
self._queue = queue.Queue()
def __repr__(self):
return '<Queue: %s>' % self.name
def push(self, message, routing_key=None):
self._queue.put(message)
def size(self):
return self._queue.qsize()
def pop(self):
return self._queue.get()
class Backend(object):
""" Singleton backend for testing """
class __impl(base.BaseBackend):
def __init__(self, *args, **kwargs):
#super(__impl, self).__init__(*args, **kwargs)
self._exchanges = {}
self._queues = {}
def _reset_all(self):
self._exchanges = {}
self._queues = {}
def queue_declare(self, queue, **kwargs):
if queue not in self._queues:
logging.debug('Declaring queue %s', queue)
self._queues[queue] = Queue(queue)
def exchange_declare(self, exchange, type, *args, **kwargs):
if exchange not in self._exchanges:
logging.debug('Declaring exchange %s', exchange)
self._exchanges[exchange] = Exchange(exchange, type)
def queue_bind(self, queue, exchange, routing_key, **kwargs):
logging.debug('Binding %s to %s with key %s',
queue, exchange, routing_key)
self._exchanges[exchange].bind(self._queues[queue].push,
routing_key)
def get(self, queue, no_ack=False):
if not self._queues[queue].size():
return None
(message_data, content_type, content_encoding) = \
self._queues[queue].pop()
message = Message(backend=self, body=message_data,
content_type=content_type,
content_encoding=content_encoding)
logging.debug('Getting from %s: %s', queue, message)
return message
def prepare_message(self, message_data, delivery_mode,
content_type, content_encoding, **kwargs):
"""Prepare message for sending."""
return (message_data, content_type, content_encoding)
def publish(self, message, exchange, routing_key, **kwargs):
if exchange in self._exchanges:
self._exchanges[exchange].publish(
message, routing_key=routing_key)
__instance = None
def __init__(self, *args, **kwargs):
if Backend.__instance is None:
Backend.__instance = Backend.__impl(*args, **kwargs)
self.__dict__['_Backend__instance'] = Backend.__instance
def __getattr__(self, attr):
return getattr(self.__instance, attr)
def __setattr__(self, attr, value):
return setattr(self.__instance, attr, value)
def reset_all():
Backend()._reset_all()
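# Illustrative roundtrip through the fake backend (not part of the original
# module); queue/exchange names and the payload are made up, and carrot must
# be importable.
#
#     from nova import fakerabbit
#
#     backend = fakerabbit.Backend()
#     backend.queue_declare('test_q')
#     backend.exchange_declare('test_x', 'topic')
#     backend.queue_bind('test_q', 'test_x', 'test_q')
#     msg = backend.prepare_message('{"method": "noop"}', 2,
#                                   'application/json', 'utf-8')
#     backend.publish(msg, 'test_x', 'test_q')
#     print backend.get('test_q').body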

109
nova/fakevirt.py Normal file
View File

@ -0,0 +1,109 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A fake (in-memory) hypervisor+api. Allows nova testing w/o KVM and libvirt.
"""
import StringIO
from xml.etree import ElementTree
class FakeVirtConnection(object):
# FIXME: networkCreateXML, listNetworks don't do anything since
# they aren't exercised in tests yet
def __init__(self):
self.next_index = 0
self.instances = {}
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
cls._instance = cls()
return cls._instance
def lookupByID(self, i):
return self.instances[str(i)]
def listDomainsID(self):
return self.instances.keys()
def listNetworks(self):
return []
def lookupByName(self, instance_id):
for x in self.instances.values():
if x.name() == instance_id:
return x
raise Exception('no instance found for instance_id: %s' % instance_id)
def networkCreateXML(self, xml):
pass
def createXML(self, xml, flags):
# parse the xml :(
xml_stringio = StringIO.StringIO(xml)
my_xml = ElementTree.parse(xml_stringio)
name = my_xml.find('name').text
fake_instance = FakeVirtInstance(conn=self,
index=str(self.next_index),
name=name,
xml=my_xml)
self.instances[str(self.next_index)] = fake_instance
self.next_index += 1
def _removeInstance(self, i):
self.instances.pop(str(i))
class FakeVirtInstance(object):
NOSTATE = 0x00
RUNNING = 0x01
BLOCKED = 0x02
PAUSED = 0x03
SHUTDOWN = 0x04
SHUTOFF = 0x05
CRASHED = 0x06
def __init__(self, conn, index, name, xml):
self._conn = conn
self._destroyed = False
self._name = name
self._index = index
self._state = self.RUNNING
def name(self):
return self._name
def destroy(self):
if self._state == self.SHUTOFF:
raise Exception('instance already destroyed: %s' % self.name())
self._state = self.SHUTDOWN
self._conn._removeInstance(self._index)
def info(self):
return [self._state, 0, 2, 0, 0]
def XMLDesc(self, flags):
return open('fakevirtinstance.xml', 'r').read()
def blockStats(self, disk):
return [0L, 0L, 0L, 0L, 0L]
def interfaceStats(self, iface):
return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]
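# Illustrative sketch exercising the fake hypervisor (not part of the original
# module); the domain XML below is a minimal made-up document.
#
#     from nova import fakevirt
#
#     conn = fakevirt.FakeVirtConnection.instance()
#     conn.createXML('<domain><name>i-test</name></domain>', 0)
#     dom = conn.lookupByName('i-test')
#     print dom.info()     # -> [1, 0, 2, 0, 0] while "running"
#     dom.destroy()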

78
nova/flags.py Normal file
View File

@ -0,0 +1,78 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Package-level global flags are defined here, the rest are defined
where they're used.
"""
import socket
from nova import vendor
from gflags import *
# This keeps pylint from barfing on the imports
FLAGS = FLAGS
DEFINE_string = DEFINE_string
DEFINE_integer = DEFINE_integer
DEFINE_bool = DEFINE_bool
# __GLOBAL FLAGS ONLY__
# Define any app-specific flags in their own files, docs at:
# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_integer('s3_internal_port', 3334, 's3 internal port')
DEFINE_string('s3_host', '127.0.0.1', 's3 host')
#DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
DEFINE_string('storage_topic', 'storage', 'the topic storage nodes listen on')
DEFINE_bool('fake_libvirt', False,
'whether to use a fake libvirt or not')
DEFINE_bool('verbose', False, 'show debug output')
DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses')
DEFINE_bool('fake_users', False, 'use fake users')
DEFINE_string('rabbit_host', 'localhost', 'rabbit host')
DEFINE_integer('rabbit_port', 5672, 'rabbit port')
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
DEFINE_string('rabbit_password', 'guest', 'rabbit password')
DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
DEFINE_string('ec2_url',
'http://127.0.0.1:8773/services/Cloud',
'Url to ec2 api server')
DEFINE_string('default_image',
'ami-11111',
'default image to use, testing only')
DEFINE_string('default_kernel',
'aki-11111',
'default kernel to use, testing only')
DEFINE_string('default_ramdisk',
'ari-11111',
'default ramdisk to use, testing only')
DEFINE_string('default_instance_type',
'm1.small',
'default instance type to use, testing only')
# UNUSED
DEFINE_string('node_availability_zone',
'nova',
'availability zone of this node')
DEFINE_string('node_name',
socket.gethostname(),
'name of this node')
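# Illustrative sketch of defining and reading a flag (not part of the original
# module); the flag name is made up.
#
#     import sys
#     from nova import flags
#
#     FLAGS = flags.FLAGS
#     flags.DEFINE_string('greeting', 'hello', 'example flag, testing only')
#     FLAGS(sys.argv)      # gflags parses --greeting=... out of argv
#     print FLAGS.greeting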

131
nova/process.py Normal file
View File

@ -0,0 +1,131 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Process pool, still buggy right now.
"""
import logging
import multiprocessing
import StringIO
from nova import vendor
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet import protocol
from twisted.internet import threads
from twisted.python import failure
# NOTE(termie): this is copied from twisted.internet.utils but since
# they don't export it, it is copied here.
class _BackRelay(protocol.ProcessProtocol):
"""
Trivial protocol for communicating with a process and turning its output
into the result of a L{Deferred}.
@ivar deferred: A L{Deferred} which will be called back with all of stdout
and, if C{errortoo} is true, all of stderr as well (mixed together in
one string). If C{errortoo} is false and any bytes are received over
stderr, this will fire with an L{_UnexpectedErrorOutput} instance and
the attribute will be set to C{None}.
@ivar onProcessEnded: If C{errortoo} is false and bytes are received over
stderr, this attribute will refer to a L{Deferred} which will be called
back when the process ends. This C{Deferred} is also associated with
the L{_UnexpectedErrorOutput} which C{deferred} fires with earlier in
this case so that users can determine when the process has actually
ended, in addition to knowing when bytes have been received via stderr.
"""
def __init__(self, deferred, errortoo=0):
self.deferred = deferred
self.s = StringIO.StringIO()
if errortoo:
self.errReceived = self.errReceivedIsGood
else:
self.errReceived = self.errReceivedIsBad
def errReceivedIsBad(self, text):
if self.deferred is not None:
self.onProcessEnded = defer.Deferred()
err = _UnexpectedErrorOutput(text, self.onProcessEnded)
self.deferred.errback(failure.Failure(err))
self.deferred = None
self.transport.loseConnection()
def errReceivedIsGood(self, text):
self.s.write(text)
def outReceived(self, text):
self.s.write(text)
def processEnded(self, reason):
if self.deferred is not None:
self.deferred.callback(self.s.getvalue())
elif self.onProcessEnded is not None:
self.onProcessEnded.errback(reason)
class BackRelayWithInput(_BackRelay):
def __init__(self, deferred, errortoo=0, input=None):
super(BackRelayWithInput, self).__init__(deferred, errortoo)
self.input = input
def connectionMade(self):
if self.input:
self.transport.write(self.input)
self.transport.closeStdin()
def getProcessOutput(executable, args=None, env=None, path=None, reactor=None,
errortoo=0, input=None):
if reactor is None:
from twisted.internet import reactor
args = args and args or ()
env = env and env or {}
d = defer.Deferred()
p = BackRelayWithInput(d, errortoo=errortoo, input=input)
reactor.spawnProcess(p, executable, (executable,)+tuple(args), env, path)
return d
class Pool(object):
""" A simple process pool implementation around mutliprocessing.
Allows up to `size` processes at a time and queues the rest.
Using workarounds for multiprocessing behavior described in:
http://pypi.python.org/pypi/twisted.internet.processes/1.0b1
"""
def __init__(self, size=None):
self._size = size
self._pool = multiprocessing.Pool(size)
self._registerShutdown()
def _registerShutdown(self):
reactor.addSystemEventTrigger(
'during', 'shutdown', self.shutdown, reactor)
def shutdown(self, reactor=None):
if not self._pool:
return
self._pool.close()
# wait for workers to finish
self._pool.terminate()
self._pool = None
def apply(self, f, *args, **kw):
""" Add a task to the pool and return a deferred. """
result = self._pool.apply_async(f, args, kw)
return threads.deferToThread(result.get)
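# Illustrative sketch of Pool.apply under a running reactor (not part of the
# original module); the worker function is made up and must be picklable
# (defined at module level) for multiprocessing to dispatch it.
#
#     from twisted.internet import reactor
#     from nova import process
#
#     def _square(x):
#         return x * x
#
#     def _done(result):
#         print result
#         reactor.stop()
#
#     pool = process.Pool(2)
#     pool.apply(_square, 7).addCallback(_done)
#     reactor.run()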

222
nova/rpc.py Normal file
View File

@ -0,0 +1,222 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
AMQP-based RPC. Queues have consumers and publishers.
No fan-out support yet.
"""
import logging
import sys
import uuid
from nova import vendor
import anyjson
from carrot import connection
from carrot import messaging
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet import task
from nova import fakerabbit
from nova import flags
FLAGS = flags.FLAGS
_log = logging.getLogger('amqplib')
_log.setLevel(logging.WARN)
class Connection(connection.BrokerConnection):
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
params = dict(hostname=FLAGS.rabbit_host,
port=FLAGS.rabbit_port,
userid=FLAGS.rabbit_userid,
password=FLAGS.rabbit_password,
virtual_host=FLAGS.rabbit_virtual_host)
if FLAGS.fake_rabbit:
params['backend_cls'] = fakerabbit.Backend
cls._instance = cls(**params)
return cls._instance
class Consumer(messaging.Consumer):
# TODO(termie): it would be nice to give these some way of automatically
# cleaning up after themselves
def attach_to_tornado(self, io_inst=None):
from tornado import ioloop
if io_inst is None:
io_inst = ioloop.IOLoop.instance()
injected = ioloop.PeriodicCallback(
lambda: self.fetch(enable_callbacks=True), 1, io_loop=io_inst)
injected.start()
return injected
attachToTornado = attach_to_tornado
def attach_to_twisted(self):
loop = task.LoopingCall(self.fetch, enable_callbacks=True)
loop.start(interval=0.001)
class Publisher(messaging.Publisher):
pass
class TopicConsumer(Consumer):
exchange_type = "topic"
def __init__(self, connection=None, topic="broadcast"):
self.queue = topic
self.routing_key = topic
self.exchange = FLAGS.control_exchange
super(TopicConsumer, self).__init__(connection=connection)
class AdapterConsumer(TopicConsumer):
def __init__(self, connection=None, topic="broadcast", proxy=None):
        _log.debug('Initializing the Adapter Consumer for %s' % (topic))
self.proxy = proxy
super(AdapterConsumer, self).__init__(connection=connection, topic=topic)
def receive(self, message_data, message):
_log.debug('received %s' % (message_data))
msg_id = message_data.pop('_msg_id', None)
method = message_data.get('method')
args = message_data.get('args', {})
if not method:
return
node_func = getattr(self.proxy, str(method))
node_args = dict((str(k), v) for k, v in args.iteritems())
d = defer.maybeDeferred(node_func, **node_args)
if msg_id:
d.addCallback(lambda rval: msg_reply(msg_id, rval))
d.addErrback(lambda e: msg_reply(msg_id, str(e)))
message.ack()
return
class TopicPublisher(Publisher):
exchange_type = "topic"
def __init__(self, connection=None, topic="broadcast"):
self.routing_key = topic
self.exchange = FLAGS.control_exchange
super(TopicPublisher, self).__init__(connection=connection)
class DirectConsumer(Consumer):
exchange_type = "direct"
def __init__(self, connection=None, msg_id=None):
self.queue = msg_id
self.routing_key = msg_id
self.exchange = msg_id
self.auto_delete = True
super(DirectConsumer, self).__init__(connection=connection)
class DirectPublisher(Publisher):
exchange_type = "direct"
def __init__(self, connection=None, msg_id=None):
self.routing_key = msg_id
self.exchange = msg_id
self.auto_delete = True
super(DirectPublisher, self).__init__(connection=connection)
def msg_reply(msg_id, reply):
conn = Connection.instance()
publisher = DirectPublisher(connection=conn, msg_id=msg_id)
try:
publisher.send({'result': reply})
except TypeError:
publisher.send(
{'result': dict((k, repr(v))
for k, v in reply.__dict__.iteritems())
})
publisher.close()
def call(topic, msg):
_log.debug("Making asynchronous call...")
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
_log.debug("MSG_ID is %s" % (msg_id))
conn = Connection.instance()
d = defer.Deferred()
consumer = DirectConsumer(connection=conn, msg_id=msg_id)
consumer.register_callback(lambda data, message: d.callback(data))
injected = consumer.attach_to_tornado()
    # clean up after the injected listener and return x
d.addCallback(lambda x: injected.stop() and x or x)
publisher = TopicPublisher(connection=conn, topic=topic)
publisher.send(msg)
publisher.close()
return d
def cast(topic, msg):
_log.debug("Making asynchronous cast...")
conn = Connection.instance()
publisher = TopicPublisher(connection=conn, topic=topic)
publisher.send(msg)
publisher.close()
def generic_response(message_data, message):
_log.debug('response %s', message_data)
message.ack()
sys.exit(0)
def send_message(topic, message, wait=True):
msg_id = uuid.uuid4().hex
message.update({'_msg_id': msg_id})
_log.debug('topic is %s', topic)
_log.debug('message %s', message)
if wait:
        consumer = messaging.Consumer(connection=Connection.instance(),
queue=msg_id,
exchange=msg_id,
auto_delete=True,
exchange_type="direct",
routing_key=msg_id)
consumer.register_callback(generic_response)
    publisher = messaging.Publisher(connection=Connection.instance(),
exchange="nova",
exchange_type="topic",
routing_key=topic)
publisher.send(message)
publisher.close()
if wait:
consumer.wait()
# TODO: Replace with a docstring test
if __name__ == "__main__":
send_message(sys.argv[1], anyjson.deserialize(sys.argv[2]))

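An assumed caller-side sketch (not itself in the commit): consumers receive a dict carrying 'method' and 'args'; call() adds a '_msg_id', attaches a reply consumer to the tornado ioloop (so a running ioloop is assumed), and waits on a direct queue, while cast() simply publishes. The 'compute' topic and method names below are taken from the test code and are illustrative only.

import logging

from nova import rpc

# call() returns a deferred that fires with the remote method's return value
d = rpc.call('compute', {'method': 'describe_instances', 'args': {}})
d.addCallback(lambda reply: logging.debug('compute replied: %s', reply))

# cast() is fire-and-forget: no _msg_id, so no reply queue is set up
rpc.cast('compute', {'method': 'reboot_instance',
                     'args': {'instance_id': 'i-12345678'}})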
139
nova/server.py Normal file
View File

@ -0,0 +1,139 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base functionality for nova daemons - gradually being replaced with twistd.py.
"""
import logging
import logging.handlers
import os
import signal
import sys
import time
from nova import vendor
import daemon
from daemon import pidlockfile
from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_bool('daemonize', False, 'daemonize this process')
# NOTE(termie): right now I am defaulting to using syslog when we daemonize
# it may be better to do something else -shrug-
# NOTE(Devin): I think we should let each process have its own log file
# and put it in /var/logs/nova/(appname).log
# This makes debugging much easier and cuts down on syslog
# clutter.
flags.DEFINE_bool('use_syslog', True, 'output to syslog when daemonizing')
flags.DEFINE_string('logfile', None, 'log file to output to')
flags.DEFINE_string('pidfile', None, 'pid file to output to')
flags.DEFINE_string('working_directory', './', 'working directory...')
def stop(pidfile):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, signal.SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(pidfile):
os.remove(pidfile)
else:
print str(err)
sys.exit(1)
def serve(name, main):
argv = FLAGS(sys.argv)
if not FLAGS.pidfile:
FLAGS.pidfile = '%s.pid' % name
logging.debug("Full set of FLAGS: \n\n\n" )
for flag in FLAGS:
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None) ))
action = 'start'
if len(argv) > 1:
action = argv.pop()
if action == 'stop':
stop(FLAGS.pidfile)
sys.exit()
elif action == 'restart':
stop(FLAGS.pidfile)
elif action == 'start':
pass
else:
print 'usage: %s [options] [start|stop|restart]' % argv[0]
sys.exit(1)
logging.getLogger('amqplib').setLevel(logging.WARN)
if FLAGS.daemonize:
logger = logging.getLogger()
formatter = logging.Formatter(
name + '(%(name)s): %(levelname)s %(message)s')
if FLAGS.use_syslog and not FLAGS.logfile:
syslog = logging.handlers.SysLogHandler(address='/dev/log')
syslog.setFormatter(formatter)
logger.addHandler(syslog)
else:
if not FLAGS.logfile:
FLAGS.logfile = '%s.log' % name
            logfile = logging.FileHandler(FLAGS.logfile)
logfile.setFormatter(formatter)
logger.addHandler(logfile)
stdin, stdout, stderr = None, None, None
else:
stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr
if FLAGS.verbose:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.WARNING)
with daemon.DaemonContext(
detach_process=FLAGS.daemonize,
working_directory=FLAGS.working_directory,
pidfile=pidlockfile.TimeoutPIDLockFile(FLAGS.pidfile,
acquire_timeout=1,
threaded=False),
stdin=stdin,
stdout=stdout,
stderr=stderr
):
main(argv)

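A hedged sketch of a daemon entrypoint built on serve() (assumed; the actual bin/ scripts live elsewhere in this commit): the name determines the default pid and log file names, and main(argv) runs inside the daemon context.

from nova import server

def main(argv):
    # a real daemon would register its AMQP consumers / ioloop here
    pass

if __name__ == '__main__':
    # usage: nova-example [--daemonize] [start|stop|restart]
    server.serve('nova-example', main)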
246
nova/test.py Normal file
View File

@ -0,0 +1,246 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base classes for our unit tests.
Allows overriding of flags for use of fakes,
and some black magic for inline callbacks.
"""
import logging
import time
import unittest
from nova import vendor
import mox
from tornado import ioloop
from twisted.internet import defer
from twisted.python import failure
from twisted.trial import unittest as trial_unittest
import stubout
from nova import datastore
from nova import fakerabbit
from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_bool('fake_tests', True,
'should we use everything for testing')
def skip_if_fake(f):
def _skipper(*args, **kw):
if FLAGS.fake_tests:
raise trial_unittest.SkipTest('Test cannot be run in fake mode')
else:
return f(*args, **kw)
_skipper.func_name = f.func_name
return _skipper
class TrialTestCase(trial_unittest.TestCase):
def setUp(self):
super(TrialTestCase, self).setUp()
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
self.mox = mox.Mox()
self.stubs = stubout.StubOutForTesting()
self.flag_overrides = {}
def tearDown(self):
super(TrialTestCase, self).tearDown()
self.reset_flags()
self.mox.UnsetStubs()
self.stubs.UnsetAll()
self.stubs.SmartUnsetAll()
self.mox.VerifyAll()
if FLAGS.fake_rabbit:
fakerabbit.reset_all()
# attempt to wipe all keepers
#keeper = datastore.Keeper()
#keeper.clear_all()
def flags(self, **kw):
for k, v in kw.iteritems():
if k in self.flag_overrides:
self.reset_flags()
raise Exception(
                    'trying to override already overridden flag: %s' % k)
self.flag_overrides[k] = getattr(FLAGS, k)
setattr(FLAGS, k, v)
def reset_flags(self):
for k, v in self.flag_overrides.iteritems():
setattr(FLAGS, k, v)
class BaseTestCase(TrialTestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
# TODO(termie): we could possibly keep a more global registry of
# the injected listeners... this is fine for now though
self.injected = []
self.ioloop = ioloop.IOLoop.instance()
self._waiting = None
self._doneWaiting = False
self._timedOut = False
self.set_up()
def set_up(self):
pass
def tear_down(self):
pass
def tearDown(self):
super(BaseTestCase, self).tearDown()
for x in self.injected:
x.stop()
if FLAGS.fake_rabbit:
fakerabbit.reset_all()
self.tear_down()
def _waitForTest(self, timeout=60):
""" Push the ioloop along to wait for our test to complete. """
self._waiting = self.ioloop.add_timeout(time.time() + timeout,
self._timeout)
def _wait():
if self._timedOut:
self.fail('test timed out')
self._done()
if self._doneWaiting:
self.ioloop.stop()
return
            # we could use add_callback here, but this uses less cpu when testing
self.ioloop.add_timeout(time.time() + 0.01, _wait)
self.ioloop.add_callback(_wait)
self.ioloop.start()
def _done(self):
if self._waiting:
try:
self.ioloop.remove_timeout(self._waiting)
except Exception:
pass
self._waiting = None
self._doneWaiting = True
def _maybeInlineCallbacks(self, f):
""" If we're doing async calls in our tests, wait on them.
This is probably the most complicated hunk of code we have so far.
First up, if the function is normal (not async) we just act normal
and return.
Async tests will use the "Inline Callbacks" pattern, which means
you yield Deferreds at every "waiting" step of your code instead
of making epic callback chains.
Example (callback chain, ugly):
d = self.node.terminate_instance(instance_id) # a Deferred instance
def _describe(_):
d_desc = self.node.describe_instances() # another Deferred instance
return d_desc
def _checkDescribe(rv):
self.assertEqual(rv, [])
d.addCallback(_describe)
d.addCallback(_checkDescribe)
d.addCallback(lambda x: self._done())
self._waitForTest()
Example (inline callbacks! yay!):
yield self.node.terminate_instance(instance_id)
rv = yield self.node.describe_instances()
self.assertEqual(rv, [])
If the test fits the Inline Callbacks pattern we will automatically
handle calling wait and done.
"""
        # TODO(termie): this can be a wrapper function instead, and
        #               we can make a metaclass so that we don't
# have to copy all that "run" code below.
g = f()
if not hasattr(g, 'send'):
self._done()
return defer.succeed(g)
inlined = defer.inlineCallbacks(f)
d = inlined()
return d
def _catchExceptions(self, result, failure):
exc = (failure.type, failure.value, failure.getTracebackObject())
if isinstance(failure.value, self.failureException):
result.addFailure(self, exc)
elif isinstance(failure.value, KeyboardInterrupt):
raise
else:
result.addError(self, exc)
self._done()
def _timeout(self):
self._waiting = False
self._timedOut = True
def run(self, result=None):
if result is None: result = self.defaultTestResult()
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
try:
try:
self.setUp()
except KeyboardInterrupt:
raise
except:
result.addError(self, self._exc_info())
return
ok = False
try:
d = self._maybeInlineCallbacks(testMethod)
d.addErrback(lambda x: self._catchExceptions(result, x))
d.addBoth(lambda x: self._done() and x)
self._waitForTest()
ok = True
except self.failureException:
result.addFailure(self, self._exc_info())
except KeyboardInterrupt:
raise
except:
result.addError(self, self._exc_info())
try:
self.tearDown()
except KeyboardInterrupt:
raise
except:
result.addError(self, self._exc_info())
ok = False
if ok: result.addSuccess(self)
finally:
result.stopTest(self)

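An illustrative test sketch against the base classes above (assumed, not part of the commit): flags() overrides are rolled back by reset_flags() in tearDown, and a generator-style test body is detected by _maybeInlineCallbacks so yielded Deferreds are waited on.

from twisted.internet import defer

from nova import flags
from nova import test

FLAGS = flags.FLAGS

class ExampleTestCase(test.BaseTestCase):
    def test_flag_override(self):
        self.flags(fake_rabbit=True)        # undone automatically in tearDown
        self.assertTrue(FLAGS.fake_rabbit)

    def test_inline_callbacks(self):
        # yielding a Deferred makes run() wrap this body with
        # defer.inlineCallbacks and pump the ioloop until it is done
        rv = yield defer.succeed(42)
        self.assertEqual(rv, 42)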
27
nova/tests/__init__.py Normal file
View File

@ -0,0 +1,27 @@
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`nova.tests` -- Nova Unittests
=====================================================
.. automodule:: nova.tests
:platform: Unix
.. moduleauthor:: Jesse Andrews <jesse@ansolabs.com>
.. moduleauthor:: Devin Carlen <devin.carlen@gmail.com>
.. moduleauthor:: Vishvananda Ishaya <vishvananda@yahoo.com>
.. moduleauthor:: Joshua McKenty <joshua@cognition.ca>
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""

View File

@ -0,0 +1,60 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import unittest
from nova import flags
from nova import test
from nova.auth import users
from nova.endpoint import cloud
FLAGS = flags.FLAGS
class AccessTestCase(test.BaseTestCase):
def setUp(self):
FLAGS.fake_libvirt = True
FLAGS.fake_storage = True
self.users = users.UserManager.instance()
super(AccessTestCase, self).setUp()
# Make a test project
# Make a test user
self.users.create_user('test1', 'access', 'secret')
# Make the test user a member of the project
def tearDown(self):
# Delete the test user
# Delete the test project
self.users.delete_user('test1')
pass
def test_001_basic_user_access(self):
user = self.users.get_user('test1')
# instance-foo, should be using object and not owner_id
instance_id = "i-12345678"
self.assertTrue(user.is_authorized(instance_id, action="describe_instances"))
def test_002_sysadmin_access(self):
user = self.users.get_user('test1')
bucket = "foo/bar/image"
self.assertFalse(user.is_authorized(bucket, action="register"))
self.users.add_role(user, "sysadmin")
if __name__ == "__main__":
# TODO: Implement use_fake as an option
unittest.main()

View File

@ -0,0 +1,50 @@
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import boto
from boto.ec2.regioninfo import RegionInfo
ACCESS_KEY = 'fake'
SECRET_KEY = 'fake'
CLC_IP = '127.0.0.1'
CLC_PORT = 8773
REGION = 'test'
def get_connection():
return boto.connect_ec2 (
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
is_secure=False,
region=RegionInfo(None, REGION, CLC_IP),
port=CLC_PORT,
path='/services/Cloud',
debug=99
)
class APIIntegrationTests(unittest.TestCase):
def test_001_get_all_images(self):
conn = get_connection()
res = conn.get_all_images()
print res
if __name__ == '__main__':
unittest.main()
#print conn.get_all_key_pairs()
#print conn.create_key_pair
#print conn.create_security_group('name', 'description')

189
nova/tests/api_unittest.py Normal file
View File

@ -0,0 +1,189 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib
import random
import StringIO
from nova import vendor
import boto
from boto.ec2 import regioninfo
from tornado import httpserver
from twisted.internet import defer
from nova import flags
from nova import test
from nova.auth import users
from nova.endpoint import api
from nova.endpoint import cloud
FLAGS = flags.FLAGS
# NOTE(termie): These are a bunch of helper methods and classes to short
# circuit boto calls and feed them into our tornado handlers,
# it's pretty damn circuitous so apologies if you have to fix
# a bug in it
def boto_to_tornado(method, path, headers, data, host, connection=None):
""" translate boto requests into tornado requests
connection should be a FakeTornadoHttpConnection instance
"""
    boto_headers = headers
    headers = httpserver.HTTPHeaders()
    for k, v in boto_headers.iteritems():
        headers[k] = v
req = httpserver.HTTPRequest(method=method,
uri=path,
headers=headers,
body=data,
host=host,
remote_ip='127.0.0.1',
connection=connection)
return req
def raw_to_httpresponse(s):
""" translate a raw tornado http response into an httplib.HTTPResponse """
sock = FakeHttplibSocket(s)
resp = httplib.HTTPResponse(sock)
resp.begin()
return resp
class FakeHttplibSocket(object):
""" a fake socket implementation for httplib.HTTPResponse, trivial """
def __init__(self, s):
self.fp = StringIO.StringIO(s)
def makefile(self, mode, other):
return self.fp
class FakeTornadoStream(object):
""" a fake stream to satisfy tornado's assumptions, trivial """
def set_close_callback(self, f):
pass
class FakeTornadoConnection(object):
""" a fake connection object for tornado to pass to its handlers
web requests are expected to write to this as they get data and call
finish when they are done with the request, we buffer the writes and
kick off a callback when it is done so that we can feed the result back
into boto.
"""
def __init__(self, d):
self.d = d
self._buffer = StringIO.StringIO()
def write(self, chunk):
self._buffer.write(chunk)
def finish(self):
s = self._buffer.getvalue()
self.d.callback(s)
xheaders = None
@property
def stream(self):
return FakeTornadoStream()
class FakeHttplibConnection(object):
""" a fake httplib.HTTPConnection for boto to use
requests made via this connection actually get translated and routed into
our tornado app, we then wait for the response and turn it back into
the httplib.HTTPResponse that boto expects.
"""
def __init__(self, app, host, is_secure=False):
self.app = app
self.host = host
self.deferred = defer.Deferred()
def request(self, method, path, data, headers):
req = boto_to_tornado
conn = FakeTornadoConnection(self.deferred)
request = boto_to_tornado(connection=conn,
method=method,
path=path,
headers=headers,
data=data,
host=self.host)
handler = self.app(request)
self.deferred.addCallback(raw_to_httpresponse)
def getresponse(self):
@defer.inlineCallbacks
def _waiter():
result = yield self.deferred
defer.returnValue(result)
d = _waiter()
# NOTE(termie): defer.returnValue above should ensure that
# this deferred has already been called by the time
# we get here, we are going to cheat and return
# the result of the callback
return d.result
def close(self):
pass
class ApiEc2TestCase(test.BaseTestCase):
def setUp(self):
super(ApiEc2TestCase, self).setUp()
self.users = users.UserManager.instance()
self.cloud = cloud.CloudController()
self.host = '127.0.0.1'
self.app = api.APIServerApplication(self.users, {'Cloud': self.cloud})
self.ec2 = boto.connect_ec2(
aws_access_key_id='fake',
aws_secret_access_key='fake',
is_secure=False,
region=regioninfo.RegionInfo(None, 'test', self.host),
port=FLAGS.cc_port,
path='/services/Cloud')
self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
def expect_http(self, host=None, is_secure=False):
http = FakeHttplibConnection(
self.app, '%s:%d' % (self.host, FLAGS.cc_port), False)
self.ec2.new_http_connection(host, is_secure).AndReturn(http)
return http
def test_describe_instances(self):
self.expect_http()
self.mox.ReplayAll()
self.assertEqual(self.ec2.get_all_instances(), [])
def test_get_all_key_pairs(self):
self.expect_http()
self.mox.ReplayAll()
keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") for x in range(random.randint(4, 8)))
self.users.generate_key_pair('fake', keyname)
rv = self.ec2.get_all_key_pairs()
self.assertTrue(filter(lambda k: k.name == keyname, rv))

View File

@ -0,0 +1,161 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import StringIO
import time
import unittest
from xml.etree import ElementTree
from nova import vendor
import mox
from tornado import ioloop
from twisted.internet import defer
from nova import flags
from nova import rpc
from nova import test
from nova.auth import users
from nova.compute import node
from nova.endpoint import api
from nova.endpoint import cloud
FLAGS = flags.FLAGS
class CloudTestCase(test.BaseTestCase):
def setUp(self):
super(CloudTestCase, self).setUp()
self.flags(fake_libvirt=True,
fake_storage=True,
fake_users=True,
redis_db=8)
self.conn = rpc.Connection.instance()
logging.getLogger().setLevel(logging.DEBUG)
# set up our cloud
self.cloud = cloud.CloudController()
self.cloud_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.cloud_topic,
proxy=self.cloud)
self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop))
# set up a node
self.node = node.Node()
self.node_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.compute_topic,
proxy=self.node)
self.injected.append(self.node_consumer.attach_to_tornado(self.ioloop))
user_mocker = mox.Mox()
self.admin = user_mocker.CreateMock(users.User)
self.admin.is_authorized(mox.IgnoreArg()).AndReturn(True)
self.context = api.APIRequestContext(handler=None,user=self.admin)
def test_console_output(self):
if FLAGS.fake_libvirt:
logging.debug("Can't test instances without a real virtual env.")
return
instance_id = 'foo'
inst = yield self.node.run_instance(instance_id)
output = yield self.cloud.get_console_output(self.context, [instance_id])
logging.debug(output)
self.assert_(output)
rv = yield self.node.terminate_instance(instance_id)
def test_run_instances(self):
if FLAGS.fake_libvirt:
logging.debug("Can't test instances without a real virtual env.")
return
image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
max_count = 1
kwargs = {'image_id': image_id,
'instance_type': instance_type,
'max_count': max_count}
rv = yield self.cloud.run_instances(self.context, **kwargs)
# TODO: check for proper response
instance = rv['reservationSet'][0][rv['reservationSet'][0].keys()[0]][0]
logging.debug("Need to watch instance %s until it's running..." % instance['instance_id'])
while True:
rv = yield defer.succeed(time.sleep(1))
info = self.cloud._get_instance(instance['instance_id'])
logging.debug(info['state'])
if info['state'] == node.Instance.RUNNING:
break
self.assert_(rv)
if not FLAGS.fake_libvirt:
time.sleep(45) # Should use boto for polling here
for reservations in rv['reservationSet']:
# for res_id in reservations.keys():
# logging.debug(reservations[res_id])
# for instance in reservations[res_id]:
for instance in reservations[reservations.keys()[0]]:
logging.debug("Terminating instance %s" % instance['instance_id'])
rv = yield self.node.terminate_instance(instance['instance_id'])
def test_instance_update_state(self):
def instance(num):
return {
'reservation_id': 'r-1',
'instance_id': 'i-%s' % num,
'image_id': 'ami-%s' % num,
'private_dns_name': '10.0.0.%s' % num,
'dns_name': '10.0.0%s' % num,
'ami_launch_index': str(num),
'instance_type': 'fake',
'availability_zone': 'fake',
'key_name': None,
'kernel_id': 'fake',
'ramdisk_id': 'fake',
'groups': ['default'],
'product_codes': None,
'state': 0x01,
'user_data': ''
}
rv = self.cloud.format_instances(self.admin)
print rv
self.assert_(len(rv['reservationSet']) == 0)
# simulate launch of 5 instances
# self.cloud.instances['pending'] = {}
#for i in xrange(5):
# inst = instance(i)
# self.cloud.instances['pending'][inst['instance_id']] = inst
#rv = self.cloud.format_instances(self.admin)
#self.assert_(len(rv['reservationSet']) == 1)
#self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5)
# report 4 nodes each having 1 of the instances
#for i in xrange(4):
# self.cloud.update_state('instances', {('node-%s' % i): {('i-%s' % i): instance(i)}})
# one instance should be pending still
#self.assert_(len(self.cloud.instances['pending'].keys()) == 1)
# check that the reservations collapse
#rv = self.cloud.format_instances(self.admin)
#self.assert_(len(rv['reservationSet']) == 1)
#self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5)
# check that we can get metadata for each instance
#for i in xrange(4):
# data = self.cloud.get_metadata(instance(i)['private_dns_name'])
# self.assert_(data['meta-data']['ami-id'] == 'ami-%s' % i)

View File

@ -0,0 +1,60 @@
from nova import test
from nova import datastore
import random
class KeeperTestCase(test.BaseTestCase):
"""
Basic persistence tests for Keeper datastore.
Generalize, then use these to support
migration to redis / cassandra / multiple stores.
"""
def __init__(self, *args, **kwargs):
"""
Create a new keeper instance for test keys.
"""
super(KeeperTestCase, self).__init__(*args, **kwargs)
self.keeper = datastore.Keeper('test-')
def tear_down(self):
"""
Scrub out test keeper data.
"""
pass
def test_store_strings(self):
"""
Confirm that simple strings go in and come out safely.
Should also test unicode strings.
"""
randomstring = ''.join(
[random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-')
for _x in xrange(20)]
)
self.keeper['test_string'] = randomstring
self.assertEqual(randomstring, self.keeper['test_string'])
def test_store_dicts(self):
"""
Arbitrary dictionaries should be storable.
"""
test_dict = {'key_one': 'value_one'}
self.keeper['test_dict'] = test_dict
self.assertEqual(test_dict['key_one'],
self.keeper['test_dict']['key_one'])
def test_sets(self):
"""
A keeper dict should be self-serializing.
"""
self.keeper.set_add('test_set', 'foo')
test_dict = {'arbitrary': 'dict of stuff'}
self.keeper.set_add('test_set', test_dict)
self.assertTrue(self.keeper.set_is_member('test_set', 'foo'))
self.assertFalse(self.keeper.set_is_member('test_set', 'bar'))
self.keeper.set_remove('test_set', 'foo')
self.assertFalse(self.keeper.set_is_member('test_set', 'foo'))
rv = self.keeper.set_fetch('test_set')
self.assertEqual(test_dict, rv.next())
self.keeper.set_remove('test_set', test_dict)

26
nova/tests/fake_flags.py Normal file
View File

@ -0,0 +1,26 @@
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nova import flags
FLAGS = flags.FLAGS
FLAGS.fake_libvirt = True
FLAGS.fake_storage = True
FLAGS.fake_rabbit = True
FLAGS.fake_network = True
FLAGS.fake_users = True
FLAGS.keeper_backend = 'sqlite'
FLAGS.datastore_path = ':memory:'
FLAGS.verbose = True

View File

@ -0,0 +1,74 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import StringIO
import time
import unittest
from xml.etree import ElementTree
from nova import vendor
import mox
from tornado import ioloop
from twisted.internet import defer
from nova import cloud
from nova import exception
from nova import flags
from nova import node
from nova import rpc
from nova import test
FLAGS = flags.FLAGS
class AdminTestCase(test.BaseTestCase):
def setUp(self):
super(AdminTestCase, self).setUp()
self.flags(fake_libvirt=True,
fake_rabbit=True)
self.conn = rpc.Connection.instance()
logging.getLogger().setLevel(logging.INFO)
# set up our cloud
self.cloud = cloud.CloudController()
self.cloud_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.cloud_topic,
proxy=self.cloud)
self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop))
# set up a node
self.node = node.Node()
self.node_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.compute_topic,
proxy=self.node)
self.injected.append(self.node_consumer.attach_to_tornado(self.ioloop))
def test_flush_terminated(self):
# Launch an instance
# Wait until it's running
# Terminate it
# Wait until it's terminated
# Flush terminated nodes
# ASSERT that it's gone
pass

View File

@ -0,0 +1,57 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import random
from nova import datastore
from nova import test
class KeeperTestCase(test.TrialTestCase):
"""
Basic persistence tests for Keeper datastore.
Generalize, then use these to support
migration to redis / cassandra / multiple stores.
"""
def setUp(self):
super(KeeperTestCase, self).setUp()
self.keeper = datastore.Keeper('test')
def tearDown(self):
super(KeeperTestCase, self).tearDown()
self.keeper.clear()
def test_store_strings(self):
"""
Confirm that simple strings go in and come out safely.
Should also test unicode strings.
"""
randomstring = ''.join(
[random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-')
for _x in xrange(20)]
)
self.keeper['test_string'] = randomstring
self.assertEqual(randomstring, self.keeper['test_string'])
def test_store_dicts(self):
"""
Arbitrary dictionaries should be storable.
"""
test_dict = {'key_one': 'value_one'}
self.keeper['test_dict'] = test_dict
self.assertEqual(test_dict['key_one'],
self.keeper['test_dict']['key_one'])
def test_sets(self):
"""
A keeper dict should be self-serializing.
"""
self.keeper.set_add('test_set', 'foo')
test_dict = {'arbitrary': 'dict of stuff'}
self.keeper.set_add('test_set', test_dict)
self.assertTrue(self.keeper.set_is_member('test_set', 'foo'))
self.assertFalse(self.keeper.set_is_member('test_set', 'bar'))
self.keeper.set_remove('test_set', 'foo')
self.assertFalse(self.keeper.set_is_member('test_set', 'foo'))
rv = self.keeper.set_fetch('test_set')
self.assertEqual(test_dict, rv.next())
self.keeper.set_remove('test_set', test_dict)

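A small usage sketch of the Keeper interface exercised by both keeper test cases (assumed shape, mirrored from the assertions above): it acts like a dict for simple values and offers set_* helpers for set membership.

from nova import datastore

keeper = datastore.Keeper('example-')
keeper['greeting'] = 'hello'
print keeper['greeting']                       # -> 'hello'

keeper.set_add('colors', 'blue')
print keeper.set_is_member('colors', 'blue')   # -> True
keeper.set_remove('colors', 'blue')
keeper.clear()                                 # used by the test tearDown to scrub data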
View File

@ -0,0 +1,113 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
from nova import vendor
import IPy
from nova import flags
from nova import test
from nova.compute import network
from nova.auth import users
class NetworkTestCase(test.TrialTestCase):
def setUp(self):
super(NetworkTestCase, self).setUp()
logging.getLogger().setLevel(logging.DEBUG)
self.manager = users.UserManager.instance()
for i in range(0, 6):
name = 'user%s' % i
if not self.manager.get_user(name):
self.manager.create_user(name, name, name)
self.network = network.NetworkController(netsize=16)
def tearDown(self):
super(NetworkTestCase, self).tearDown()
for i in range(0, 6):
name = 'user%s' % i
self.manager.delete_user(name)
def test_network_serialization(self):
net1 = network.Network(vlan=100, network="192.168.100.0/24", conn=None)
address = net1.allocate_ip("user0", "01:24:55:36:f2:a0")
net_json = str(net1)
net2 = network.Network.from_json(net_json)
self.assertEqual(net_json, str(net2))
self.assertTrue(IPy.IP(address) in net2.network)
def test_allocate_deallocate_address(self):
for flag in flags.FLAGS:
print "%s=%s" % (flag, flags.FLAGS.get(flag, None))
(address, net_name) = self.network.allocate_address(
"user0", "01:24:55:36:f2:a0")
logging.debug("Was allocated %s" % (address))
self.assertEqual(True, address in self._get_user_addresses("user0"))
rv = self.network.deallocate_address(address)
self.assertEqual(False, address in self._get_user_addresses("user0"))
def test_range_allocation(self):
(address, net_name) = self.network.allocate_address(
"user0", "01:24:55:36:f2:a0")
(secondaddress, net_name) = self.network.allocate_address(
"user1", "01:24:55:36:f2:a0")
self.assertEqual(True, address in self._get_user_addresses("user0"))
self.assertEqual(True,
secondaddress in self._get_user_addresses("user1"))
self.assertEqual(False, address in self._get_user_addresses("user1"))
rv = self.network.deallocate_address(address)
self.assertEqual(False, address in self._get_user_addresses("user0"))
rv = self.network.deallocate_address(secondaddress)
self.assertEqual(False,
secondaddress in self._get_user_addresses("user1"))
def test_subnet_edge(self):
(secondaddress, net_name) = self.network.allocate_address("user0")
for user in range(1,5):
user_id = "user%s" % (user)
(address, net_name) = self.network.allocate_address(
user_id, "01:24:55:36:f2:a0")
(address2, net_name) = self.network.allocate_address(
user_id, "01:24:55:36:f2:a0")
(address3, net_name) = self.network.allocate_address(
user_id, "01:24:55:36:f2:a0")
self.assertEqual(False,
address in self._get_user_addresses("user0"))
self.assertEqual(False,
address2 in self._get_user_addresses("user0"))
self.assertEqual(False,
address3 in self._get_user_addresses("user0"))
rv = self.network.deallocate_address(address)
rv = self.network.deallocate_address(address2)
rv = self.network.deallocate_address(address3)
rv = self.network.deallocate_address(secondaddress)
def test_too_many_users(self):
for i in range(0, 30):
name = 'toomany-user%s' % i
self.manager.create_user(name, name, name)
(address, net_name) = self.network.allocate_address(
name, "01:24:55:36:f2:a0")
self.manager.delete_user(name)
def _get_user_addresses(self, user_id):
rv = self.network.describe_addresses()
user_addresses = []
for item in rv:
if item['user_id'] == user_id:
user_addresses.append(item['address'])
return user_addresses

128
nova/tests/node_unittest.py Normal file
View File

@ -0,0 +1,128 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import StringIO
import time
import unittest
from xml.etree import ElementTree
from nova import vendor
import mox
from tornado import ioloop
from twisted.internet import defer
from nova import exception
from nova import flags
from nova import test
from nova import utils
from nova.compute import model
from nova.compute import node
FLAGS = flags.FLAGS
class InstanceXmlTestCase(test.TrialTestCase):
# @defer.inlineCallbacks
def test_serialization(self):
# TODO: Reimplement this, it doesn't make sense in redis-land
return
# instance_id = 'foo'
# first_node = node.Node()
# inst = yield first_node.run_instance(instance_id)
#
# # force the state so that we can verify that it changes
# inst._s['state'] = node.Instance.NOSTATE
# xml = inst.toXml()
# self.assert_(ElementTree.parse(StringIO.StringIO(xml)))
#
# second_node = node.Node()
# new_inst = node.Instance.fromXml(second_node._conn, pool=second_node._pool, xml=xml)
# self.assertEqual(new_inst.state, node.Instance.RUNNING)
# rv = yield first_node.terminate_instance(instance_id)
class NodeConnectionTestCase(test.TrialTestCase):
def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
super(NodeConnectionTestCase, self).setUp()
self.flags(fake_libvirt=True,
fake_storage=True,
fake_users=True,
redis_db=8)
self.node = node.Node()
def create_instance(self):
instdir = model.InstanceDirectory()
inst = instdir.new()
# TODO(ja): add ami, ari, aki, user_data
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['owner_id'] = 'fake'
inst['node_name'] = FLAGS.node_name
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
inst.save()
return inst['instance_id']
@defer.inlineCallbacks
def test_run_describe_terminate(self):
instance_id = self.create_instance()
rv = yield self.node.run_instance(instance_id)
rv = yield self.node.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
rv = yield self.node.terminate_instance(instance_id)
rv = yield self.node.describe_instances()
self.assertEqual(rv, {})
@defer.inlineCallbacks
def test_reboot(self):
instance_id = self.create_instance()
rv = yield self.node.run_instance(instance_id)
rv = yield self.node.describe_instances()
logging.debug("describe_instances returns %s" % (rv))
self.assertEqual(rv[instance_id].name, instance_id)
yield self.node.reboot_instance(instance_id)
rv = yield self.node.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
rv = yield self.node.terminate_instance(instance_id)
@defer.inlineCallbacks
def test_console_output(self):
instance_id = self.create_instance()
rv = yield self.node.run_instance(instance_id)
console = yield self.node.get_console_output(instance_id)
self.assert_(console)
rv = yield self.node.terminate_instance(instance_id)
@defer.inlineCallbacks
def test_run_instance_existing(self):
instance_id = self.create_instance()
rv = yield self.node.run_instance(instance_id)
rv = yield self.node.describe_instances()
self.assertEqual(rv[instance_id].name, instance_id)
self.assertRaises(exception.Error, self.node.run_instance, instance_id)
rv = yield self.node.terminate_instance(instance_id)

View File

@ -0,0 +1,190 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import hashlib
import logging
import os
import shutil
import tempfile
from nova import vendor
from nova import flags
from nova import rpc
from nova import objectstore
from nova import test
from nova.auth import users
FLAGS = flags.FLAGS
oss_tempdir = tempfile.mkdtemp(prefix='test_oss-')
# delete tempdirs from previous runs (we don't delete after test to allow
# checking the contents after running tests)
for path in glob.glob(os.path.abspath(os.path.join(oss_tempdir, '../test_oss-*'))):
if path != oss_tempdir:
shutil.rmtree(path)
# create bucket/images path
os.makedirs(os.path.join(oss_tempdir, 'images'))
os.makedirs(os.path.join(oss_tempdir, 'buckets'))
class ObjectStoreTestCase(test.BaseTestCase):
def setUp(self):
super(ObjectStoreTestCase, self).setUp()
self.flags(fake_users=True,
buckets_path=os.path.join(oss_tempdir, 'buckets'),
images_path=os.path.join(oss_tempdir, 'images'),
ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
self.conn = rpc.Connection.instance()
logging.getLogger().setLevel(logging.DEBUG)
self.um = users.UserManager.instance()
def test_buckets(self):
try:
self.um.create_user('user1')
except: pass
try:
self.um.create_user('user2')
except: pass
try:
self.um.create_user('admin_user', admin=True)
except: pass
objectstore.bucket.Bucket.create('new_bucket', self.um.get_user('user1'))
bucket = objectstore.bucket.Bucket('new_bucket')
# creator is authorized to use bucket
self.assert_(bucket.is_authorized(self.um.get_user('user1')))
# another user is not authorized
self.assert_(bucket.is_authorized(self.um.get_user('user2')) == False)
# admin is authorized to use bucket
self.assert_(bucket.is_authorized(self.um.get_user('admin_user')))
# new buckets are empty
self.assert_(bucket.list_keys()['Contents'] == [])
# storing keys works
bucket['foo'] = "bar"
self.assert_(len(bucket.list_keys()['Contents']) == 1)
self.assert_(bucket['foo'].read() == 'bar')
# md5 of key works
self.assert_(bucket['foo'].md5 == hashlib.md5('bar').hexdigest())
# deleting non-empty bucket throws exception
exception = False
try:
bucket.delete()
except:
exception = True
self.assert_(exception)
# deleting key
del bucket['foo']
        # deleting empty bucket
bucket.delete()
# accessing deleted bucket throws exception
exception = False
try:
objectstore.bucket.Bucket('new_bucket')
except:
exception = True
self.assert_(exception)
self.um.delete_user('user1')
self.um.delete_user('user2')
self.um.delete_user('admin_user')
def test_images(self):
try:
self.um.create_user('image_creator')
except: pass
image_user = self.um.get_user('image_creator')
# create a bucket for our bundle
objectstore.bucket.Bucket.create('image_bucket', image_user)
bucket = objectstore.bucket.Bucket('image_bucket')
# upload an image manifest/parts
bundle_path = os.path.join(os.path.dirname(__file__), 'bundle')
for path in glob.glob(bundle_path + '/*'):
bucket[os.path.basename(path)] = open(path, 'rb').read()
# register an image
objectstore.image.Image.create('i-testing', 'image_bucket/1mb.manifest.xml', image_user)
# verify image
my_img = objectstore.image.Image('i-testing')
result_image_file = os.path.join(my_img.path, 'image')
self.assertEqual(os.stat(result_image_file).st_size, 1048576)
sha = hashlib.sha1(open(result_image_file).read()).hexdigest()
self.assertEqual(sha, '3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3')
# verify image permissions
try:
self.um.create_user('new_user')
except: pass
new_user = self.um.get_user('new_user')
self.assert_(my_img.is_authorized(new_user) == False)
self.um.delete_user('new_user')
self.um.delete_user('image_creator')
# class ApiObjectStoreTestCase(test.BaseTestCase):
# def setUp(self):
# super(ApiObjectStoreTestCase, self).setUp()
# FLAGS.fake_users = True
# FLAGS.buckets_path = os.path.join(tempdir, 'buckets')
# FLAGS.images_path = os.path.join(tempdir, 'images')
# FLAGS.ca_path = os.path.join(os.path.dirname(__file__), 'CA')
#
# self.users = users.UserManager.instance()
# self.app = handler.Application(self.users)
#
# self.host = '127.0.0.1'
#
# self.conn = boto.s3.connection.S3Connection(
# aws_access_key_id=user.access,
# aws_secret_access_key=user.secret,
# is_secure=False,
# calling_format=boto.s3.connection.OrdinaryCallingFormat(),
# port=FLAGS.s3_port,
# host=FLAGS.s3_host)
#
# self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
#
# def tearDown(self):
# FLAGS.Reset()
# super(ApiObjectStoreTestCase, self).tearDown()
#
# def test_describe_instances(self):
# self.expect_http()
# self.mox.ReplayAll()
#
# self.assertEqual(self.ec2.get_all_instances(), [])

24
nova/tests/real_flags.py Normal file
View File

@ -0,0 +1,24 @@
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nova import flags
FLAGS = flags.FLAGS
FLAGS.fake_libvirt = False
FLAGS.fake_storage = False
FLAGS.fake_rabbit = False
FLAGS.fake_network = False
FLAGS.fake_users = False
FLAGS.verbose = False

View File

@ -0,0 +1,86 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import StringIO
import time
import unittest
from xml.etree import ElementTree
from nova import vendor
import mox
from tornado import ioloop
from twisted.internet import defer
from nova import exception
from nova import flags
from nova import test
from nova.compute import node
from nova.volume import storage
FLAGS = flags.FLAGS
class StorageTestCase(test.TrialTestCase):
def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
super(StorageTestCase, self).setUp()
self.mynode = node.Node()
self.mystorage = None
self.flags(fake_libvirt=True,
fake_storage=True,
redis_db=8)
if FLAGS.fake_storage:
self.mystorage = storage.FakeBlockStore()
else:
self.mystorage = storage.BlockStore()
@test.skip_if_fake
def test_run_create_volume(self):
vol_size = '0'
user_id = 'fake'
volume_id = self.mystorage.create_volume(vol_size, user_id)
# rv = self.mystorage.describe_volumes()
# Volumes have to be sorted by timestamp in order to work here...
# TODO(termie): get_volume returns differently than create_volume
self.assertEqual(volume_id,
self.mystorage.get_volume(volume_id)['volume_id'])
rv = self.mystorage.delete_volume(volume_id)
self.assertRaises(exception.Error,
self.mystorage.get_volume,
volume_id)
@test.skip_if_fake
def test_run_attach_detach_volume(self):
# Create one volume and one node to test with
instance_id = "storage-test"
# TODO(joshua) - Redo this test, can't make fake instances this way any more
# rv = self.mynode.run_instance(instance_id)
vol_size = "5"
user_id = "fake"
volume_id = self.mystorage.create_volume(vol_size, user_id)
rv = self.mystorage.attach_volume(volume_id,
instance_id,
"/dev/sdf")
volume_obj = self.mystorage.get_volume(volume_id)
self.assertEqual(volume_obj['status'], "attached")
# TODO(???): assert that it's attached to the right instance
rv = self.mystorage.detach_volume(volume_id)
volume_obj = self.mystorage.get_volume(volume_id)
self.assertEqual(volume_obj['status'], "available")

View File

@ -0,0 +1,137 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
from nova import vendor
from M2Crypto import BIO
from M2Crypto import RSA
from M2Crypto import X509
from nova import crypto
from nova import flags
from nova import test
from nova import utils
from nova.auth import users
from nova.endpoint import cloud
FLAGS = flags.FLAGS
class UserTestCase(test.BaseTestCase):
def setUp(self):
super(UserTestCase, self).setUp()
self.flags(fake_libvirt=True,
fake_storage=True,
redis_db=8)
self.users = users.UserManager.instance()
def test_001_can_create_user(self):
self.users.create_user('test1', 'access', 'secret')
def test_002_can_get_user(self):
user = self.users.get_user('test1')
def test_003_can_retreive_properties(self):
user = self.users.get_user('test1')
self.assertEqual('test1', user.id)
self.assertEqual('access', user.access)
self.assertEqual('secret', user.secret)
def test_004_signature_is_valid(self):
#self.assertTrue(self.users.authenticate( **boto.generate_url ... ? ? ? ))
pass
#raise NotImplementedError
def test_005_can_get_credentials(self):
return
credentials = self.users.get_user('test1').get_credentials()
self.assertEqual(credentials,
'export EC2_ACCESS_KEY="access"\n' +
'export EC2_SECRET_KEY="secret"\n' +
'export EC2_URL="http://127.0.0.1:8773/services/Cloud"\n' +
'export S3_URL="http://127.0.0.1:3333/"\n' +
'export EC2_USER_ID="test1"\n')
def test_006_test_key_storage(self):
user = self.users.get_user('test1')
user.create_key_pair('public', 'key', 'fingerprint')
key = user.get_key_pair('public')
self.assertEqual('key', key.public_key)
self.assertEqual('fingerprint', key.fingerprint)
def test_007_test_key_generation(self):
user = self.users.get_user('test1')
private_key, fingerprint = user.generate_key_pair('public2')
key = RSA.load_key_string(private_key, callback=lambda: None)
bio = BIO.MemoryBuffer()
public_key = user.get_key_pair('public2').public_key
key.save_pub_key_bio(bio)
converted = crypto.ssl_pub_to_ssh_pub(bio.read())
# assert key fields are equal
print converted
self.assertEqual(public_key.split(" ")[1].strip(),
converted.split(" ")[1].strip())
def test_008_can_list_key_pairs(self):
keys = self.users.get_user('test1').get_key_pairs()
self.assertTrue(filter(lambda k: k.name == 'public', keys))
self.assertTrue(filter(lambda k: k.name == 'public2', keys))
def test_009_can_delete_key_pair(self):
self.users.get_user('test1').delete_key_pair('public')
keys = self.users.get_user('test1').get_key_pairs()
self.assertFalse(filter(lambda k: k.name == 'public', keys))
def test_010_can_list_users(self):
users = self.users.get_users()
self.assertTrue(filter(lambda u: u.id == 'test1', users))
def test_011_can_generate_x509(self):
# MUST HAVE RUN CLOUD SETUP BY NOW
self.cloud = cloud.CloudController()
self.cloud.setup()
private_key, signed_cert_string = self.users.get_user('test1').generate_x509_cert()
logging.debug(signed_cert_string)
# Need to verify that it's signed by the right intermediate CA
full_chain = crypto.fetch_ca(username='test1', chain=True)
int_cert = crypto.fetch_ca(username='test1', chain=False)
cloud_cert = crypto.fetch_ca()
logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain)
signed_cert = X509.load_cert_string(signed_cert_string)
chain_cert = X509.load_cert_string(full_chain)
int_cert = X509.load_cert_string(int_cert)
cloud_cert = X509.load_cert_string(cloud_cert)
self.assertTrue(signed_cert.verify(chain_cert.get_pubkey()))
self.assertTrue(signed_cert.verify(int_cert.get_pubkey()))
if not FLAGS.use_intermediate_ca:
self.assertTrue(signed_cert.verify(cloud_cert.get_pubkey()))
else:
self.assertFalse(signed_cert.verify(cloud_cert.get_pubkey()))
def test_012_can_delete_user(self):
self.users.delete_user('test1')
users = self.users.get_users()
if users != None:
self.assertFalse(filter(lambda u: u.id == 'test1', users))
if __name__ == "__main__":
# TODO: Implement use_fake as an option
unittest.main()

249
nova/twistd.py Normal file
View File

@ -0,0 +1,249 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Twisted daemon helpers, specifically to parse out gFlags from twisted flags,
manage pid files and support syslogging.
"""
import logging
import os
import signal
import sys
import time
import UserDict
import logging.handlers
from nova import vendor
from twisted.scripts import twistd
from twisted.python import log
from twisted.python import reflect
from twisted.python import runtime
from twisted.python import usage
from nova import flags
if runtime.platformType == "win32":
from twisted.scripts._twistw import ServerOptions
else:
from twisted.scripts._twistd_unix import ServerOptions
FLAGS = flags.FLAGS
class TwistdServerOptions(ServerOptions):
def parseArgs(self, *args):
return
def WrapTwistedOptions(wrapped):
class TwistedOptionsToFlags(wrapped):
subCommands = None
def __init__(self):
# NOTE(termie): _data exists because Twisted stuff expects
# to be able to set arbitrary things that are
# not actual flags
self._data = {}
self._flagHandlers = {}
self._paramHandlers = {}
# Absorb the twistd flags into our FLAGS
self._absorbFlags()
self._absorbParameters()
self._absorbHandlers()
super(TwistedOptionsToFlags, self).__init__()
def _absorbFlags(self):
twistd_flags = []
reflect.accumulateClassList(self.__class__, 'optFlags', twistd_flags)
for flag in twistd_flags:
key = flag[0].replace('-', '_')
flags.DEFINE_boolean(key, None, str(flag[-1]))
def _absorbParameters(self):
twistd_params = []
reflect.accumulateClassList(self.__class__, 'optParameters', twistd_params)
for param in twistd_params:
key = param[0].replace('-', '_')
flags.DEFINE_string(key, param[2], str(param[-1]))
def _absorbHandlers(self):
twistd_handlers = {}
reflect.addMethodNamesToDict(self.__class__, twistd_handlers, "opt_")
# NOTE(termie): Much of the following is derived/copied from
# twisted.python.usage with the express purpose of
# providing compatibility
for name in twistd_handlers.keys():
method = getattr(self, 'opt_'+name)
takesArg = not usage.flagFunction(method, name)
doc = getattr(method, '__doc__', None)
if not doc:
doc = 'undocumented'
if not takesArg:
if name not in FLAGS:
flags.DEFINE_boolean(name, None, doc)
self._flagHandlers[name] = method
else:
if name not in FLAGS:
flags.DEFINE_string(name, None, doc)
self._paramHandlers[name] = method
def _doHandlers(self):
for flag, handler in self._flagHandlers.iteritems():
if self[flag]:
handler()
for param, handler in self._paramHandlers.iteritems():
if self[param] is not None:
handler(self[param])
def __str__(self):
return str(FLAGS)
def parseOptions(self, options=None):
if options is None:
options = sys.argv
else:
options.insert(0, '')
args = FLAGS(options)
argv = args[1:]
# ignore subcommands
try:
self.parseArgs(*argv)
except TypeError:
raise usage.UsageError("Wrong number of arguments.")
self.postOptions()
return args
def parseArgs(self, *args):
# TODO(termie): figure out a decent way of dealing with args
#return
super(TwistedOptionsToFlags, self).parseArgs(*args)
def postOptions(self):
self._doHandlers()
super(TwistedOptionsToFlags, self).postOptions()
def __getitem__(self, key):
key = key.replace('-', '_')
try:
return getattr(FLAGS, key)
except (AttributeError, KeyError):
return self._data[key]
def __setitem__(self, key, value):
key = key.replace('-', '_')
try:
return setattr(FLAGS, key, value)
except (AttributeError, KeyError):
self._data[key] = value
return TwistedOptionsToFlags
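# Example (as used by serve() below and by run_tests.py): wrap a twistd
# options class so its flags are folded into nova's FLAGS, then parse:
#
#     OptionsClass = WrapTwistedOptions(TwistdServerOptions)
#     options = OptionsClass()
#     argv = options.parseOptions()  # leftover positional arguments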
def stop(pidfile):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, signal.SIGKILL)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(pidfile):
os.remove(pidfile)
else:
print str(err)
sys.exit(1)
def serve(filename):
logging.debug("Serving %s" % filename)
name = os.path.basename(filename)
OptionsClass = WrapTwistedOptions(TwistdServerOptions)
options = OptionsClass()
argv = options.parseOptions()
logging.getLogger('amqplib').setLevel(logging.WARN)
FLAGS.python = filename
FLAGS.no_save = True
if not FLAGS.pidfile:
FLAGS.pidfile = '%s.pid' % name
elif FLAGS.pidfile.endswith('twistd.pid'):
FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name)
if not FLAGS.logfile:
FLAGS.logfile = '%s.log' % name
action = 'start'
if len(argv) > 1:
action = argv.pop()
if action == 'stop':
stop(FLAGS.pidfile)
sys.exit()
elif action == 'restart':
stop(FLAGS.pidfile)
elif action == 'start':
pass
else:
print 'usage: %s [options] [start|stop|restart]' % argv[0]
sys.exit(1)
formatter = logging.Formatter(
name + '(%(name)s): %(levelname)s %(message)s')
handler = logging.StreamHandler(log.StdioOnnaStick())
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
if FLAGS.verbose:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.WARNING)
if FLAGS.syslog:
syslog = logging.handlers.SysLogHandler(address='/dev/log')
syslog.setFormatter(formatter)
logging.getLogger().addHandler(syslog)
logging.debug("Full set of FLAGS:")
for flag in FLAGS:
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
twistd.runApp(options)

96
nova/utils.py Normal file
View File

@ -0,0 +1,96 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
System-level utilities and helper functions.
"""
import logging
import socket
import sys
import os.path
import inspect
import subprocess
import random
def fetchfile(url, target):
logging.debug("Fetching %s" % url)
# c = pycurl.Curl()
# fp = open(target, "wb")
# c.setopt(c.URL, url)
# c.setopt(c.WRITEDATA, fp)
# c.perform()
# c.close()
# fp.close()
execute("curl %s -o %s" % (url, target))
def execute(cmd, input=None):
#logging.debug("Running %s" % (cmd))
obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = None
if input != None:
result = obj.communicate(input)
else:
result = obj.communicate()
obj.stdin.close()
if obj.returncode:
logging.debug("Result was %s" % (obj.returncode))
return result
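# Illustrative calls (not exercised in this file): execute() returns the
# (stdout, stderr) tuple from communicate(), so a caller might do:
#
#     out, err = execute("cat /etc/hostname")
#     out, err = execute("tee /tmp/example", input="some text\n")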
def abspath(s):
return os.path.join(os.path.dirname(__file__), s)
def default_flagfile(filename='nova.conf'):
for arg in sys.argv:
if arg.find('flagfile') != -1:
break
else:
if not os.path.isabs(filename):
# turn relative filename into an absolute path
script_dir = os.path.dirname(inspect.stack()[-1][1])
filename = os.path.abspath(os.path.join(script_dir, filename))
if os.path.exists(filename):
sys.argv = sys.argv[:1] + ['--flagfile=%s' % filename] + sys.argv[1:]
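# Usage sketch (hypothetical entrypoint, not part of this module): calling
# default_flagfile() before parsing FLAGS makes a nova.conf sitting next to
# the top-level script act as the default --flagfile unless one was passed:
#
#     from nova import flags, utils
#     utils.default_flagfile()
#     argv = flags.FLAGS(sys.argv)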
def debug(arg):
logging.debug('debug in callback: %s', arg)
return arg
def runthis(prompt, cmd):
logging.debug("Running %s" % (cmd))
logging.debug(prompt % (subprocess.call(cmd.split(" "))))
def generate_uid(topic, size=8):
return '%s-%s' % (topic, ''.join([random.choice('01234567890abcdefghijklmnopqrstuvwxyz') for x in xrange(size)]))
def generate_mac():
mac = [0x00, 0x16, 0x3e, random.randint(0x00, 0x7f),
random.randint(0x00, 0xff), random.randint(0x00, 0xff)
]
return ':'.join(map(lambda x: "%02x" % x, mac))
def last_octet(address):
return int(address.split(".")[-1])
def get_my_ip():
''' Returns the actual IP of the local machine by opening an outbound
socket and reading the local address it was bound to. '''
csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
csock.connect(('www.google.com', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr

43
nova/vendor.py Normal file
View File

@ -0,0 +1,43 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Get our vendor folders into the system path.
"""
import os
import sys
# abspath/__file__/../vendor
VENDOR_PATH = os.path.abspath(
os.path.join(os.path.dirname(os.path.dirname(__file__)), 'vendor'))
if not os.path.exists(VENDOR_PATH):
print 'warning: no vendor libraries included'
else:
paths = [VENDOR_PATH,
os.path.join(VENDOR_PATH, 'pymox'),
os.path.join(VENDOR_PATH, 'tornado'),
os.path.join(VENDOR_PATH, 'python-gflags'),
os.path.join(VENDOR_PATH, 'python-daemon'),
os.path.join(VENDOR_PATH, 'lockfile'),
os.path.join(VENDOR_PATH, 'boto'),
os.path.join(VENDOR_PATH, 'Twisted-10.0.0'),
os.path.join(VENDOR_PATH, 'redis-py'),
]
for p in paths:
if p not in sys.path:
sys.path.insert(0, p)
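# Typical import pattern (see run_tests.py and nova/twistd.py in this
# commit): pull in this module before any of the bundled third-party
# packages so that they resolve from vendor/ when it exists:
#
#     from nova import vendor
#     from twisted.scripts import trial as trial_script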

99
run_tests.py Normal file
View File

@ -0,0 +1,99 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is our basic test running framework based on Twisted's Trial.
Usage Examples:
# to run all the tests
python run_tests.py
# to run a specific test suite imported here
python run_tests.py NodeConnectionTestCase
# to run a specific test imported here
python run_tests.py NodeConnectionTestCase.test_reboot
# to run some test suites elsewhere
python run_tests.py nova.tests.node_unittest
python run_tests.py nova.tests.node_unittest.NodeConnectionTestCase
Due to our use of multiprocessing, we frequently get some ignorable
'Interrupted system call' exceptions after test completion.
"""
import __main__
import sys
from nova import vendor
from twisted.scripts import trial as trial_script
from nova import flags
from nova import twistd
from nova.tests.access_unittest import *
from nova.tests.api_unittest import *
from nova.tests.cloud_unittest import *
from nova.tests.keeper_unittest import *
from nova.tests.network_unittest import *
from nova.tests.node_unittest import *
from nova.tests.objectstore_unittest import *
from nova.tests.storage_unittest import *
from nova.tests.users_unittest import *
from nova.tests.datastore_unittest import *
FLAGS = flags.FLAGS
if __name__ == '__main__':
OptionsClass = twistd.WrapTwistedOptions(trial_script.Options)
config = OptionsClass()
argv = config.parseOptions()
FLAGS.verbose = True
# TODO(termie): these should make a call instead of doing work on import
if FLAGS.fake_tests:
from nova.tests.fake_flags import *
else:
from nova.tests.real_flags import *
if len(argv) == 1 and len(config['tests']) == 0:
# If no tests were specified run the ones imported in this file
# NOTE(termie): "tests" is not a flag, just some Trial related stuff
config['tests'].update(['__main__'])
elif len(config['tests']):
# If we specified tests check first whether they are in __main__
for arg in config['tests']:
key = arg.split('.')[0]
if hasattr(__main__, key):
config['tests'].remove(arg)
config['tests'].add('__main__.%s' % arg)
trial_script._initialDebugSetup(config)
trialRunner = trial_script._makeRunner(config)
suite = trial_script._getSuite(config)
if config['until-failure']:
test_result = trialRunner.runUntilFailure(suite)
else:
test_result = trialRunner.run(suite)
if config.tracer:
sys.settrace(None)
results = config.tracer.results()
results.write_results(show_missing=1, summary=False,
coverdir=config.coverdir)
sys.exit(not test_result.wasSuccessful())

32
setup.py Normal file
View File

@ -0,0 +1,32 @@
#!/usr/bin/env python
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import sys
from setuptools import setup, find_packages
srcdir = os.path.join(os.path.dirname(sys.argv[0]), 'src')
setup(name='nova',
version='0.3.0',
description='None Other, Vaguely Awesome',
author='nova-core',
author_email='nova-core@googlegroups.com',
url='http://novacc.org/',
packages = find_packages(),
)
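# Assumed developer workflow (standard setuptools usage, not prescribed by
# this file): build or install the package with e.g.
#
#     python setup.py install
#     python setup.py sdist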

1304
vendor/IPy.py vendored Normal file

File diff suppressed because it is too large

32
vendor/Twisted-10.0.0/INSTALL vendored Normal file
View File

@ -0,0 +1,32 @@
Requirements
Python 2.4, 2.5 or 2.6.
Zope Interfaces 3.0.1 (http://zope.org/Products/ZopeInterface) - if
you have ZopeX3 (at least version 3.0.0c1) installed that should
work too.
On Windows pywin32 is recommended (this is built in to ActivePython,
so no need to reinstall if you use it instead of standard Python)
http://sourceforge.net/project/showfiles.php?group_id=78018
The Windows IOCP reactor requires pywin32 build 205 or later.
If you would like to use Trial's subunit reporter, then you will need to
install Subunit 0.0.2 or later (https://launchpad.net/subunit).
Installation
* Debian and Ubuntu
Packages are included in the main distribution.
* FreeBSD, Gentoo
Twisted is in their package repositories.
* Win32
EXEs are available from http://twistedmatrix.com/
* Other
As with other Python packages, the standard way of installing from source
is:
python setup.py install

57
vendor/Twisted-10.0.0/LICENSE vendored Normal file
View File

@ -0,0 +1,57 @@
Copyright (c) 2001-2010
Allen Short
Andy Gayton
Andrew Bennetts
Antoine Pitrou
Apple Computer, Inc.
Benjamin Bruheim
Bob Ippolito
Canonical Limited
Christopher Armstrong
David Reid
Donovan Preston
Eric Mangold
Eyal Lotem
Itamar Shtull-Trauring
James Knight
Jason A. Mobarak
Jean-Paul Calderone
Jessica McKellar
Jonathan Jacobs
Jonathan Lange
Jonathan D. Simms
Jürgen Hermann
Kevin Horn
Kevin Turner
Mary Gardiner
Matthew Lefkowitz
Massachusetts Institute of Technology
Moshe Zadka
Paul Swartz
Pavel Pergamenshchik
Ralph Meijer
Sean Riley
Software Freedom Conservancy
Travis B. Hartwell
Thijs Triemstra
Thomas Herve
Timothy Allen
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

1416
vendor/Twisted-10.0.0/NEWS vendored Normal file

File diff suppressed because it is too large

118
vendor/Twisted-10.0.0/README vendored Normal file
View File

@ -0,0 +1,118 @@
Twisted 10.0.0
Quote of the Release:
[on picking the quote of the release]
<glyph> Man, we're going to have to get a lot funnier if we're going
to do time-based releases
For information on what's new in Twisted 10.0.0, see the NEWS file that comes
with the distribution.
What is this?
=============
Twisted is an event-based framework for internet applications which works on
Python 2.4 through 2.6. The following are some of the modules included
with Twisted::
- twisted.application
A "Service" system that allows you to organize your application in
hierarchies with well-defined startup and dependency semantics,
- twisted.cred
A general credentials and authentication system that facilitates
pluggable authentication backends,
- twisted.enterprise
Asynchronous database access, compatible with any Python DBAPI2.0
modules,
- twisted.internet
Low-level asynchronous networking APIs that allow you to define
your own protocols that run over certain transports,
- twisted.manhole
A tool for remote debugging of your services which gives you a
Python interactive interpreter,
- twisted.protocols
Basic protocol implementations and helpers for your own protocol
implementations,
- twisted.python
A large set of utilities for Python tricks, reflection, text
processing, and anything else,
- twisted.spread
A secure, fast remote object system,
- twisted.trial
A unit testing framework that integrates well with Twisted-based code.
Twisted supports integration of the Tk, GTK+, GTK+ 2, Qt, Mac OS X,
or wxPython event loop with its main event loop. The Win32 event
loop is also supported.
For more information, visit http://www.twistedmatrix.com, or join the list
at http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-python
There are many official Twisted subprojects, including clients and
servers for web, mail, DNS, and more. You can find out more about
these projects at http://twistedmatrix.com/trac/wiki/TwistedProjects
Installing
==========
Instructions for installing this software are in INSTALL.
Unit Tests
==========
See our unit tests run proving that the software is BugFree(TM)::
% trial twisted
Some of these tests may fail if you
* don't have the dependencies required for a particular subsystem installed,
* have a firewall blocking some ports (or things like Multicast, which Linux
NAT has shown itself to do), or
* run them as root.
Documentation and Support
=========================
Examples on how to use Twisted APIs are located in doc/examples;
this might ease the learning curve a little bit, since all these
files are kept as short as possible. The file doc/howto/index.xhtml
contains an index of all the HOWTOs: this should be your starting
point when looking for documentation.
Help is available on the Twisted mailing list::
http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-python
There is also a very lively IRC channel, #twisted, on
irc.freenode.net.
Copyright
=========
All of the code in this distribution is Copyright (c) 2001-2010
Twisted Matrix Laboratories.
Twisted is made available under the MIT license. The included
LICENSE file describes this in detail.
Warranty
========
THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER
EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE USE OF THIS SOFTWARE IS WITH YOU.
IN NO EVENT WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
AND/OR REDISTRIBUTE THE LIBRARY, BE LIABLE TO YOU FOR ANY DAMAGES, EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.
Again, see the included LICENSE file for specific legal details.

Some files were not shown because too many files have changed in this diff