REST branch

Change-Id: If4f9d153ca2c3ac8dd728f116817dbfe497d7fd1
This commit is contained in:
Steve Loranz 2013-06-24 16:36:48 -05:00
parent e950d6a686
commit 6a3b561c5d
36 changed files with 2372 additions and 286 deletions

View File

@ -1,278 +0,0 @@
#!/usr/bin/python
#
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
import sys
import shutil
import argparse
from tempfile import mkdtemp, NamedTemporaryFile, TemporaryFile
from image_utils import *
# Command-line interface: three argument groups (OpenStack credentials,
# optional install media sources, installation parameters) plus the
# positional kickstart/preseed file.
parser = argparse.ArgumentParser(description='Launch and snapshot a kickstart install using syslinux and Glance')
# NOTE(review): "Enviornment" is misspelled in this user-visible group title.
ospar = parser.add_argument_group('OpenStack Enviornment')
ospar.add_argument('--auth-url', dest='auth_url', required=True,
                   help='URL for keystone authorization')
ospar.add_argument('--username', dest='username', required=True,
                   help='username for keystone authorization')
ospar.add_argument('--tenant', dest='tenant', required=True,
                   help='tenant for keystone authorization')
ospar.add_argument('--password', dest='password', required=True,
                   help='password for keystone authorization')
ospar.add_argument('--glance-url', dest='glance_url', required=True,
                   help='URL for glance service')
install_media_desc = """When one of these arguments is given the install environment will contain a second
block device. The image presented on this device can come from a URL, a file or
a pre-existing volume snapshot. You may only use one of these options at a time
and you can only use them in conjunction with the 'create-volume' option."""
install_media = parser.add_argument_group('Install Media', install_media_desc)
install_media.add_argument('--install-media-url', dest='install_media_url',
                           help='Add an install media device using content at this URL')
install_media.add_argument('--install-media-file', dest='install_media_file',
                           help='Add an install media device using this file as a media image')
install_media.add_argument('--install-media-snapshot', dest='install_media_snapshot',
                           help='Add an install media device by creating a volume from this snapshot id')
instpar = parser.add_argument_group('Installation Parameters')
instpar.add_argument('--root-password', dest='admin_password', required=True,
                     help='root password for the resulting image - also used for optional remote access during install')
instpar.add_argument('--create-volume', dest='create_volume', action='store_true', default=False,
                     help='Create a volume snapshot instead of the default Glance snapshot (optional)')
instpar.add_argument('--install-volume-size', dest='install_volume_size', default=10,
                     help='Size of the install destination volume in GB (default: 10)')
instpar.add_argument('--install-tree-url', dest='install_tree_url',
                     help='URL for preferred network install tree (optional)')
instpar.add_argument('--distro', dest='distro',
                     help='distro - must be "rpm" or "ubuntu" (optional)')
instpar.add_argument('--image-name', dest='image_name',
                     help='name to assign newly created image (optional)')
instpar.add_argument('--leave-mess', dest='leave_mess', action='store_true', default=False,
                     help='Do not clean up local or remote artifacts when finished or when an error is encountered')
parser.add_argument('ks_file',
                    help='kickstart/install-script file to use for install')
args = parser.parse_args()
# working_kickstart is the kickstart/preseed text as a string, with the
# requested root password substituted in.
working_kickstart = do_pw_sub(args.ks_file, args.admin_password)
distro = detect_distro(working_kickstart)
if args.distro:
    # Allow the command line distro to override our guess above
    distro = args.distro
(install_tree_url, console_password, console_command, poweroff) = install_extract_bits(working_kickstart, distro)
if args.install_tree_url:
    # Allow the specified tree to override anything extracted above
    install_tree_url = args.install_tree_url
if args.image_name:
    image_name = args.image_name
else:
    # NOTE(review): strftime/gmtime are not imported in this file directly -
    # presumably re-exported by image_utils' wildcard import; confirm.
    image_name = "Image from ks file: %s - Date: %s" % (os.path.basename(args.ks_file), strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()))
# Let's be nice and report as many error conditions as we can before exiting
error = False
if (args.install_media_url or args.install_media_file or args.install_media_snapshot) and not args.create_volume:
    print "ERROR: You can only use install media when creating a volume snapshot image using the --create-volume option."
    error = True
if (args.install_media_url and args.install_media_file) or (args.install_media_file and args.install_media_snapshot) or \
   (args.install_media_url and args.install_media_snapshot):
    print "ERROR: You may only specify a single install media source"
    error = True
if not install_tree_url:
    print "ERROR: no install tree URL specified and could not extract one from the kickstart/install-script"
    error = True
if not distro:
    print "ERROR: no distro specified and could not guess based on the kickstart/install-script"
    error = True
if not poweroff:
    # The installer must power the guest off so we can detect completion.
    if distro == "rpm":
        print "ERROR: supplied kickstart file must contain a 'poweroff' line"
    elif distro == "ubuntu":
        print "ERROR: supplied preseed must contain a 'd-i debian-installer/exit/poweroff boolean true' line"
    error = True
if error:
    sys.exit(1)
# We start creating artifacts here - cleanup happens in the finally block,
# driven by which of these state variables are still set when we get there.
modified_image = None             # filename
tmp_content_dir = None            # directory
install_image = None              # Nova image object
install_media_volume = None       # cinder volume object
install_media_snapshot_id = None  # UUID string
installed_instance = None         # Nova instance object
finished = False                  # silly marker
retcode = 0
try:
    # Artifact of borrowing factory code - pass this as a dict
    creds = { 'username': args.username, 'tenant': args.tenant, 'password': args.password, 'auth_url': args.auth_url }

    # Generate "blank" syslinux bootable mini-image
    # This is the only step that strictly requires root access due to the need
    # for a loopback mount to install the bootloader
    generate_blank_syslinux()

    # Take a copy of it
    if args.create_volume:
        disk_format = 'raw'
        modified_image = "./syslinux_modified_%s.raw" % os.getpid()
        try:
            subprocess_check_output(["qemu-img","convert","-O","raw","./syslinux.qcow2",modified_image])
        except:
            # Bare except, but the exception is re-raised - this only adds
            # a human-readable message before propagating.
            print "Exception while converting image to raw"
            raise
    else:
        disk_format = 'qcow2'
        modified_image = "./syslinux_modified_%s.qcow2" % os.getpid()
        shutil.copy("./syslinux.qcow2",modified_image)

    # Generate the content to put into the image
    tmp_content_dir = mkdtemp()
    print "Collecting boot content for auto-install image"
    generate_boot_content(install_tree_url, tmp_content_dir, distro, args.create_volume)

    # Copy in the kernel, initrd and conf files into the blank boot stub using libguestfs
    print "Copying boot content into a bootable syslinux image"
    copy_content_to_image(tmp_content_dir, modified_image)

    # Upload the resulting image to glance
    print "Uploading image to glance"
    install_image = glance_upload(image_filename = modified_image, image_url = None, creds = creds, glance_url = args.glance_url,
                                  name = "INSTALL for: %s" % (image_name), disk_format=disk_format)
    print "Uploaded successfully as glance image (%s)" % (install_image.id)

    install_volume = None
    # TODO: Make volume size configurable
    if args.create_volume:
        print "Converting Glance install image to a Cinder volume"
        install_volume = volume_from_image(install_image.id, creds, args.glance_url, volume_size = args.install_volume_size)

    if args.install_media_url or args.install_media_file:
        # Either media source ends up as a Glance image, which is then
        # converted to a working volume plus a reusable snapshot.
        if args.install_media_url:
            print "Generating Glance image from URL: %s" % (args.install_media_url)
            install_media_image = glance_upload(image_filename = None, image_url = args.install_media_url,
                                                creds = creds, glance_url = args.glance_url, name = "FromURL: %s" % (args.install_media_url),
                                                disk_format='raw')
        else:
            print "Generating Glance image from file: %s" % (args.install_media_file)
            install_media_image = glance_upload(image_filename = args.install_media_file, image_url = None,
                                                creds = creds, glance_url = args.glance_url, name = os.path.basename(args.install_media_file),
                                                disk_format='raw')
        print "Generating volume from image (%s)" % (install_media_image.id)
        install_media_volume = volume_from_image(install_media_image.id, creds, args.glance_url)
        print "Generating snapshot of volume (%s) to allow install media reuse" % (install_media_volume.id)
        install_media_snapshot = snapshot_from_volume(install_media_volume.id, creds)
        install_media_snapshot_id = install_media_snapshot.id
        print "#### Future installs can reference this snapshot with the following argument:"
        print " --install-media-snapshot %s" % install_media_snapshot_id
    elif args.install_media_snapshot:
        print "Generating working volume from snapshot (%s)" % (args.install_media_snapshot)
        install_media_snapshot_id = args.install_media_snapshot
        install_media_volume = volume_from_snapshot(args.install_media_snapshot, creds)

    # Launch the image with the provided ks.cfg as the user data
    # Optionally - spawn a vncviewer to watch the install graphically
    # Poll on image status until it is SHUTDOWN or timeout
    print "Launching install image"
    installed_instance = launch_and_wait(install_image, install_volume, install_media_volume, working_kickstart,
                                         os.path.basename(args.ks_file), creds, console_password, console_command)

    # Take a snapshot of the now safely shutdown image
    # For volume snapshots we must terminate the instance first then snapshot
    # For glance/image snapshots we must _not_ terminate the instance until the snapshot is complete
    print "Taking snapshot of completed install"
    if args.create_volume:
        print "Terminating instance (%s) in preparation for taking a snapshot of the root volume" % (installed_instance.id)
        terminate_instance(installed_instance.id, creds)
        installed_instance = None
        finished_image_snapshot = snapshot_from_volume(install_volume.id, creds)
        print "Volume-based image available from snapshot ID: %s" % (finished_image_snapshot.id)
        print "Finished snapshot name is: %s" % (finished_image_snapshot.display_name)
        finished = True
    else:
        finished_image_id = installed_instance.create_image(image_name)
        print "Waiting for glance image snapshot to complete"
        wait_for_glance_snapshot(finished_image_id, creds, args.glance_url)
        print "Terminating instance (%s) now that snapshot is complete" % (installed_instance.id)
        terminate_instance(installed_instance.id, creds)
        installed_instance = None
        print "Finished image snapshot ID is: %s" % (finished_image_id)
        print "Finished image name is: %s" % (image_name)
        finished = True
except Exception as e:
    print "Uncaught exception encountered during install"
    print str(e)
    retcode = 1
finally:
    if args.leave_mess:
        print "Leaving a mess - this includes local files, local dirs, remote images, remote volumes and remote snapshots"
        # NOTE(review): sys.exit() inside finally discards any in-flight
        # exception and skips the summary output below - confirm intended.
        sys.exit(retcode)
    print "Cleaning up"
    try:
        if tmp_content_dir:
            print "Removing boot content dir"
            shutil.rmtree(tmp_content_dir)
        if modified_image:
            print "Removing install image %s" % (modified_image)
            # TODO: Note that this is actually cacheable on a per-os-version basis
            os.remove(modified_image)
        if installed_instance:
            # Note that under normal operation this is terminated when completing the snapshot process
            print "Terminating install instance (%s)" % (installed_instance.id)
            terminate_instance(installed_instance.id, creds)
        if install_image:
            print "Deleting Glance image (%s) used to launch install" % (install_image.id)
            install_image.delete()
        if install_media_volume:
            print "Removing working volume containing install media"
            print "Snapshot (%s) remains available for future use" % (install_media_snapshot_id)
            install_media_volume.delete()
    except:
        print "WARNING: Exception while attempting to clean up - we may have left a mess"
        retcode = 1

# For usability - reprint the most important bits from above as the last output
if finished:
    print "FINISHED!"
    print
    print "Image Details:"
    if args.create_volume:
        print "Volume snapshot name: %s" % (finished_image_snapshot.display_name)
        print "ID: %s" % (finished_image_snapshot.id)
    else:
        print "Glance image name: %s" % (image_name)
        print "ID: %s" % (finished_image_id)
sys.exit(retcode)

View File

@ -0,0 +1,5 @@
[DEFAULT]
host=0.0.0.0
port=1235
persistence_backend=SQLAlquemy

24
imagebuilder-api Executable file
View File

@ -0,0 +1,24 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from imagebuilder.api import app

# Executable entry point: start the imagebuilder REST API server.
if __name__ == '__main__':
    app.start()

View File

@ -0,0 +1,124 @@
# encoding: utf-8
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pymongo
# Mongo database / collection that hold the build metadata records.
DB_NAME = "imagebuilder_db"
COLLECTION_NAME = "imagebuilder_collection"


class MongoPersistentBuildManager(object):
    """Persist image-build metadata records in a local MongoDB collection.

    Records are plain dicts; the key "identifier" carries the caller-visible
    build id (a separate field from Mongo's auto-generated "_id").
    """

    def __init__(self):
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        # NOTE(review): pymongo.Connection is deprecated in newer pymongo
        # releases in favor of MongoClient - confirm the targeted version.
        self.con = pymongo.Connection()
        self.db = self.con[DB_NAME]
        self.collection = self.db[COLLECTION_NAME]

    def all_builds(self):
        """Return a list of all stored builds; empty list on query failure.

        @return list of build dicts
        """
        try:
            return self.builds_from_query(None)
        except Exception as e:
            # The original logged here and then returned an unbound local,
            # which raised NameError; return an empty list instead.
            self.log.exception('Failure listing builds: %s' % e)
            return []

    def build_with_id(self, build_id):
        """Return the stored build whose "identifier" equals build_id.

        @param build_id caller-visible build identifier string
        @return build dict, or None when absent or on error
        """
        try:
            # build = self._builds_from_query({"_id": ObjectId(build_id)})
            return self.builds_from_query({"identifier": build_id})[0]
        except Exception as e:
            self.log.debug('Exception caught: %s' % e)
            return None

    def add_build(self, build):
        """Persist a build dict that has not been stored before.

        To update a previously persisted build use build_with_id() or
        builds_from_query() followed by save_build().

        @param build dict of build metadata
        @return the build's "identifier" value
        """
        if 'identifier' in build:
            # NOTE(review): this checks Mongo's "_id" against the build's
            # "identifier", but _save_build() inserts without setting "_id" -
            # confirm the two fields are meant to coincide.
            metadata = self.collection.find_one({"_id": build['identifier']})
            if metadata:
                raise Exception("Image %s already managed, use build_with_id() and save_build()" %
                                (build['identifier']))
        return self._save_build(build)

    def save_build(self, build):
        """Persist changes to an already-managed build.

        @param build dict of build metadata (must carry "identifier")
        """
        build_id = str(build['identifier'])
        # find_one() returns a single document (or None), not a cursor; the
        # original passed it through _builds_from_mongo_cursor(), which
        # iterates documents and would have walked the dict's keys instead.
        metadata = self.collection.find_one({"_id": build_id})
        if not metadata:
            raise Exception('Image %s not managed, use "add_build()" first.' % build_id)
        self._save_build(build)

    def _save_build(self, build):
        # Insert the record and return its caller-visible identifier.
        try:
            self.collection.insert(build)
            self.log.debug("Saved metadata for build (%s)" % (build['identifier']))
            return build['identifier']
        except Exception as e:
            self.log.debug('Exception caught: %s' % e)
            raise Exception('Unable to save build metadata: %s' % e)

    def delete_build_with_id(self, build_id):
        """Remove the stored record for build_id; log a warning on failure.

        @param build_id build identifier string
        """
        try:
            self.collection.remove(build_id)
        except Exception as e:
            self.log.warn('Unable to remove record: %s' % e)

    def builds_from_query(self, query):
        """Return builds matching a Mongo query dict (None matches all).

        @param query Mongo query dict or None
        @return list of build dicts
        """
        return self._builds_from_mongo_cursor(self.collection.find(query))

    def _builds_from_mongo_cursor(self, mongo_cursor):
        # Convert each document to a plain dict with stringified keys/values.
        builds = []
        for doc in mongo_cursor:
            builds.append(dict((str(k), str(v)) for k, v in doc.items()))
        return builds

View File

@ -0,0 +1,119 @@
#
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
import logging
Base = declarative_base()
# NOTE(review): echo=True logs every SQL statement - looks like a debug
# setting; confirm before shipping.
engine = create_engine('sqlite:///imagebuilder.db', echo=True)
Session = sessionmaker(bind=engine)


class Build(Base):
    """ORM row describing a single image build and its OpenStack artifacts."""
    __tablename__ = 'imagebuilder_builds'

    # Primary key is the caller-supplied build identifier string.
    id = Column(String, primary_key=True)
    status = Column(String)
    name = Column(String)
    glance_id = Column(String)
    cinder_id = Column(String)
    nova_id = Column(String)

    def __init__(self, id, name):
        self.id = id
        self.name = name

    def __repr__(self):
        return "<Build('%s','%s')>" % (self.name, self.id)
class SQLAlchemyPersistentBuildManager(object):
    """Persist image-build metadata as Build rows via SQLAlchemy."""

    def __init__(self):
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        self.session = Session()

    def build_with_id(self, build_id):
        """Return the build(s) whose primary key equals build_id.

        @param build_id build primary-key string
        @return list of build dicts (empty when no match)
        """
        build = self.session.query(Build).filter_by(id=build_id)
        return self._builds_from_iterative(build)

    def add_build(self, build):
        """Add a build dict to this PersistentBuildManager.

        This should only be called with a build that has not yet been added
        to the store. To retrieve a previously persisted build use
        build_with_id() or builds_from_query().

        @param build dict with at least 'id', 'name' and 'state' keys
        @return the saved build's id
        """
        return self._save_build(build)

    def save_build(self, build):
        """Persist changes to an existing build dict.

        @param build dict with at least 'id', 'name' and 'state' keys
        """
        self._save_build(build)

    def _save_build(self, build):
        # Map the incoming dict onto a Build row and commit it.
        try:
            b = Build(build['id'], build['name'])
            b.status = build['state']
            self.session.add(b)
            self.session.commit()
            self.log.debug("Saved metadata for build (%s)" % (b))
            return b.id
        except Exception as e:
            self.log.debug('Exception caught: %s' % e)
            raise Exception('Unable to save build metadata: %s' % e)

    def all_builds(self):
        """Return every stored build as a list of plain dicts."""
        builds = self.session.query(Build).all()
        return self._builds_from_iterative(builds)

    def builds_from_query(self, query):
        """Return all builds when query is falsy.

        NOTE(review): a truthy query currently falls through and returns
        None - flagged rather than guessing at intended query semantics.
        """
        if not query:
            return self.all_builds()

    def _builds_from_iterative(self, iterative):
        # Convert Build rows to plain dicts so callers do not hold live
        # ORM objects.
        builds = []
        for build in iterative:
            builds.append({
                'id': build.id,
                'name': build.name,
                'status': build.status,
                'glance_id': build.glance_id,
                'cinder_id': build.cinder_id,
                # BUG FIX: this previously copied cinder_id into nova_id.
                'nova_id': build.nova_id,
            })
        return builds
# Create the imagebuilder_builds table (no-op when it already exists).
Base.metadata.create_all(engine)

15
imagebuilder/__init__.py Normal file
View File

@ -0,0 +1,15 @@
#
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

28
imagebuilder/api/README Normal file
View File

@ -0,0 +1,28 @@
WARNING:
-------
THIS IS STILL VERY BUGGY!
1) The response on the POST doesn't work with every client.
2) The responses are all str instead of real objects.
REQUIREMENTS:
------------
Requires pecan and wsme
On Fedora, install the packages python-pecan and python-wsme
RUNNING:
-------
For now, run the following:
image-building-poc% pecan serve openstack-imagebuilder/api/config.py
- Once the server starts, use HTTPie or curl to POST to /osib/v1/builds
ex
http --json POST localhost:8080/osib/v1/builds
- Now you can GET that object using the '_id'
ex
http --json localhost:8080/osib/v1/builds/51a9293ff731080a5ac2a24b

View File

@ -0,0 +1,15 @@
#
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

75
imagebuilder/api/app.py Normal file
View File

@ -0,0 +1,75 @@
#
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import logging
import pecan
from imagebuilder import service
from imagebuilder.api import config as pecan_config
from imagebuilder.openstack.common import log
from oslo.config import cfg
from wsgiref import simple_server
def get_pecan_config():
    """Load the pecan configuration shipped alongside this package.

    ``__file__`` may point at the compiled ``.pyc``; swap the suffix so
    pecan always parses the ``.py`` source.
    """
    source = pecan_config.__file__.replace('.pyc', '.py')
    return pecan.configuration.conf_from_file(source)
def setup_app(config):
    """Build and return the WSGI application for the given pecan config.

    Falls back to the packaged configuration when *config* is empty/None.
    """
    config = config or get_pecan_config()
    pecan.configuration.set_config(dict(config), overwrite=True)
    app_conf = config.app
    return pecan.make_app(
        app_conf['root'],
        static_root=app_conf['static_root'],
        template_path=app_conf['template_path'],
        debug=cfg.CONF.debug,
        force_canonical=getattr(app_conf, 'force_canonical', True),
    )
def start():
    """Service entry point: configure, build the WSGI app, and serve until
    interrupted (uses the stdlib wsgiref simple_server)."""
    # Parse OpenStack config file and command line options, then
    # configure logging.
    service.prepare_service(sys.argv)
    # Build the WSGI app
    host, port = cfg.CONF['host'], cfg.CONF['port']
    srvr_config = get_pecan_config()
    srvr_config['server']['host'] = host
    srvr_config['server']['port'] = port
    root = setup_app(srvr_config)
    # Create the WSGI server and start it
    srvr = simple_server.make_server(host, port, root)
    LOG = log.getLogger(__name__)
    LOG.info('Starting server in PID %s' % os.getpid())
    LOG.info("Configuration:")
    cfg.CONF.log_opt_values(LOG, logging.INFO)
    if host == '0.0.0.0':
        # Bound on all interfaces; advertise loopback for convenience.
        LOG.info('serving on 0.0.0.0:%s, view at http://127.0.0.1:%s' %
                 (port, port))
    else:
        LOG.info("serving on http://%s:%s" % (host, port))
    try:
        srvr.serve_forever()
    except KeyboardInterrupt:
        # allow CTRL+C to shutdown without an error
        LOG.info("Shutting down...")

View File

@ -0,0 +1,60 @@
#
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Server Specific Configurations
server = {
    'port': 8080,
    'host': '0.0.0.0'
}

# Pecan Application Configurations
app = {
    'root': 'imagebuilder.api.controllers.RootController',
    'modules': ['imagebuilder.api'],
    'static_root': '%(confdir)s/public',
    'template_path': '%(confdir)s/api/templates',
    'debug': True,
    'errors': {
        # NOTE(review): the key is the int 404 here but the string '404' in
        # the test config - confirm which form pecan expects.
        404: '/error/404',
        '__force_dict__': True
    }
}

# Logging configuration consumed by pecan; 'logging' here is a pecan config
# dict, not the stdlib module.
logging = {
    'loggers': {
        'root': {'level': 'INFO', 'handlers': ['console']},
        'osib': {'level': 'DEBUG', 'handlers': ['console']}
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        }
    },
    'formatters': {
        'simple': {
            'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]'
                       '[%(threadName)s] %(message)s')
        }
    }
}

# Custom Configurations must be in Python dictionary format::
#
# foo = {'bar':'baz'}
#
# All configurations are accessible at::
# pecan.conf

View File

@ -0,0 +1,22 @@
#
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pecan.rest import RestController
from osib import OSIB
class RootController(RestController):
    """Root of the REST API; mounts the OSIB resource tree at /osib."""
    osib = OSIB()

View File

@ -0,0 +1,22 @@
#
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pecan.rest import RestController
from v1 import V1Controller
class OSIB(RestController):
    """/osib resource; mounts the versioned API at /osib/v1."""
    v1 = V1Controller()

View File

@ -0,0 +1,72 @@
#
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pecan.rest import RestController
from wsmeext.pecan import wsexpose as expose
from wsme import types as wtypes
from imagebuilder.MongoPersistentBuildManager import MongoPersistentBuildManager
from uuid import uuid4 as uuid
class Build(object):
    """WSME transfer object describing a single image build."""
    identifier = wtypes.text
    status = wtypes.text
    name = wtypes.text
    glance_id = wtypes.text
    cinder_id = wtypes.text
    nova_id = wtypes.text

    def __init__(self, props=None):
        """Copy each entry of *props* onto the instance as an attribute.

        @param props optional dict mapping attribute name to value; the
               previous default was a shared mutable dict ({}), replaced
               with None to avoid the mutable-default-argument pitfall.
        """
        for key, value in (props or {}).items():
            setattr(self, key, value)
class BuildController(RestController):
    """REST resource exposing image builds at /osib/v1/builds, backed by
    the Mongo persistence manager."""

    def __init__(self):
        self.pim = MongoPersistentBuildManager()

    # RESOURCE PATH: [GET] /osib/v1/builds
    @expose([Build])
    def get_all(self):
        """Return every persisted build."""
        builds = []
        for item in self.pim.all_builds():
            builds.append(Build(item))
        return builds

    # RESOURCE PATH: [GET] /osib/v1/builds/:uuid
    @expose(Build, wtypes.text)
    def get_one(self, build_id):
        """Return the single build with the given identifier.

        NOTE(review): build_with_id() can return None, and Build(None)
        would raise - confirm intended not-found handling.
        """
        data = self.pim.build_with_id(build_id)
        return Build(data)

    # RESOURCE PATH: [POST] /osib/v1/builds
    @expose(Build)
    def post(self):
        """Create and persist a new build record with a fresh UUID."""
        build = {'identifier': str(uuid())}
        self.pim.add_build(build)
        return Build(build)

    # RESOURCE PATH: [PUT] /osib/v1/builds/:uuid
    @expose(Build, wtypes.text, wtypes.text)
    def put(self, build_id, build_updates):
        """Apply build_updates to an existing build and persist it.

        NOTE(review): build_updates arrives as wsme text, but dict.update()
        needs a mapping - confirm how callers encode the updates.
        """
        build = self.pim.build_with_id(build_id)
        build.update(build_updates)
        self.pim.save_build(build)
        return Build(build)

    # RESOURCE PATH: [DELETE] /osib/v1/builds/:uuid
    @expose(wtypes.text)
    def delete(self, build_id):
        """Delete the build with the given identifier."""
        self.pim.delete_build_with_id(build_id)

View File

@ -0,0 +1,21 @@
#
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pecan.rest import RestController
from Builds import BuildController
class V1Controller(RestController):
    """v1 API root; exposes build resources at /osib/v1/builds."""
    builds = BuildController()

View File

@ -0,0 +1,12 @@
<%inherit file="layout.html" />
## provide definitions for blocks we want to redefine
<%def name="title()">
Server Error ${status}
</%def>
## now define the body of the template
<header>
<h1>Server Error ${status}</h1>
</header>
<p>${message}</p>

View File

@ -0,0 +1,34 @@
<%inherit file="layout.html" />
## provide definitions for blocks we want to redefine
<%def name="title()">
Welcome to Pecan!
</%def>
## now define the body of the template
<header>
<h1><img src="/images/logo.png" /></h1>
</header>
<div id="content">
<p>This is a sample Pecan project.</p>
<p>
Instructions for getting started can be found online at <a
href="http://pecanpy.org" target="window">pecanpy.org</a>
</p>
<p>
...or you can search the documentation here:
</p>
<form method="POST" action="/">
<fieldset>
<input name="q" />
<input type="submit" value="Search" />
</fieldset>
<small>Enter search terms or a module, class or function name.</small>
</form>
</div>

View File

@ -0,0 +1,22 @@
<html>
<head>
<title>${self.title()}</title>
${self.style()}
${self.javascript()}
</head>
<body>
${self.body()}
</body>
</html>
<%def name="title()">
Default Title
</%def>
<%def name="style()">
<link rel="stylesheet" type="text/css" media="screen" href="/css/style.css" />
</%def>
<%def name="javascript()">
<script type="text/javascript" src="/javascript/shared.js"></script>
</%def>

View File

@ -0,0 +1,22 @@
import os
from unittest import TestCase
from pecan import set_config
from pecan.testing import load_test_app
__all__ = ['FunctionalTest']
class FunctionalTest(TestCase):
    """Base class for functional tests that exercise the literal WSGI
    application through the framework, rather than calling controllers
    directly."""

    def setUp(self):
        # Load the test application from the config.py next to this module.
        config_path = os.path.join(os.path.dirname(__file__), 'config.py')
        self.app = load_test_app(config_path)

    def tearDown(self):
        # Reset pecan's global configuration between tests.
        set_config({}, overwrite=True)

View File

@ -0,0 +1,25 @@
# Server Specific Configurations
server = {
    # NOTE(review): the port is a string here but an int in the main
    # config - confirm which form the test harness expects.
    'port': '8080',
    'host': '0.0.0.0'
}

# Pecan Application Configurations
app = {
    'root': 'imagebuilder.api.controllers.RootController',
    'modules': ['imagebuilder.api'],
    'static_root': '%(confdir)s/../../public',
    'template_path': '%(confdir)s/../templates',
    'debug': True,
    'errors': {
        '404': '/error/404',
        '__force_dict__': True
    }
}

# Custom Configurations must be in Python dictionary format::
#
# foo = {'bar':'baz'}
#
# All configurations are accessible at::
# pecan.conf

View File

@ -0,0 +1,22 @@
from unittest import TestCase
from webtest import TestApp
from pdiddy.tests import FunctionalTest
class TestRootController(FunctionalTest):
    """Functional tests for the root controller (pecan sample-app tests).

    NOTE(review): the FunctionalTest base is imported from 'pdiddy.tests',
    which looks like a leftover sample/template package name - confirm.
    """

    def test_get(self):
        response = self.app.get('/')
        assert response.status_int == 200

    def test_search(self):
        # The pecan sample root controller redirects searches to the docs.
        response = self.app.post('/', params={'q': 'RestController'})
        assert response.status_int == 302
        assert response.headers['Location'] == (
            'http://pecan.readthedocs.org/en/latest/search.html'
            '?q=RestController'
        )

    def test_get_not_found(self):
        response = self.app.get('/a/bogus/url', expect_errors=True)
        assert response.status_int == 404

View File

@ -0,0 +1,7 @@
from unittest import TestCase
class TestUnits(TestCase):
    """Plain unit tests with no application or framework dependencies."""

    def test_units(self):
        # Arithmetic smoke test proving the unit-test runner itself works.
        product = 5 * 5
        assert product == 25

274
imagebuilder/create_image.py Executable file
View File

@ -0,0 +1,274 @@
#!/usr/bin/python
#
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import shutil
import argparse
from tempfile import mkdtemp
from time import strftime, gmtime
from image_utils import *
def get_cli_arguments():
    """Build the command-line parser and parse sys.argv.

    Returns an argparse.Namespace with the OpenStack credentials, optional
    install-media source (URL, file, or existing snapshot — mutually
    exclusive, enforced later in create_image()), installation parameters,
    and the positional kickstart/preseed file path.
    """
    parser = argparse.ArgumentParser(description='Launch and snapshot a kickstart install using syslinux and Glance')
    # Fixed typo in the user-visible group title ("Enviornment").
    ospar = parser.add_argument_group('OpenStack Environment')
    ospar.add_argument('--auth-url', dest='auth_url', required=True, help='URL for keystone authorization')
    ospar.add_argument('--username', dest='username', required=True, help='username for keystone authorization')
    ospar.add_argument('--tenant', dest='tenant', required=True, help='tenant for keystone authorization')
    ospar.add_argument('--password', dest='password', required=True, help='password for keystone authorization')
    ospar.add_argument('--glance-url', dest='glance_url', required=True, help='URL for glance service')

    install_media_desc="""When one of these arguments is given the install environment will contain a second
block device.  The image presented on this device can come from a URL, a file or
a pre-existing volume snapshot.  You may only use one of these options at a time
and you can only use them in conjunction with the 'create-volume' option."""

    install_media = parser.add_argument_group('Install Media', install_media_desc)
    install_media.add_argument('--install-media-url', dest='install_media_url',
                               help='Add an install media device using content at this URL')
    install_media.add_argument('--install-media-file', dest='install_media_file',
                               help='Add an install media device using this file as a media image')
    install_media.add_argument('--install-media-snapshot', dest='install_media_snapshot',
                               help='Add an install media device by creating a volume from this snapshot id')

    instpar = parser.add_argument_group('Installation Parameters')
    instpar.add_argument('--root-password', dest='admin_password', required=True,
                         help='root password for the resulting image - also used for optional remote access during install')
    instpar.add_argument('--create-volume', dest='create_volume', action='store_true', default=False,
                         help='Create a volume snapshot instead of the default Glance snapshot (optional)')
    # type=int keeps the parsed value consistent with the int default
    # (previously a string when given on the command line).
    instpar.add_argument('--install-volume-size', dest='install_volume_size', type=int, default=10,
                         help='Size of the install destination volume in GB (default: 10)')
    instpar.add_argument('--install-tree-url', dest='install_tree_url',
                         help='URL for preferred network install tree (optional)')
    instpar.add_argument('--distro', dest='distro', help='distro - must be "rpm" or "ubuntu" (optional)')
    instpar.add_argument('--image-name', dest='image_name', help='name to assign newly created image (optional)')
    instpar.add_argument('--leave-mess', dest='leave_mess', action='store_true', default=False,
                         help='Do not clean up local or remote artifacts when finished or when an error is encountered')

    parser.add_argument('ks_file', help='kickstart/install-script file to use for install')

    return parser.parse_args()
def create_image(args):
    """Drive an unattended OS install in OpenStack and snapshot the result.

    High-level flow: substitute the root password into the kickstart, build a
    bootable syslinux helper image carrying the installer kernel/initrd,
    upload it to Glance, boot it (optionally with attached install media and
    a Cinder root volume), wait for the install to power the instance off,
    then capture the finished OS either as a Glance image snapshot or — with
    --create-volume — as a Cinder volume snapshot.  Always exits the process
    via sys.exit(): 0 on success, 1 on error.

    :param args: argparse.Namespace produced by get_cli_arguments()
    """
    # This is a string
    working_kickstart = do_pw_sub(args.ks_file, args.admin_password)

    distro = detect_distro(working_kickstart)
    if args.distro:
        # Allow the command line distro to override our guess above
        distro = args.distro

    # Pull the install tree URL, optional remote-console access details and
    # the poweroff marker out of the kickstart/preseed.
    (install_tree_url, console_password, console_command, poweroff) = install_extract_bits(working_kickstart, distro)
    if args.install_tree_url:
        # Allow the specified tree to override anything extracted above
        install_tree_url = args.install_tree_url

    if args.image_name:
        image_name = args.image_name
    else:
        # Default name embeds the kickstart filename and a UTC timestamp.
        image_name = "Image from ks file: %s - Date: %s" % (os.path.basename(args.ks_file), strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()))

    # Let's be nice and report as many error conditions as we can before exiting
    error = False
    if (args.install_media_url or args.install_media_file or args.install_media_snapshot) and not args.create_volume:
        print "ERROR: You can only use install media when creating a volume snapshot image using the --create-volume option."
        error = True

    # The three install-media sources are mutually exclusive.
    if (args.install_media_url and args.install_media_file) or (args.install_media_file and args.install_media_snapshot) or \
       (args.install_media_url and args.install_media_snapshot):
        print "ERROR: You may only specify a single install media source"
        error = True

    if not install_tree_url:
        print "ERROR: no install tree URL specified and could not extract one from the kickstart/install-script"
        error = True

    if not distro:
        print "ERROR: no distro specified and could not guess based on the kickstart/install-script"
        error = True

    # Without a poweroff directive the instance would never shut down, so the
    # "wait for SHUTDOWN" step below could not detect completion.
    if not poweroff:
        if distro == "rpm":
            print "ERROR: supplied kickstart file must contain a 'poweroff' line"
        elif distro == "ubuntu":
            print "ERROR: supplied preseed must contain a 'd-i debian-installer/exit/poweroff boolean true' line"
        error = True

    if error:
        sys.exit(1)

    # We start creating artifacts here - cleanup in finally
    modified_image = None          # filename
    tmp_content_dir = None         # directory
    install_image = None           # Nova image object
    install_media_volume=None      # cinder volume object
    install_media_snapshot_id=None # UUID string
    installed_instance = None      # Nova instance object
    finished = False               # silly marker
    retcode = 0
    try:
        # Artifact of borrowing factory code - pass this as a dict
        creds = { 'username': args.username, 'tenant': args.tenant, 'password': args.password, 'auth_url': args.auth_url }

        # Generate "blank" syslinux bootable mini-image
        # This is the only step that strictly requires root access due to the need
        # for a loopback mount to install the bootloader
        generate_blank_syslinux()

        # Take a copy of it
        if args.create_volume:
            # Cinder path wants a raw image; convert the qcow2 stub.
            disk_format = 'raw'
            modified_image = "./syslinux_modified_%s.raw" % os.getpid()
            try:
                subprocess_check_output(["qemu-img","convert","-O","raw","./syslinux.qcow2",modified_image])
            except:
                print "Exception while converting image to raw"
                raise
        else:
            disk_format = 'qcow2'
            modified_image = "./syslinux_modified_%s.qcow2" % os.getpid()
            shutil.copy("./syslinux.qcow2",modified_image)

        # Generate the content to put into the image
        tmp_content_dir = mkdtemp()

        print "Collecting boot content for auto-install image"
        generate_boot_content(install_tree_url, tmp_content_dir, distro, args.create_volume)

        # Copy in the kernel, initrd and conf files into the blank boot stub using libguestfs
        print "Copying boot content into a bootable syslinux image"
        copy_content_to_image(tmp_content_dir, modified_image)

        # Upload the resulting image to glance
        print "Uploading image to glance"
        install_image = glance_upload(image_filename = modified_image, image_url = None, creds = creds, glance_url = args.glance_url,
                                      name = "INSTALL for: %s" % (image_name), disk_format=disk_format)

        print "Uploaded successfully as glance image (%s)" % (install_image.id)

        install_volume=None
        # TODO: Make volume size configurable
        if args.create_volume:
            print "Converting Glance install image to a Cinder volume"
            install_volume = volume_from_image(install_image.id, creds, args.glance_url, volume_size = args.install_volume_size)

        if args.install_media_url or args.install_media_file:
            # URL/file media: upload to Glance, convert to a volume, then
            # snapshot the volume so the media can be reused by later runs.
            if args.install_media_url:
                print "Generating Glance image from URL: %s" % (args.install_media_url)
                install_media_image = glance_upload(image_filename = None, image_url = args.install_media_url,
                                                    creds = creds, glance_url = args.glance_url, name = "FromURL: %s" % (args.install_media_url),
                                                    disk_format='raw')
            else:
                print "Generating Glance image from file: %s" % (args.install_media_file)
                install_media_image = glance_upload(image_filename = args.install_media_file, image_url = None,
                                                    creds = creds, glance_url = args.glance_url, name = os.path.basename(args.install_media_file),
                                                    disk_format='raw')
            print "Generating volume from image (%s)" % (install_media_image.id)
            install_media_volume = volume_from_image(install_media_image.id, creds, args.glance_url)
            print "Generating snapshot of volume (%s) to allow install media reuse" % (install_media_volume.id)
            install_media_snapshot = snapshot_from_volume(install_media_volume.id, creds)
            install_media_snapshot_id = install_media_snapshot.id
            print "#### Future installs can reference this snapshot with the following argument:"
            print " --install-media-snapshot %s" % install_media_snapshot_id
        elif args.install_media_snapshot:
            # Reuse a snapshot produced by a previous run.
            print "Generating working volume from snapshot (%s)" % (args.install_media_snapshot)
            install_media_snapshot_id = args.install_media_snapshot
            install_media_volume = volume_from_snapshot(args.install_media_snapshot, creds)

        # Launch the image with the provided ks.cfg as the user data
        # Optionally - spawn a vncviewer to watch the install graphically
        # Poll on image status until it is SHUTDOWN or timeout
        print "Launching install image"
        installed_instance = launch_and_wait(install_image, install_volume, install_media_volume, working_kickstart,
                                             os.path.basename(args.ks_file), creds, console_password, console_command)

        # Take a snapshot of the now safely shutdown image
        # For volume snapshots we must terminate the instance first then snapshot
        # For glance/image snapshots we must _not_ terminate the instance until the snapshot is complete
        print "Taking snapshot of completed install"
        if args.create_volume:
            print "Terminating instance (%s) in preparation for taking a snapshot of the root volume" % (installed_instance.id)
            terminate_instance(installed_instance.id, creds)
            # Cleared so the finally block does not try to terminate it again.
            installed_instance = None
            finished_image_snapshot = snapshot_from_volume(install_volume.id, creds)
            print "Volume-based image available from snapshot ID: %s" % (finished_image_snapshot.id)
            print "Finished snapshot name is: %s" % (finished_image_snapshot.display_name)
            finished = True
        else:
            finished_image_id = installed_instance.create_image(image_name)
            print "Waiting for glance image snapshot to complete"
            wait_for_glance_snapshot(finished_image_id, creds, args.glance_url)
            print "Terminating instance (%s) now that snapshot is complete" % (installed_instance.id)
            terminate_instance(installed_instance.id, creds)
            installed_instance = None
            print "Finished image snapshot ID is: %s" % (finished_image_id)
            print "Finished image name is: %s" % (image_name)
            finished = True
    except Exception as e:
        print "Uncaught exception encountered during install"
        print str(e)
        retcode = 1
    finally:
        if args.leave_mess:
            print "Leaving a mess - this includes local files, local dirs, remote images, remote volumes and remote snapshots"
            # NOTE: exiting inside finally skips the cleanup below by design.
            sys.exit(retcode)

        print "Cleaning up"

        try:
            if tmp_content_dir:
                print "Removing boot content dir"
                shutil.rmtree(tmp_content_dir)

            if modified_image:
                print "Removing install image %s" % (modified_image)
                #TODO:Note that this is actually cacheable on a per-os-version basis
                os.remove(modified_image)

            if installed_instance:
                # Note that under normal operation this is terminated when completing the snapshot process
                print "Terminating install instance (%s)" % (installed_instance.id)
                terminate_instance(installed_instance.id, creds)

            if install_image:
                print "Deleting Glance image (%s) used to launch install" % (install_image.id)
                install_image.delete()

            if install_media_volume:
                print "Removing working volume containing install media"
                print "Snapshot (%s) remains available for future use" % (install_media_snapshot_id)
                install_media_volume.delete()
        except:
            print "WARNING: Exception while attempting to clean up - we may have left a mess"
            retcode = 1

        # For usability - reprint the most important bits from above as the last output
        if finished:
            print "FINISHED!"
            print
            print "Image Details:"
            if args.create_volume:
                print "Volume snapshot name: %s" % (finished_image_snapshot.display_name)
                print "ID: %s" % (finished_image_snapshot.id)
            else:
                print "Glance image name: %s" % (image_name)
                print "ID: %s" % (finished_image_id)

        sys.exit(retcode)
if __name__ == '__main__':
    # Parse the command line, then run the full install-and-snapshot flow.
    cli_args = get_cli_arguments()
    create_image(cli_args)

View File

@ -17,22 +17,20 @@
# limitations under the License.
import os
import os.path
import sys
import guestfs
import pycurl
import shutil
import subprocess
import argparse
import re
from string import Template
from tempfile import mkdtemp, NamedTemporaryFile, TemporaryFile
from tempfile import NamedTemporaryFile, TemporaryFile
from time import sleep
import guestfs
import pycurl
from glanceclient import client as glance_client
from cinderclient import client as cinder_client
from keystoneclient.v2_0 import client as keystone_client
from novaclient.v1_1 import client as nova_client
from time import sleep, gmtime, strftime
from ping import do_one
### Utility functions borrowed from Oz and lightly modified

View File

View File

@ -0,0 +1,226 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from imagebuilder.openstack.common.gettextutils import _
"""
import copy
import gettext
import logging.handlers
import os
import UserString
# Message catalog directory may be overridden via IMAGEBUILDER_LOCALEDIR;
# otherwise gettext's default search path is used.
_localedir = os.environ.get('imagebuilder'.upper() + '_LOCALEDIR')
# fallback=True: a missing catalog yields identity translations instead of
# raising IOError.
_t = gettext.translation('imagebuilder', localedir=_localedir, fallback=True)


def _(msg):
    """Translate msg using the 'imagebuilder' catalog.

    Uses ugettext, so the result is a unicode object (Python 2 API).
    """
    return _t.ugettext(msg)
def install(domain):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.

    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).

    :param domain: translation domain name; also selects the
                   <DOMAIN>_LOCALEDIR environment variable.
    """
    # unicode=True makes the installed _() return unicode objects
    # (Python 2 only; this keyword was removed in Python 3).
    gettext.install(domain,
                    localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
                    unicode=True)
"""
Lazy gettext functionality.
The following is an attempt to introduce a deferred way
to do translations on messages in OpenStack. We attempt to
override the standard _() function and % (format string) operation
to build Message objects that can later be translated when we have
more information. Also included is an example LogHandler that
translates Messages to an associated locale, effectively allowing
many logs, each with their own locale.
"""
def get_lazy_gettext(domain):
    """Assemble and return a lazy gettext function for a given domain.

    Factory method for a project/module to get a lazy gettext function
    for its own translation domain (i.e. nova, glance, cinder, etc.)
    """
    def _lazy_gettext(msg):
        """Wrap msg in a Message so translation happens at render time."""
        return Message(msg, domain)

    return _lazy_gettext
class Message(UserString.UserString, object):
    """Class used to encapsulate translatable messages.

    Holds the original gettext msgid plus any text concatenated to either
    side and any % parameters, and defers the actual translation until the
    string value is needed (the `data` property), at which point the
    per-message `locale` attribute — if set — selects the catalog.
    """
    def __init__(self, msg, domain):
        # _msg is the gettext msgid and should never change
        self._msg = msg
        # Text concatenated on either side via __add__/__radd__.
        self._left_extra_msg = ''
        self._right_extra_msg = ''
        # % parameters captured by __mod__; None means "no parameters yet".
        self.params = None
        # Target locale; None means use the system locale.
        self.locale = None
        self.domain = domain

    @property
    def data(self):
        # NOTE(mrodden): this should always resolve to a unicode string
        # that best represents the state of the message currently

        localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
        if self.locale:
            lang = gettext.translation(self.domain,
                                       localedir=localedir,
                                       languages=[self.locale],
                                       fallback=True)
        else:
            # use system locale for translations
            lang = gettext.translation(self.domain,
                                       localedir=localedir,
                                       fallback=True)

        # Rebuild the full string: extras are NOT translated, only _msg is.
        full_msg = (self._left_extra_msg +
                    lang.ugettext(self._msg) +
                    self._right_extra_msg)

        if self.params is not None:
            full_msg = full_msg % self.params

        return unicode(full_msg)

    def _save_parameters(self, other):
        # we check for None later to see if
        # we actually have parameters to inject,
        # so encapsulate if our parameter is actually None
        if other is None:
            self.params = (other, )
        else:
            self.params = copy.deepcopy(other)

        return self

    # overrides to be more string-like
    def __unicode__(self):
        return self.data

    def __str__(self):
        return self.data.encode('utf-8')

    def __getstate__(self):
        # Pickle only the translation state, deep-copied so the pickled
        # snapshot cannot alias mutable parameters.
        to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
                   'domain', 'params', 'locale']
        new_dict = self.__dict__.fromkeys(to_copy)
        for attr in to_copy:
            new_dict[attr] = copy.deepcopy(self.__dict__[attr])

        return new_dict

    def __setstate__(self, state):
        for (k, v) in state.items():
            setattr(self, k, v)

    # operator overloads
    def __add__(self, other):
        # Concatenation returns a NEW Message; the msgid is untouched.
        copied = copy.deepcopy(self)
        copied._right_extra_msg += other.__str__()
        return copied

    def __radd__(self, other):
        copied = copy.deepcopy(self)
        copied._left_extra_msg += other.__str__()
        return copied

    def __mod__(self, other):
        # do a format string to catch and raise
        # any possible KeyErrors from missing parameters
        self.data % other
        copied = copy.deepcopy(self)
        return copied._save_parameters(other)

    def __mul__(self, other):
        # Repetition is lossy: it resolves to a plain string.
        return self.data * other

    def __rmul__(self, other):
        return other * self.data

    def __getitem__(self, key):
        return self.data[key]

    def __getslice__(self, start, end):
        return self.data.__getslice__(start, end)

    def __getattribute__(self, name):
        # NOTE(mrodden): handle lossy operations that we can't deal with yet
        # These override the UserString implementation, since UserString
        # uses our __class__ attribute to try and build a new message
        # after running the inner data string through the operation.
        # At that point, we have lost the gettext message id and can just
        # safely resolve to a string instead.
        ops = ['capitalize', 'center', 'decode', 'encode',
               'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
               'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
        if name in ops:
            return getattr(self.data, name)
        else:
            return UserString.UserString.__getattribute__(self, name)
class LocaleHandler(logging.Handler):
    """Handler that can have a locale associated to translate Messages.

    A quick example of how to utilize the Message class above.
    LocaleHandler takes a locale and a target logging.Handler object
    to forward LogRecord objects to after translating the internal Message.
    """

    def __init__(self, locale, target):
        """
        Initialize a LocaleHandler

        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        logging.Handler.__init__(self)
        self.locale = locale
        self.target = target

    def emit(self, record):
        if isinstance(record.msg, Message):
            # set the locale and resolve to a string
            # (resolution happens when the target handler formats the record)
            record.msg.locale = self.locale

        # Non-Message records are forwarded untouched.
        self.target.emit(record)

View File

@ -0,0 +1,67 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Import related utilities and helper functions.
"""
import sys
import traceback
def import_class(import_str):
    """Returns a class from a string including module and class."""
    mod_str, _sep, class_str = import_str.rpartition('.')
    try:
        __import__(mod_str)
        module = sys.modules[mod_str]
        return getattr(module, class_str)
    except (ValueError, AttributeError):
        # ValueError: empty module path; AttributeError: missing attribute.
        raise ImportError('Class %s cannot be found (%s)' %
                          (class_str,
                           traceback.format_exception(*sys.exc_info())))
def import_object(import_str, *args, **kwargs):
    """Import a class and return an instance of it."""
    cls = import_class(import_str)
    return cls(*args, **kwargs)
def import_object_ns(name_space, import_str, *args, **kwargs):
    """
    Import a class and return an instance of it, first by trying
    to find the class in a default namespace, then failing back to
    a full path if not found in the default namespace.
    """
    namespaced = "%s.%s" % (name_space, import_str)
    try:
        return import_class(namespaced)(*args, **kwargs)
    except ImportError:
        # Not under the default namespace; treat import_str as a full path.
        return import_class(import_str)(*args, **kwargs)
def import_module(import_str):
    """Import a module.

    __import__ returns the top-level package for dotted names, so the
    actual (possibly nested) module is fetched from sys.modules instead.
    """
    __import__(import_str)
    return sys.modules[import_str]
def try_import(import_str, default=None):
    """Try to import a module and if it fails return default."""
    try:
        module = import_module(import_str)
    except ImportError:
        # Optional dependency is missing; hand back the fallback value.
        return default
    return module

View File

@ -0,0 +1,169 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import functools
import inspect
import itertools
import json
import types
import xmlrpclib
import six
from imagebuilder.openstack.common import timeutils
# Predicates identifying objects that cannot be sensibly serialized
# (modules, classes, functions, frames, etc.); such values are coerced to
# their text representation by to_primitive().
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                     inspect.isfunction, inspect.isgeneratorfunction,
                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
                     inspect.isabstract]

# Types that are already JSON-friendly and are returned untouched.
# (Python 2: NoneType comes from the types module; basestring/long are
# Python 2 builtins.)
_simple_types = (types.NoneType, int, basestring, bool, float, long)
def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.

    :param value: arbitrary object to convert
    :param convert_instances: recurse into instance __dict__s (lossy)
    :param convert_datetime: render datetimes via timeutils.strtime
    :param level: current recursion depth (internal)
    :param max_depth: depth at which recursion gives up and returns '?'
    """
    # handle obvious types first - order of basic types determined by running
    # full tests on nova project, resulting in the following counts:
    # 572754 <type 'NoneType'>
    # 460353 <type 'int'>
    # 379632 <type 'unicode'>
    # 274610 <type 'str'>
    # 199918 <type 'dict'>
    # 114200 <type 'datetime.datetime'>
    #  51817 <type 'bool'>
    #  26164 <type 'list'>
    #   6491 <type 'float'>
    #    283 <type 'tuple'>
    #     19 <type 'long'>
    if isinstance(value, _simple_types):
        return value

    if isinstance(value, datetime.datetime):
        if convert_datetime:
            return timeutils.strtime(value)
        else:
            return value

    # value of itertools.count doesn't get caught by nasty_type_tests
    # and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return six.text_type(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    if level > max_depth:
        # Too deep: bail out with a placeholder rather than recurse forever.
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        # Partial application carries the options through every recursion.
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)
        if isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in value.iteritems())
        elif isinstance(value, (list, tuple)):
            return [recursive(lv) for lv in value]

        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

        if convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif hasattr(value, 'iteritems'):
            # Dict-like object: convert through a real dict, one level deeper.
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        else:
            if any(test(value) for test in _nasty_type_tests):
                return six.text_type(value)
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ defined but it isn't callable as list().
        return six.text_type(value)
def dumps(value, default=to_primitive, **kwargs):
    """json.dumps that coerces non-serializable objects via to_primitive."""
    return json.dumps(value, default=default, **kwargs)


def loads(s):
    """Deserialize a JSON document from the string s."""
    return json.loads(s)


def load(s):
    """Deserialize a JSON document from the file-like object s."""
    return json.load(s)
# If anyjson is installed, register this module as one of its backends so
# that code using anyjson picks up the to_primitive-aware dumps() above.
try:
    import anyjson
except ImportError:
    pass
else:
    # Tuple format is anyjson's (module, dumps-name, dumps-error,
    # loads-name, loads-error, load-name) registration convention.
    anyjson._modules.append((__name__, 'dumps', TypeError,
                             'loads', ValueError, 'load'))
    anyjson.force_implementation(__name__)

View File

@ -0,0 +1,48 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Greenthread local storage of variables using weak references"""
import weakref
from eventlet import corolocal
class WeakLocal(corolocal.local):
    """Greenthread-local storage whose slots hold weak references.

    Values stored here do not keep their referents alive: when the owning
    code drops its last strong reference, reads return None.
    """

    def __getattribute__(self, attr):
        stored = corolocal.local.__getattribute__(self, attr)
        if stored:
            # NOTE(mikal): this bit is confusing. What is stored is a weak
            # reference, not the value itself. We therefore need to lookup
            # the weak reference and return the inner value here.
            stored = stored()
        return stored

    def __setattr__(self, attr, value):
        # Wrap every stored value in a weakref so it can be collected.
        return corolocal.local.__setattr__(self, attr, weakref.ref(value))
# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()

# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
# NOTE: strong_store is the eventlet local *class*, not an instance —
# callers are expected to use it directly as greenthread-local storage.
strong_store = corolocal.local

View File

@ -0,0 +1,558 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Openstack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import ConfigParser
import cStringIO
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import sys
import traceback
from oslo.config import cfg
from imagebuilder.openstack.common.gettextutils import _
from imagebuilder.openstack.common import importutils
from imagebuilder.openstack.common import jsonutils
from imagebuilder.openstack.common import local
# Default strftime format for %(asctime)s in log records.
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

# CLI options shared by every service: verbosity switches.
common_cli_opts = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config',
metavar='PATH',
help='If this option is specified, the logging configuration '
'file specified is used and overrides any other logging '
'options specified. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
cfg.StrOpt('log-format',
default=None,
metavar='FORMAT',
help='A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='syslog facility to receive log lines')
]
# Config-file-only (non-CLI) output option.
generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error')
]

# Format strings and behavior knobs for the log records themselves.
log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [%(request_id)s %(user)s %(tenant)s] '
                       '%(instance)s%(message)s',
               help='format string to use for log messages with context'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [-] %(instance)s%(message)s',
               help='format string to use for log messages without context'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='data to append to log format when level is DEBUG'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                       '%(instance)s',
               help='prefix each line of exception output with this format'),
    cfg.ListOpt('default_log_levels',
                default=[
                    'amqplib=WARN',
                    'sqlalchemy=WARN',
                    'boto=WARN',
                    'suds=INFO',
                    'keystone=INFO',
                    'eventlet.wsgi.server=WARN'
                ],
                help='list of logger=LEVEL pairs'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='publish error events'),
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='make deprecations fatal'),

    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='If an instance is passed with the log message, format '
                    'it like this'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='If an instance UUID is passed with the log message, '
                    'format it like this'),
]
# Register all option groups on the global config object at import time.
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)

# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
#                module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')


try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    # Minimal no-op handler backport for Python 2.6.
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            # No lock needed since handle/emit do nothing.
            self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
    """Resolve the log file path from CONF.log_file / CONF.log_dir.

    :param binary: basename to use when only a log directory is set;
        defaults to the running script's name.
    :returns: a path, or None when neither option is configured.
    """
    logfile = CONF.log_file
    logdir = CONF.log_dir

    if logfile:
        return os.path.join(logdir, logfile) if logdir else logfile

    if logdir:
        binary = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, binary),)
class BaseLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter base that understands the synthesized AUDIT level."""

    def audit(self, msg, *args, **kwargs):
        """Log *msg* at logging.AUDIT severity."""
        self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
    """Adapter that defers creating the real logger until first use."""

    def __init__(self, name='unknown', version='unknown'):
        # NOTE: deliberately does NOT call LoggerAdapter.__init__; the
        # underlying logger is materialized lazily by the property below.
        self._logger = None
        self.extra = {}
        self.name = name
        self.version = version

    @property
    def logger(self):
        """Create (once) and return the real ContextAdapter-backed logger."""
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
        return self._logger
class ContextAdapter(BaseLoggerAdapter):
    """LoggerAdapter that injects request-context fields into records.

    process() merges the current context (explicit ``context=`` kwarg or
    the thread-local ``local.store.context``), the instance identifier
    and the project/version strings into ``extra`` so the context-aware
    format strings can reference them.
    """

    # Alias so callers may use .warn() as well as .warning().
    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string

    @property
    def handlers(self):
        # Expose the wrapped logger's handlers (LoggerAdapter does not).
        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        """Warn about a deprecation, or raise if CONF.fatal_deprecations."""
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        else:
            self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        """Populate kwargs['extra'] with context/instance/project fields."""
        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']

        context = kwargs.pop('context', None)
        if not context:
            # Fall back to the context stashed in the thread-local store.
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))

        instance = kwargs.pop('instance', None)
        instance_extra = ''
        if instance:
            # Full instance object: format with CONF.instance_format.
            instance_extra = CONF.instance_format % instance
        else:
            # Only a UUID was handed in: use the UUID-specific format.
            instance_uuid = kwargs.pop('instance_uuid', None)
            if instance_uuid:
                instance_extra = (CONF.instance_uuid_format
                                  % {'uuid': instance_uuid})
        extra.update({'instance': instance_extra})

        extra.update({"project": self.project})
        extra.update({"version": self.version})

        # Keep a self-copy under 'extra' so JSONFormatter can emit it all.
        extra['extra'] = extra.copy()
        return msg, kwargs
class JSONFormatter(logging.Formatter):
    """Formatter that serializes the whole LogRecord as a JSON object."""

    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        #                since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        """Format exc_info as a list of traceback lines.

        With strip_newlines (the default), multi-line entries are split
        and empty lines dropped, yielding one physical line per element.
        """
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            # Python 2 idiom: ifilter with an identity lambda drops
            # falsy (empty) strings from each entry's line list.
            lines = [itertools.ifilter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines

    def format(self, record):
        """Return *record* rendered as a JSON string."""
        # Both the interpolated 'message' and the raw 'msg'/'args' are
        # emitted so consumers can re-aggregate by template.
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}

        # 'extra' is attached by ContextAdapter.process() when present.
        if hasattr(record, 'extra'):
            message['extra'] = record.extra

        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)

        return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
def logging_excepthook(type, value, tb):
extra = {}
if CONF.verbose:
extra['exc_info'] = (type, value, tb)
getLogger(product_name).critical(str(value), **extra)
return logging_excepthook
class LogConfigError(Exception):
    """Raised when a logging config file cannot be loaded or parsed."""

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % {'log_config': self.log_config,
                               'err_msg': self.err_msg}
def _load_log_config(log_config):
    """Load a logging config file, wrapping parse errors in LogConfigError."""
    try:
        logging.config.fileConfig(log_config)
    except ConfigParser.Error as exc:
        # fileConfig surfaces malformed files as ConfigParser errors.
        raise LogConfigError(log_config, str(exc))
def setup(product_name):
    """Configure logging for *product_name* and install the excepthook."""
    log_config = CONF.log_config
    if log_config:
        # An explicit logging config file wins over all other options.
        _load_log_config(log_config)
    else:
        _setup_logging_from_conf()
    sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
    """Override the default context-aware log format string."""
    cfg.set_defaults(
        log_opts,
        logging_context_format_string=logging_context_format_string)
def _find_facility_from_conf():
    """Map CONF.syslog_log_facility to a SysLogHandler facility value.

    Accepts either an attribute name on SysLogHandler (e.g. 'LOG_USER')
    or a key of SysLogHandler.facility_names (e.g. 'user').

    :raises TypeError: listing all valid names when the option is invalid.
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)

    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        valid = list(facility_names.keys())
        valid.extend(['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                      'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL',
                      'LOG_NEWS', 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER',
                      'LOG_UUCP', 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2',
                      'LOG_LOCAL3', 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6',
                      'LOG_LOCAL7'])
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac for fac in valid))

    return facility
def _setup_logging_from_conf():
    """Wire up root-logger handlers, formatters and levels from CONF."""
    # Start from a clean slate: drop any handlers already on the root.
    log_root = getLogger(None).logger
    for handler in log_root.handlers:
        log_root.removeHandler(handler)

    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                facility=facility)
        log_root.addHandler(syslog)

    # File logging, when log_file/log_dir resolve to a path.
    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)

    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)

    elif not CONF.log_file:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)

    if CONF.publish_errors:
        # Loaded lazily via importutils to avoid a hard dependency.
        handler = importutils.import_object(
            "imagebuilder.openstack.common.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)

    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(datefmt=datefmt))

    # Root level: debug > verbose > default WARNING.
    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)

    # Apply per-module level overrides ('logger=LEVEL' pairs).
    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(mod)
        logger.setLevel(level)
# Cache of ContextAdapter instances, keyed by logger name.
_loggers = {}


def getLogger(name='unknown', version='unknown'):
    """Return a cached ContextAdapter wrapping logging.getLogger(name)."""
    adapter = _loggers.get(name)
    if adapter is None:
        adapter = ContextAdapter(logging.getLogger(name), name, version)
        _loggers[name] = adapter
    return adapter
def getLazyLogger(name='unknown', version='unknown'):
    """Return a pass-through logger that defers creating the real logger
    until it is first needed, then delegates all calls to it.
    """
    return LazyAdapter(name, version)
class WritableLogger(object):
    """Adapt a logger to a file-like object exposing ``write``.

    Handy for handing a logger to code that expects a writable stream
    (e.g. eventlet.wsgi's log argument).
    """

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        """Log *msg* at the configured level."""
        self.logger.log(self.level, msg)
class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string. You can also specify
    logging_debug_format_suffix to append extra formatting if the log level
    is debug.

    For information about what variables are available for the formatter
    see: http://docs.python.org/library/logging.html#formatter
    """

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # NOTE(sdague): default the fancier formating params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color'):
            if key not in record.__dict__:
                record.__dict__[key] = ''

        # Pick the format string per record: context-aware when the
        # record carries a request_id, plain otherwise.
        if record.__dict__.get('request_id', None):
            self._fmt = CONF.logging_context_format_string
        else:
            self._fmt = CONF.logging_default_format_string

        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            self._fmt += " " + CONF.logging_debug_format_suffix

        # Cache this on the record, Logger will respect our formated copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        # Without a record there is nothing to interpolate the prefix
        # against; fall back to the stock formatter.
        if not record:
            return logging.Formatter.formatException(self, exc_info)

        stringbuffer = cStringIO.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()

        # The prefix may reference %(asctime)s; make sure it is set.
        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)

        # Prepend the interpolated prefix to every traceback line.
        formatted_lines = []
        for line in lines:
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
    """StreamHandler that attaches an ANSI color escape to each record.

    The escape is exposed to format strings as %(color)s; the
    ContextFormatter defaults the attribute to '' when absent.
    """

    # levelno -> ANSI escape sequence
    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        # Direct indexing: raises KeyError for levels outside the map.
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
    """Raised when a fatally-deprecated config option is used
    (see ContextAdapter.deprecated and CONF.fatal_deprecations).
    """

    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        # Fixed: the original wrote super(Exception, self), which starts
        # the MRO lookup *after* Exception; super() must name the class
        # being defined so Exception.__init__ itself is invoked.
        super(DeprecatedConfig, self).__init__(self.message % dict(msg=msg))

View File

@ -0,0 +1,187 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Time related utilities and helper functions.
"""
import calendar
import datetime
import iso8601
# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND


def isotime(at=None, subsecond=False):
    """Stringify a time (default: utcnow) in ISO 8601 format.

    Naive datetimes are labeled 'Z' (UTC); aware ones get their tz name.
    """
    if not at:
        at = utcnow()
    fmt = _ISO8601_TIME_FORMAT_SUBSECOND if subsecond else _ISO8601_TIME_FORMAT
    stamp = at.strftime(fmt)
    tz_name = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    if tz_name == 'UTC':
        stamp += 'Z'
    else:
        stamp += tz_name
    return stamp
def parse_isotime(timestr):
    """Parse time from ISO 8601 format.

    :param timestr: ISO 8601 time string.
    :raises ValueError: if *timestr* is malformed or not a string.
    """
    try:
        return iso8601.parse_date(timestr)
    except (iso8601.ParseError, TypeError) as e:
        # str(e) instead of e.message: .message is Python-2-only and was
        # removed in Python 3; str() yields the same text for
        # single-argument exceptions. Also merges the two identical
        # handlers into one.
        raise ValueError(str(e))
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Return *at* (default: utcnow) formatted with *fmt*."""
    when = at or utcnow()
    return when.strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
    """Turn a formatted time string back into a datetime."""
    parsed = datetime.datetime.strptime(timestr, fmt)
    return parsed
def normalize_time(timestamp):
    """Normalize a time in an arbitrary timezone to a naive UTC datetime.

    Naive inputs (no utcoffset) are returned unchanged.
    """
    offset = timestamp.utcoffset()
    if offset is not None:
        timestamp = timestamp.replace(tzinfo=None) - offset
    return timestamp
def is_older_than(before, seconds):
    """Return True if *before* is more than *seconds* seconds in the past.

    Accepts a datetime or a PERFECT_TIME_FORMAT string.
    """
    if isinstance(before, basestring):
        before = parse_strtime(before).replace(tzinfo=None)
    age = utcnow() - before
    return age > datetime.timedelta(seconds=seconds)
def is_newer_than(after, seconds):
    """Return True if *after* is more than *seconds* seconds in the future.

    Accepts a datetime or a PERFECT_TIME_FORMAT string.
    """
    if isinstance(after, basestring):
        after = parse_strtime(after).replace(tzinfo=None)
    lead = after - utcnow()
    return lead > datetime.timedelta(seconds=seconds)
def utcnow_ts():
    """Return utcnow() as an integer POSIX timestamp (UTC)."""
    now = utcnow()
    return calendar.timegm(now.timetuple())
def utcnow():
    """Overridable version of utils.utcnow.

    When utcnow.override_time is a datetime it is returned as-is; when it
    is a list, entries are consumed one per call.
    """
    override = utcnow.override_time
    if override:
        try:
            # List of canned times: consume them one at a time.
            return override.pop(0)
        except AttributeError:
            # A single datetime: keep returning it.
            return override
    return datetime.datetime.utcnow()
def iso8601_from_timestamp(timestamp):
    """Return an ISO 8601 formatted date from a POSIX timestamp."""
    as_datetime = datetime.datetime.utcfromtimestamp(timestamp)
    return isotime(as_datetime)


# Default: no override in effect; utcnow() returns the real current time.
utcnow.override_time = None
def set_time_override(override_time=None):
    """Override utils.utcnow to return a constant time or a list thereof,
    one at a time.

    :param override_time: datetime instance or list thereof. When omitted,
        defaults to the current UTC time *at the moment of the call*.
        (The original evaluated ``datetime.datetime.utcnow()`` in the
        signature, freezing the default once at import time — a classic
        evaluated-once default-argument bug.)
    """
    utcnow.override_time = override_time or utcnow()
def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta.

    Works for both a single overridden datetime and a list of them.
    """
    assert utcnow.override_time is not None
    try:
        # Fixed: the original looped ``for dt in ...: dt += timedelta``,
        # which only rebinds the loop variable and leaves the list's
        # entries unchanged. Rebuild the list so every canned time
        # actually advances.
        utcnow.override_time = [dt + timedelta
                                for dt in utcnow.override_time]
    except TypeError:
        # Single (non-iterable) datetime override.
        utcnow.override_time += timedelta
def advance_time_seconds(seconds):
    """Advance overridden time by *seconds* seconds."""
    advance_time_delta(datetime.timedelta(seconds=seconds))
def clear_time_override():
    """Remove the overridden time; utcnow() returns the real time again."""
    utcnow.override_time = None
def marshall_now(now=None):
    """Make an rpc-safe datetime dict with microseconds.

    Note: tzinfo is stripped, but not required for relative times.
    """
    if not now:
        now = utcnow()
    return {
        'day': now.day,
        'month': now.month,
        'year': now.year,
        'hour': now.hour,
        'minute': now.minute,
        'second': now.second,
        'microsecond': now.microsecond,
    }
def unmarshall_time(tyme):
    """Rebuild a datetime from a dict produced by marshall_now()."""
    fields = ('year', 'month', 'day', 'hour', 'minute', 'second',
              'microsecond')
    return datetime.datetime(**dict((f, tyme[f]) for f in fields))
def delta_seconds(before, after):
    """Return (after - before) in seconds, as a float, to microsecond
    resolution. Works for date, time, or datetime operands.
    """
    delta = after - before
    if hasattr(delta, 'total_seconds'):
        return delta.total_seconds()
    # Python < 2.7 has no timedelta.total_seconds(); derive it manually.
    seconds = delta.days * 24 * 3600 + delta.seconds
    return seconds + delta.microseconds / float(10 ** 6)
def is_soon(dt, window):
    """Determine if a time is going to happen within *window* seconds.

    :params dt: the time
    :params window: minimum seconds to remain to consider the time not soon
    :return: True if expiration is within the given duration
    """
    deadline = utcnow() + datetime.timedelta(seconds=window)
    return normalize_time(dt) <= deadline

View File

@ -0,0 +1,43 @@
/* Page chrome: dark brown background, white text, sans-serif stack. */
body {
    background: #311F00;
    color: white;
    font-family: 'Helvetica Neue', 'Helvetica', 'Verdana', sans-serif;
    padding: 1em 2em;
}

/* Links: pale yellow, underline only on hover. */
a {
    color: #FAFF78;
    text-decoration: none;
}

a:hover {
    text-decoration: underline;
}

/* Main content column, fixed width and centered. */
div#content {
    width: 800px;
    margin: 0 auto;
}

/* Strip default browser chrome from forms and fieldsets. */
form {
    margin: 0;
    padding: 0;
    border: 0;
}

fieldset {
    border: 0;
}

/* Highlight invalid inputs with the link-yellow background. */
input.error {
    background: #FAFF78;
}

header {
    text-align: center;
}

/* Headings: condensed display face, uppercased. */
h1, h2, h3, h4, h5, h6 {
    font-family: 'Futura-CondensedExtraBold', 'Futura', 'Helvetica', sans-serif;
    text-transform: uppercase;
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 34 KiB

44
imagebuilder/service.py Normal file
View File

@ -0,0 +1,44 @@
#
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from oslo.config import cfg
from imagebuilder.openstack.common import log
from imagebuilder.openstack.common import gettextutils
# REST API server options for the imagebuilder service: bind address,
# listen port, and which persistence backend to load.
cfg.CONF.register_opts([
    cfg.StrOpt('host',
               default='0.0.0.0',
               help='host address for imagebuilder REST API'),
    cfg.IntOpt('port',
               default=8080,
               help='port to listen to for imagebuilder REST API'),
    cfg.StrOpt('persistence_backend',
               default='SQLAlchemy',
               help='data manager to use: SQLAlchemy, Mongo')
])
def prepare_service(argv=None):
    """Initialize the imagebuilder service: i18n, config, then logging.

    :param argv: full command-line argument list; defaults to sys.argv.
        argv[0] (the program name) is dropped before parsing.
    """
    # Install the _() translation function for the imagebuilder domain.
    gettextutils.install('imagebuilder')
    # Quiet chatty third-party loggers unless overridden in config.
    cfg.set_defaults(log.log_opts,
                     default_log_levels=['sqlalchemy=WARN',
                                         'eventlet.wsgi.server=WARN'
                                         ])
    if argv is None:
        argv = sys.argv
    # Parse CLI/config-file options, then wire up logging from them.
    cfg.CONF(argv[1:], project='imagebuilder')
    log.setup('imagebuilder')

4
openstack-common.conf Normal file
View File

@ -0,0 +1,4 @@
[DEFAULT]
module=log
base=imagebuilder