Add a simple and basic webinterface to the API service

Change-Id: I26c8f673f8f2697c63e6210a5a9f83b69259ea21
This commit is contained in:
Christian Berendt 2015-03-29 18:52:48 +02:00
parent da37ae9835
commit cfdb998eba
12 changed files with 321 additions and 209 deletions

View File

@ -2,3 +2,7 @@
database_url = mysql://faafo:secretsecret@127.0.0.1:3306/faafo
verbose = True
[glance_store]
default_store = file
filesystem_store_datadir = /home/vagrant

Binary file not shown.

After

Width:  |  Height:  |  Size: 202 KiB

View File

@ -35,3 +35,8 @@ API Service
2015-03-25 23:01:29.342 22511 INFO werkzeug [-] 127.0.0.1 - - [25/Mar/2015 23:01:29] "POST /api/fractal HTTP/1.1" 201 -
2015-03-25 23:01:30.317 22511 INFO werkzeug [-] 127.0.0.1 - - [25/Mar/2015 23:01:30] "PUT /api/fractal/212e8c23-e67f-4bd3-86e1-5a5e811ee2f4 HTTP/1.1" 200 -
Example webinterface view
-------------------------
.. image:: images/screenshot_webinterface.png

View File

@ -126,42 +126,54 @@
# From glance.store
#
# The host where the S3 server is listening. (string value)
#s3_store_host = <None>
# Directory to which the Filesystem backend store writes images.
# (string value)
# Deprecated group/name - [DEFAULT]/filesystem_store_datadir
#filesystem_store_datadir = <None>
# The S3 query token access key. (string value)
#s3_store_access_key = <None>
# List of directories and its priorities to which the Filesystem
# backend store writes images. (multi valued)
# Deprecated group/name - [DEFAULT]/filesystem_store_datadirs
#filesystem_store_datadirs =
# The S3 query token secret key. (string value)
#s3_store_secret_key = <None>
# The path to a file which contains the metadata to be returned with
# any location associated with this store. The file must contain a
# valid JSON object. The object should contain the keys 'id' and
# 'mountpoint'. The value for both keys should be 'string'. (string
# value)
# Deprecated group/name - [DEFAULT]/filesystem_store_metadata_file
#filesystem_store_metadata_file = <None>
# The S3 bucket to be used to store the Glance data. (string value)
#s3_store_bucket = <None>
# The required permission for created image file. In this way the user
# other service used, e.g. Nova, who consumes the image could be the
# exclusive member of the group that owns the files created. Assigning
# it less then or equal to zero means don't change the default
# permission of the file. This value will be decoded as an octal
# digit. (integer value)
# Deprecated group/name - [DEFAULT]/filesystem_store_file_perm
#filesystem_store_file_perm = 0
# The local directory where uploads will be staged before they are
# transferred into S3. (string value)
#s3_store_object_buffer_dir = <None>
# Info to match when looking for cinder in the service catalog. Format
# is : separated values of the form:
# <service_type>:<service_name>:<endpoint_type> (string value)
#cinder_catalog_info = volume:cinder:publicURL
# A boolean to determine if the S3 bucket should be created on upload
# if it does not exist or if an error should be returned to the user.
# (boolean value)
#s3_store_create_bucket_on_put = false
# Override service catalog lookup with template for cinder endpoint
# e.g. http://localhost:8776/v1/%(project_id)s (string value)
#cinder_endpoint_template = <None>
# The S3 calling format used to determine the bucket. Either subdomain
# or path can be used. (string value)
#s3_store_bucket_url_format = subdomain
# Region name of this node (string value)
#os_region_name = <None>
# What size, in MB, should S3 start chunking image files and do a
# multipart upload in S3. (integer value)
#s3_store_large_object_size = 100
# Location of ca certificates file to use for cinder client requests.
# (string value)
#cinder_ca_certificates_file = <None>
# What multipart upload part size, in MB, should S3 use when uploading
# parts. The size must be greater than or equal to 5M. (integer value)
#s3_store_large_object_chunk_size = 10
# Number of cinderclient retries on failed http calls (integer value)
#cinder_http_retries = 3
# The number of thread pools to perform a multipart upload in S3.
# (integer value)
#s3_store_thread_pools = 10
# Allow to perform insecure SSL requests to cinder (boolean value)
#cinder_api_insecure = false
# Version of the authentication service to use. Valid versions are 2
# for keystone and 1 for swauth and rackspace. (deprecated) (string
@ -261,56 +273,6 @@
# The config file that has the swift account(s)configs. (string value)
#swift_store_config_file = <None>
# RADOS images will be chunked into objects of this size (in
# megabytes). For best performance, this should be a power of two.
# (integer value)
#rbd_store_chunk_size = 8
# RADOS pool in which images are stored. (string value)
#rbd_store_pool = images
# RADOS user to authenticate as (only applicable if using Cephx. If
# <None>, a default will be chosen based on the client. section in
# rbd_store_ceph_conf) (string value)
#rbd_store_user = <None>
# Ceph configuration file path. If <None>, librados will locate the
# default config. If using cephx authentication, this file should
# include a reference to the right keyring in a client.<USER> section
# (string value)
#rbd_store_ceph_conf = /etc/ceph/ceph.conf
# Info to match when looking for cinder in the service catalog. Format
# is : separated values of the form:
# <service_type>:<service_name>:<endpoint_type> (string value)
#cinder_catalog_info = volume:cinder:publicURL
# Override service catalog lookup with template for cinder endpoint
# e.g. http://localhost:8776/v1/%(project_id)s (string value)
#cinder_endpoint_template = <None>
# Region name of this node (string value)
#os_region_name = <None>
# Location of ca certificates file to use for cinder client requests.
# (string value)
#cinder_ca_certificates_file = <None>
# Number of cinderclient retries on failed http calls (integer value)
#cinder_http_retries = 3
# Allow to perform insecure SSL requests to cinder (boolean value)
#cinder_api_insecure = false
# Hostname or IP address of the instance to connect to, or a mongodb
# URI, or a list of hostnames / mongodb URIs. If host is an IPv6
# literal it must be enclosed in '[' and ']' characters following the
# RFC2732 URL syntax (e.g. '[::1]' for localhost) (string value)
#mongodb_store_uri = <None>
# Database to use (string value)
#mongodb_store_db = <None>
# ESX/ESXi or vCenter Server target system. The server value can be an
# IP address or a DNS name. (string value)
#vmware_server_host = <None>
@ -366,28 +328,33 @@
# valued)
#vmware_datastores =
# Directory to which the Filesystem backend store writes images.
# RADOS images will be chunked into objects of this size (in
# megabytes). For best performance, this should be a power of two.
# (integer value)
#rbd_store_chunk_size = 8
# RADOS pool in which images are stored. (string value)
#rbd_store_pool = images
# RADOS user to authenticate as (only applicable if using Cephx. If
# <None>, a default will be chosen based on the client. section in
# rbd_store_ceph_conf) (string value)
#rbd_store_user = <None>
# Ceph configuration file path. If <None>, librados will locate the
# default config. If using cephx authentication, this file should
# include a reference to the right keyring in a client.<USER> section
# (string value)
#filesystem_store_datadir = <None>
#rbd_store_ceph_conf = /etc/ceph/ceph.conf
# List of directories and its priorities to which the Filesystem
# backend store writes images. (multi valued)
#filesystem_store_datadirs =
# Hostname or IP address of the instance to connect to, or a mongodb
# URI, or a list of hostnames / mongodb URIs. If host is an IPv6
# literal it must be enclosed in '[' and ']' characters following the
# RFC2732 URL syntax (e.g. '[::1]' for localhost) (string value)
#mongodb_store_uri = <None>
# The path to a file which contains the metadata to be returned with
# any location associated with this store. The file must contain a
# valid JSON object. The object should contain the keys 'id' and
# 'mountpoint'. The value for both keys should be 'string'. (string
# value)
#filesystem_store_metadata_file = <None>
# The required permission for created image file. In this way the user
# other service used, e.g. Nova, who consumes the image could be the
# exclusive member of the group that owns the files created. Assigning
# it less then or equal to zero means don't change the default
# permission of the file. This value will be decoded as an octal
# digit. (integer value)
#filesystem_store_file_perm = 0
# Database to use (string value)
#mongodb_store_db = <None>
# Images will be chunked into objects of this size (in megabytes). For
# best performance, this should be a power of two. (integer value)
@ -398,3 +365,40 @@
# IP address of sheep daemon. (string value)
#sheepdog_store_address = localhost
# The host where the S3 server is listening. (string value)
#s3_store_host = <None>
# The S3 query token access key. (string value)
#s3_store_access_key = <None>
# The S3 query token secret key. (string value)
#s3_store_secret_key = <None>
# The S3 bucket to be used to store the Glance data. (string value)
#s3_store_bucket = <None>
# The local directory where uploads will be staged before they are
# transferred into S3. (string value)
#s3_store_object_buffer_dir = <None>
# A boolean to determine if the S3 bucket should be created on upload
# if it does not exist or if an error should be returned to the user.
# (boolean value)
#s3_store_create_bucket_on_put = false
# The S3 calling format used to determine the bucket. Either subdomain
# or path can be used. (string value)
#s3_store_bucket_url_format = subdomain
# What size, in MB, should S3 start chunking image files and do a
# multipart upload in S3. (integer value)
#s3_store_large_object_size = 100
# What multipart upload part size, in MB, should S3 use when uploading
# parts. The size must be greater than or equal to 5M. (integer value)
#s3_store_large_object_chunk_size = 10
# The number of thread pools to perform a multipart upload in S3.
# (integer value)
#s3_store_thread_pools = 10

View File

@ -240,42 +240,50 @@
# From glance.store
#
# The host where the S3 server is listening. (string value)
#s3_store_host = <None>
# Directory to which the Filesystem backend store writes images.
# (string value)
#filesystem_store_datadir = <None>
# The S3 query token access key. (string value)
#s3_store_access_key = <None>
# List of directories and its priorities to which the Filesystem
# backend store writes images. (multi valued)
#filesystem_store_datadirs =
# The S3 query token secret key. (string value)
#s3_store_secret_key = <None>
# The path to a file which contains the metadata to be returned with
# any location associated with this store. The file must contain a
# valid JSON object. The object should contain the keys 'id' and
# 'mountpoint'. The value for both keys should be 'string'. (string
# value)
#filesystem_store_metadata_file = <None>
# The S3 bucket to be used to store the Glance data. (string value)
#s3_store_bucket = <None>
# The required permission for created image file. In this way the user
# other service used, e.g. Nova, who consumes the image could be the
# exclusive member of the group that owns the files created. Assigning
# it less then or equal to zero means don't change the default
# permission of the file. This value will be decoded as an octal
# digit. (integer value)
#filesystem_store_file_perm = 0
# The local directory where uploads will be staged before they are
# transferred into S3. (string value)
#s3_store_object_buffer_dir = <None>
# Info to match when looking for cinder in the service catalog. Format
# is : separated values of the form:
# <service_type>:<service_name>:<endpoint_type> (string value)
#cinder_catalog_info = volume:cinder:publicURL
# A boolean to determine if the S3 bucket should be created on upload
# if it does not exist or if an error should be returned to the user.
# (boolean value)
#s3_store_create_bucket_on_put = false
# Override service catalog lookup with template for cinder endpoint
# e.g. http://localhost:8776/v1/%(project_id)s (string value)
#cinder_endpoint_template = <None>
# The S3 calling format used to determine the bucket. Either subdomain
# or path can be used. (string value)
#s3_store_bucket_url_format = subdomain
# Region name of this node (string value)
#os_region_name = <None>
# What size, in MB, should S3 start chunking image files and do a
# multipart upload in S3. (integer value)
#s3_store_large_object_size = 100
# Location of ca certificates file to use for cinder client requests.
# (string value)
#cinder_ca_certificates_file = <None>
# What multipart upload part size, in MB, should S3 use when uploading
# parts. The size must be greater than or equal to 5M. (integer value)
#s3_store_large_object_chunk_size = 10
# Number of cinderclient retries on failed http calls (integer value)
#cinder_http_retries = 3
# The number of thread pools to perform a multipart upload in S3.
# (integer value)
#s3_store_thread_pools = 10
# Allow to perform insecure SSL requests to cinder (boolean value)
#cinder_api_insecure = false
# Version of the authentication service to use. Valid versions are 2
# for keystone and 1 for swauth and rackspace. (deprecated) (string
@ -375,56 +383,6 @@
# The config file that has the swift account(s)configs. (string value)
#swift_store_config_file = <None>
# RADOS images will be chunked into objects of this size (in
# megabytes). For best performance, this should be a power of two.
# (integer value)
#rbd_store_chunk_size = 8
# RADOS pool in which images are stored. (string value)
#rbd_store_pool = images
# RADOS user to authenticate as (only applicable if using Cephx. If
# <None>, a default will be chosen based on the client. section in
# rbd_store_ceph_conf) (string value)
#rbd_store_user = <None>
# Ceph configuration file path. If <None>, librados will locate the
# default config. If using cephx authentication, this file should
# include a reference to the right keyring in a client.<USER> section
# (string value)
#rbd_store_ceph_conf = /etc/ceph/ceph.conf
# Info to match when looking for cinder in the service catalog. Format
# is : separated values of the form:
# <service_type>:<service_name>:<endpoint_type> (string value)
#cinder_catalog_info = volume:cinder:publicURL
# Override service catalog lookup with template for cinder endpoint
# e.g. http://localhost:8776/v1/%(project_id)s (string value)
#cinder_endpoint_template = <None>
# Region name of this node (string value)
#os_region_name = <None>
# Location of ca certificates file to use for cinder client requests.
# (string value)
#cinder_ca_certificates_file = <None>
# Number of cinderclient retries on failed http calls (integer value)
#cinder_http_retries = 3
# Allow to perform insecure SSL requests to cinder (boolean value)
#cinder_api_insecure = false
# Hostname or IP address of the instance to connect to, or a mongodb
# URI, or a list of hostnames / mongodb URIs. If host is an IPv6
# literal it must be enclosed in '[' and ']' characters following the
# RFC2732 URL syntax (e.g. '[::1]' for localhost) (string value)
#mongodb_store_uri = <None>
# Database to use (string value)
#mongodb_store_db = <None>
# ESX/ESXi or vCenter Server target system. The server value can be an
# IP address or a DNS name. (string value)
#vmware_server_host = <None>
@ -480,28 +438,33 @@
# valued)
#vmware_datastores =
# Directory to which the Filesystem backend store writes images.
# RADOS images will be chunked into objects of this size (in
# megabytes). For best performance, this should be a power of two.
# (integer value)
#rbd_store_chunk_size = 8
# RADOS pool in which images are stored. (string value)
#rbd_store_pool = images
# RADOS user to authenticate as (only applicable if using Cephx. If
# <None>, a default will be chosen based on the client. section in
# rbd_store_ceph_conf) (string value)
#rbd_store_user = <None>
# Ceph configuration file path. If <None>, librados will locate the
# default config. If using cephx authentication, this file should
# include a reference to the right keyring in a client.<USER> section
# (string value)
#filesystem_store_datadir = <None>
#rbd_store_ceph_conf = /etc/ceph/ceph.conf
# List of directories and its priorities to which the Filesystem
# backend store writes images. (multi valued)
#filesystem_store_datadirs =
# Hostname or IP address of the instance to connect to, or a mongodb
# URI, or a list of hostnames / mongodb URIs. If host is an IPv6
# literal it must be enclosed in '[' and ']' characters following the
# RFC2732 URL syntax (e.g. '[::1]' for localhost) (string value)
#mongodb_store_uri = <None>
# The path to a file which contains the metadata to be returned with
# any location associated with this store. The file must contain a
# valid JSON object. The object should contain the keys 'id' and
# 'mountpoint'. The value for both keys should be 'string'. (string
# value)
#filesystem_store_metadata_file = <None>
# The required permission for created image file. In this way the user
# other service used, e.g. Nova, who consumes the image could be the
# exclusive member of the group that owns the files created. Assigning
# it less then or equal to zero means don't change the default
# permission of the file. This value will be decoded as an octal
# digit. (integer value)
#filesystem_store_file_perm = 0
# Database to use (string value)
#mongodb_store_db = <None>
# Images will be chunked into objects of this size (in megabytes). For
# best performance, this should be a power of two. (integer value)
@ -513,6 +476,43 @@
# IP address of sheep daemon. (string value)
#sheepdog_store_address = localhost
# The host where the S3 server is listening. (string value)
#s3_store_host = <None>
# The S3 query token access key. (string value)
#s3_store_access_key = <None>
# The S3 query token secret key. (string value)
#s3_store_secret_key = <None>
# The S3 bucket to be used to store the Glance data. (string value)
#s3_store_bucket = <None>
# The local directory where uploads will be staged before they are
# transferred into S3. (string value)
#s3_store_object_buffer_dir = <None>
# A boolean to determine if the S3 bucket should be created on upload
# if it does not exist or if an error should be returned to the user.
# (boolean value)
#s3_store_create_bucket_on_put = false
# The S3 calling format used to determine the bucket. Either subdomain
# or path can be used. (string value)
#s3_store_bucket_url_format = subdomain
# What size, in MB, should S3 start chunking image files and do a
# multipart upload in S3. (integer value)
#s3_store_large_object_size = 100
# What multipart upload part size, in MB, should S3 use when uploading
# parts. The size must be greater than or equal to 5M. (integer value)
#s3_store_large_object_chunk_size = 10
# The number of thread pools to perform a multipart upload in S3.
# (integer value)
#s3_store_thread_pools = 10
[matchmaker_redis]

0
faafo/api/__init__.py Normal file
View File

View File

@ -11,16 +11,21 @@
# under the License.
import copy
from pkg_resources import resource_filename
import flask
import flask.ext.restless
import flask.ext.sqlalchemy
from flask_bootstrap import Bootstrap
import glance_store
from oslo_config import cfg
from oslo_log import log
from faafo import version
LOG = log.getLogger('faafo.api')
CONF = cfg.CONF
glance_store.register_opts(CONF)
api_opts = [
cfg.StrOpt('listen-address',
@ -34,21 +39,26 @@ api_opts = [
help='Database connection URL.')
]
cfg.CONF.register_opts(api_opts)
CONF.register_opts(api_opts)
log.register_options(cfg.CONF)
log.register_options(CONF)
log.set_defaults()
cfg.CONF(project='api', prog='faafo-api',
version=version.version_info.version_string())
CONF(project='api', prog='faafo-api',
version=version.version_info.version_string())
log.setup(cfg.CONF, 'api',
log.setup(CONF, 'api',
version=version.version_info.version_string())
app = flask.Flask('faafo.api')
app.config['DEBUG'] = cfg.CONF.debug
app.config['SQLALCHEMY_DATABASE_URI'] = cfg.CONF.database_url
template_path = resource_filename(__name__, "templates")
app = flask.Flask('faafo.api', template_folder=template_path)
app.config['DEBUG'] = CONF.debug
app.config['SQLALCHEMY_DATABASE_URI'] = CONF.database_url
db = flask.ext.sqlalchemy.SQLAlchemy(app)
Bootstrap(app)
glance_store.create_stores(CONF)
glance_store.verify_default_store()
def list_opts():
@ -59,7 +69,9 @@ def list_opts():
class Fractal(db.Model):
uuid = db.Column(db.String(36), primary_key=True)
checksum = db.Column(db.String(256), unique=True)
url = db.Column(db.String(256), nullable=True)
duration = db.Column(db.Float)
size = db.Column(db.Integer, nullable=True)
width = db.Column(db.Integer, nullable=False)
height = db.Column(db.Integer, nullable=False)
iterations = db.Column(db.Integer, nullable=False)
@ -76,9 +88,35 @@ db.create_all()
manager = flask.ext.restless.APIManager(app, flask_sqlalchemy_db=db)
@app.route('/', methods=['GET'])
@app.route('/index', methods=['GET'])
@app.route('/index/<int:page>', methods=['GET'])
def index(page=1):
    """Render one page (5 rows) of completed fractals for the web UI."""
    # A fractal is "completed" once the worker has stored both its
    # checksum and its size; unfinished rows are hidden from the listing.
    completed = (Fractal.checksum != None) & (Fractal.size != None)  # noqa
    fractals = Fractal.query.filter(completed).paginate(
        page, 5, error_out=False)
    return flask.render_template('index.html', fractals=fractals)
@app.route('/fractal/<string:fractalid>', methods=['GET'])
def get_fractal(fractalid):
    """Serve the rendered PNG for the fractal identified by *fractalid*.

    Looks the fractal up by UUID. On a miss, returns a JSON 404 body;
    on a hit, streams the image bytes back from the glance store.
    """
    fractal = Fractal.query.filter_by(uuid=fractalid).first()
    if not fractal:
        # Fixed typo in the user-facing message ('Fracal' -> 'Fractal').
        response = flask.jsonify({'code': 404,
                                  'message': 'Fractal not found'})
        response.status_code = 404
    else:
        # fractal.url is the glance-store location saved by the worker;
        # the returned size is unused here, only the file handle matters.
        image, imagesize = glance_store.get_from_backend(fractal.url)
        response = flask.make_response(image.fp.read())
        response.content_type = "image/png"
    return response
def main():
manager.create_api(Fractal, methods=['GET', 'POST', 'DELETE', 'PUT'])
app.run(host=cfg.CONF.listen_address, port=cfg.CONF.bind_port)
manager.create_api(Fractal, methods=['GET', 'POST', 'DELETE', 'PUT'],
url_prefix='/v1')
app.run(host=CONF.listen_address, port=CONF.bind_port)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,56 @@
{% extends "bootstrap/base.html" %}
{% block title %}First App Application for OpenStack{% endblock %}
{% from "bootstrap/pagination.html" import render_pagination %}
{% block content %}
{{render_pagination(fractals)}}
{% for fractal in fractals.items %}
<div class="row">
<div class="col-xs-5 col-md-3">
<a href="/fractal/{{ fractal.uuid }}" class="thumbnail">
<img src="/fractal/{{ fractal.uuid }}" style="max-height: 300px; max-width: 300px">
</a>
</div>
<div class="col-xs-7 col-md-9">
<table class="table table-striped">
<tbody>
<tr>
<td>UUID</td>
<td>{{ fractal.uuid }}</td>
</tr>
<tr>
<td>Duration</td>
<td>{{ fractal.duration }} seconds</td>
</tr>
<tr>
<td>Dimensions</td>
<td>{{ fractal.width }} x {{ fractal.height }} px</td>
</tr>
<tr>
<td>Iterations</td>
<td>{{ fractal.iterations }}</td>
</tr>
<tr>
<td>Parameters</td>
<td>
<pre>xa = {{ fractal.xa }}
xb = {{ fractal.xb }}
ya = {{ fractal.ya }}
yb = {{ fractal.yb }}</pre>
</td>
</tr>
<tr>
<td>Filesize</td>
<td>{{ fractal.size}} bytes</td>
</tr>
<tr>
<td>Checksum</td>
<td><pre>{{ fractal.checksum }}</pre></td>
</tr>
</tbody>
</table>
</div>
</div>
{% endfor %}
{{render_pagination(fractals)}}
{% endblock %}

View File

@ -98,12 +98,12 @@ class ProducerService(service.Service, periodic_task.PeriodicTasks):
# NOTE(berendt): only necessary when using requests < 2.4.2
headers = {'Content-type': 'application/json',
'Accept': 'text/plain'}
requests.post("%s/api/fractal" % CONF.endpoint_url,
requests.post("%s/v1/fractal" % CONF.endpoint_url,
json.dumps(task), headers=headers)
LOG.info("generated task: %s" % task)
result = self._client.call(ctxt, 'process', task=task)
LOG.info("task %s processed: %s" % (task['uuid'], result))
requests.put("%s/api/fractal/%s" %
requests.put("%s/v1/fractal/%s" %
(CONF.endpoint_url, str(task['uuid'])),
json.dumps(result), headers=headers)

View File

@ -108,7 +108,7 @@ class WorkerEndpoint(object):
(task['uuid'], filename))
with open(filename, 'rb') as fp:
size = os.fstat(fp.fileno()).st_size
glance_store.add_to_backend(CONF, task['uuid'], fp, size)
glance = glance_store.add_to_backend(CONF, task['uuid'], fp, size)
checksum = hashlib.sha256(open(filename, 'rb').read()).hexdigest()
LOG.debug("checksum for task %s: %s" % (task['uuid'], checksum))
os.remove(filename)
@ -116,7 +116,9 @@ class WorkerEndpoint(object):
result = {
'uuid': task['uuid'],
'duration': elapsed_time,
'checksum': checksum
'checksum': checksum,
'url': glance[0],
'size': glance[1]
}
return result

View File

@ -4,9 +4,10 @@ eventlet>=0.16.1,!=0.17.0
PyMySQL>=0.6.2 # MIT License
Pillow==2.4.0 # MIT
requests>=2.2.0,!=2.4.0
Flask-Bootstrap
Flask>=0.10,<1.0
flask-sqlalchemy
flask-restless
flask-sqlalchemy
oslo.config>=1.9.3,<1.10.0 # Apache-2.0
oslo.log>=1.0.0,<1.1.0 # Apache-2.0
oslo.messaging>=1.8.0,<1.9.0 # Apache-2.0

View File

@ -23,6 +23,8 @@ packages =
scripts =
bin/faafo-producer
bin/faafo-worker
extra_files =
faafo/api/templates/index.html
[global]
setup-hooks =
@ -30,9 +32,9 @@ setup-hooks =
[entry_points]
console_scripts =
faafo-api = faafo.api:main
faafo-api = faafo.api.service:main
oslo.config.opts =
faafo.api = faafo.api:list_opts
faafo.api = faafo.api.service:list_opts
faafo.producer = faafo.producer.service:list_opts
[build_sphinx]