Delete more glance code

Change-Id: I2a0c54a59489819466d448ddf9c04b8e4acdc51a
Signed-off-by: Zhijiang Hu <hu.zhijiang@zte.com.cn>
Zhijiang Hu 2016-09-18 23:58:04 -04:00
parent 43eb87e448
commit 1620cf1b02
259 changed files with 17188 additions and 7714 deletions


@@ -1,125 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Controller for Image Cache Management API
"""
import webob.exc
from daisy.api import policy
from daisy.api.v1 import controller
from daisy.common import exception
from daisy.common import wsgi
from daisy import image_cache
class Controller(controller.BaseController):
"""
A controller for managing cached images.
"""
def __init__(self):
self.cache = image_cache.ImageCache()
self.policy = policy.Enforcer()
def _enforce(self, req):
"""Authorize request against 'manage_image_cache' policy"""
try:
self.policy.enforce(req.context, 'manage_image_cache', {})
except exception.Forbidden:
raise webob.exc.HTTPForbidden()
def get_cached_images(self, req):
"""
GET /cached_images
Returns a mapping of records about cached images.
"""
self._enforce(req)
images = self.cache.get_cached_images()
return dict(cached_images=images)
def delete_cached_image(self, req, image_id):
"""
DELETE /cached_images/<IMAGE_ID>
Removes an image from the cache.
"""
self._enforce(req)
self.cache.delete_cached_image(image_id)
def delete_cached_images(self, req):
"""
DELETE /cached_images - Clear all active cached images
Removes all images from the cache.
"""
self._enforce(req)
return dict(num_deleted=self.cache.delete_all_cached_images())
def get_queued_images(self, req):
"""
GET /queued_images
Returns a mapping of records about queued images.
"""
self._enforce(req)
images = self.cache.get_queued_images()
return dict(queued_images=images)
def queue_image(self, req, image_id):
"""
PUT /queued_images/<IMAGE_ID>
Queues an image for caching. We do not check to see if
the image is in the registry here. That is done by the
prefetcher...
"""
self._enforce(req)
self.cache.queue_image(image_id)
def delete_queued_image(self, req, image_id):
"""
DELETE /queued_images/<IMAGE_ID>
Removes an image from the cache.
"""
self._enforce(req)
self.cache.delete_queued_image(image_id)
def delete_queued_images(self, req):
"""
DELETE /queued_images - Clear all active queued images
Removes all images from the cache.
"""
self._enforce(req)
return dict(num_deleted=self.cache.delete_all_queued_images())
class CachedImageDeserializer(wsgi.JSONRequestDeserializer):
pass
class CachedImageSerializer(wsgi.JSONResponseSerializer):
pass
def create_resource():
"""Cached Images resource factory method"""
deserializer = CachedImageDeserializer()
serializer = CachedImageSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
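
For reference, a minimal client-side sketch of the API this controller serves. The endpoint paths come from the cache_manage middleware further down in this commit; the base URL and token below are illustrative placeholders, not part of the deleted code.

import requests

BASE = 'http://127.0.0.1:9292/v1'
HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}  # placeholder credentials

# get_cached_images returns {"cached_images": [...]}
cached = requests.get(BASE + '/cached_images', headers=HEADERS).json()
print(cached['cached_images'])

# queue_image: PUT /queued_images/<IMAGE_ID> queues an image for caching
requests.put(BASE + '/queued_images/<IMAGE_ID>', headers=HEADERS)

# delete_cached_images returns {"num_deleted": N}
deleted = requests.delete(BASE + '/cached_images', headers=HEADERS).json()
print(deleted['num_deleted'])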


@@ -1,323 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Transparent image file caching middleware, designed to live on
Glance API nodes. When images are requested from the API node,
this middleware caches the returned image file to local filesystem.
When subsequent requests for the same image file are received,
the local cached copy of the image file is returned.
"""
import re
from oslo_log import log as logging
import webob
from daisy.api.common import size_checked_iter
from daisy.api import policy
from daisy.api.v1 import images
from daisy.common import exception
from daisy.common import utils
from daisy.common import wsgi
import daisy.db
from daisy import i18n
from daisy import image_cache
from daisy import notifier
import daisy.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_LI = i18n._LI
_LE = i18n._LE
_LW = i18n._LW
PATTERNS = {
('v1', 'GET'): re.compile(r'^/v1/images/([^\/]+)$'),
('v1', 'DELETE'): re.compile(r'^/v1/images/([^\/]+)$'),
('v2', 'GET'): re.compile(r'^/v2/images/([^\/]+)/file$'),
('v2', 'DELETE'): re.compile(r'^/v2/images/([^\/]+)$')
}
class CacheFilter(wsgi.Middleware):
def __init__(self, app):
self.cache = image_cache.ImageCache()
self.serializer = images.ImageSerializer()
self.policy = policy.Enforcer()
LOG.info(_LI("Initialized image cache middleware"))
super(CacheFilter, self).__init__(app)
def _verify_metadata(self, image_meta):
"""
Sanity check the 'deleted' and 'size' metadata values.
"""
# NOTE: admins can see image metadata in the v1 API, but shouldn't
# be able to download the actual image data.
if image_meta['status'] == 'deleted' and image_meta['deleted']:
raise exception.NotFound()
if not image_meta['size']:
# override image size metadata with the actual cached
# file size, see LP Bug #900959
image_meta['size'] = self.cache.get_image_size(image_meta['id'])
@staticmethod
def _match_request(request):
"""Determine the version of the url and extract the image id
:returns: a (version, method, image_id) tuple if the URL is cacheable,
otherwise None
"""
for ((version, method), pattern) in PATTERNS.items():
if request.method != method:
continue
match = pattern.match(request.path_info)
if match is None:
continue
image_id = match.group(1)
# Ensure the image id we got looks like an image id to filter
# out a URI like /images/detail. See LP Bug #879136
if image_id != 'detail':
return (version, method, image_id)
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg, request=req)
def _get_v1_image_metadata(self, request, image_id):
"""
Retrieves image metadata from the registry for the v1 API and creates a
dictionary-like mash-up of image core and custom properties.
"""
try:
image_metadata = registry.get_image_metadata(request.context,
image_id)
return utils.create_mashup_dict(image_metadata)
except exception.NotFound as e:
LOG.debug("No metadata found for image '%s'" % image_id)
raise webob.exc.HTTPNotFound(explanation=e.msg, request=request)
def _get_v2_image_metadata(self, request, image_id):
"""
Retrieves the image for the v2 API and creates an adapter-like object
to access image core or custom properties on the request.
"""
db_api = daisy.db.get_api()
image_repo = daisy.db.ImageRepo(request.context, db_api)
try:
image = image_repo.get(image_id)
# Storing image object in request as it is required in
# _process_v2_request call.
request.environ['api.cache.image'] = image
return policy.ImageTarget(image)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg, request=request)
def process_request(self, request):
"""
For requests for an image file, we check the local image
cache. If present, we return the image file, appending
the image metadata in headers. If not present, we pass
the request on to the next application in the pipeline.
"""
match = self._match_request(request)
try:
(version, method, image_id) = match
except TypeError:
# Trying to unpack None raises this exception
return None
self._stash_request_info(request, image_id, method, version)
if request.method != 'GET' or not self.cache.is_cached(image_id):
return None
method = getattr(self, '_get_%s_image_metadata' % version)
image_metadata = method(request, image_id)
# Deactivated images shall not be served from cache
if image_metadata['status'] == 'deactivated':
return None
try:
self._enforce(request, 'download_image', target=image_metadata)
except exception.Forbidden:
return None
LOG.debug("Cache hit for image '%s'", image_id)
image_iterator = self.get_from_cache(image_id)
method = getattr(self, '_process_%s_request' % version)
try:
return method(request, image_id, image_iterator, image_metadata)
except exception.NotFound:
msg = _LE("Image cache contained image file for image '%s', "
"however the registry did not contain metadata for "
"that image!") % image_id
LOG.error(msg)
self.cache.delete_cached_image(image_id)
@staticmethod
def _stash_request_info(request, image_id, method, version):
"""
Preserve the image id, version and request method for later retrieval
"""
request.environ['api.cache.image_id'] = image_id
request.environ['api.cache.method'] = method
request.environ['api.cache.version'] = version
@staticmethod
def _fetch_request_info(request):
"""
Preserve the cached image id, version for consumption by the
process_response method of this middleware
"""
try:
image_id = request.environ['api.cache.image_id']
method = request.environ['api.cache.method']
version = request.environ['api.cache.version']
except KeyError:
return None
else:
return (image_id, method, version)
def _process_v1_request(self, request, image_id, image_iterator,
image_meta):
# Don't display location
if 'location' in image_meta:
del image_meta['location']
image_meta.pop('location_data', None)
self._verify_metadata(image_meta)
response = webob.Response(request=request)
raw_response = {
'image_iterator': image_iterator,
'image_meta': image_meta,
}
return self.serializer.show(response, raw_response)
def _process_v2_request(self, request, image_id, image_iterator,
image_meta):
# We do some contortions to get the image_metadata so
# that we can provide it to 'size_checked_iter' which
# will generate a notification.
# TODO(mclaren): Make notification happen more
# naturally once caching is part of the domain model.
image = request.environ['api.cache.image']
self._verify_metadata(image_meta)
response = webob.Response(request=request)
response.app_iter = size_checked_iter(response, image_meta,
image_meta['size'],
image_iterator,
notifier.Notifier())
# NOTE (flwang): Set the content-type, content-md5 and content-length
# explicitly to be consistent with the non-cache scenario.
# Besides, it's not worth the candle to invoke the "download" method
# of ResponseSerializer under image_data. Because method "download"
# will reset the app_iter. Then we have to call method
# "size_checked_iter" to avoid missing any notification. But after
# call "size_checked_iter", we will lose the content-md5 and
# content-length got by the method "download" because of this issue:
# https://github.com/Pylons/webob/issues/86
response.headers['Content-Type'] = 'application/octet-stream'
response.headers['Content-MD5'] = image.checksum
response.headers['Content-Length'] = str(image.size)
return response
def process_response(self, resp):
"""
We intercept the response coming back from the main
images Resource, removing image file from the cache
if necessary
"""
status_code = self.get_status_code(resp)
if not 200 <= status_code < 300:
return resp
try:
(image_id, method, version) = self._fetch_request_info(
resp.request)
except TypeError:
return resp
if method == 'GET' and status_code == 204:
# Bugfix:1251055 - Don't cache non-existent image files.
# NOTE: Both GET for an image without locations and DELETE return
# 204 but DELETE should be processed.
return resp
method_str = '_process_%s_response' % method
try:
process_response_method = getattr(self, method_str)
except AttributeError:
LOG.error(_LE('could not find %s') % method_str)
# Nothing to do here, move along
return resp
else:
return process_response_method(resp, image_id, version=version)
def _process_DELETE_response(self, resp, image_id, version=None):
if self.cache.is_cached(image_id):
LOG.debug("Removing image %s from cache", image_id)
self.cache.delete_cached_image(image_id)
return resp
def _process_GET_response(self, resp, image_id, version=None):
image_checksum = resp.headers.get('Content-MD5')
if not image_checksum:
# API V1 stores the checksum in a different header:
image_checksum = resp.headers.get('x-image-meta-checksum')
if not image_checksum:
LOG.error(_LE("Checksum header is missing."))
# fetch image_meta on the basis of version
image_metadata = None
if version:
method = getattr(self, '_get_%s_image_metadata' % version)
image_metadata = method(resp.request, image_id)
# NOTE(zhiyan): image_cache returns a generator object that is set as
# response.app_iter and only consumed by eventlet.wsgi later. So we
# need to enforce the policy here in the application first, since
# eventlet.wsgi cannot catch webob.exc.HTTPForbidden and return a 403
# error to the client at that point.
self._enforce(resp.request, 'download_image', target=image_metadata)
resp.app_iter = self.cache.get_caching_iter(image_id, image_checksum,
resp.app_iter)
return resp
def get_status_code(self, response):
"""
Returns the integer status code from the response, which
can be either a webob.Response (used in testing) or httplib.Response
"""
if hasattr(response, 'status_int'):
return response.status_int
return response.status
def get_from_cache(self, image_id):
"""Called if cache hit"""
with self.cache.open_for_read(image_id) as cache_file:
chunks = utils.chunkiter(cache_file)
for chunk in chunks:
yield chunk
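
The cacheability test in _match_request is self-contained; here is a runnable sketch of the same matching logic, with no daisy imports, for anyone tracing why a request does or does not hit the cache:

import re

PATTERNS = {
    ('v1', 'GET'): re.compile(r'^/v1/images/([^\/]+)$'),
    ('v1', 'DELETE'): re.compile(r'^/v1/images/([^\/]+)$'),
    ('v2', 'GET'): re.compile(r'^/v2/images/([^\/]+)/file$'),
    ('v2', 'DELETE'): re.compile(r'^/v2/images/([^\/]+)$'),
}

def match_request(method, path_info):
    """Return (version, method, image_id) for cacheable URLs, else None."""
    for (version, pat_method), pattern in PATTERNS.items():
        if method != pat_method:
            continue
        match = pattern.match(path_info)
        if match is None:
            continue
        image_id = match.group(1)
        # Filter out URIs like /images/detail, see LP Bug #879136
        if image_id != 'detail':
            return (version, method, image_id)

assert match_request('GET', '/v2/images/abc-123/file') == ('v2', 'GET', 'abc-123')
assert match_request('GET', '/v1/images/detail') is None
assert match_request('POST', '/v1/images/abc-123') is None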


@@ -1,85 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Image Cache Management API
"""
from oslo_log import log as logging
import routes
from daisy.api import cached_images
from daisy.common import wsgi
from daisy import i18n
LOG = logging.getLogger(__name__)
_LI = i18n._LI
class CacheManageFilter(wsgi.Middleware):
def __init__(self, app):
mapper = routes.Mapper()
resource = cached_images.create_resource()
mapper.connect("/v1/cached_images",
controller=resource,
action="get_cached_images",
conditions=dict(method=["GET"]))
mapper.connect("/v1/cached_images/{image_id}",
controller=resource,
action="delete_cached_image",
conditions=dict(method=["DELETE"]))
mapper.connect("/v1/cached_images",
controller=resource,
action="delete_cached_images",
conditions=dict(method=["DELETE"]))
mapper.connect("/v1/queued_images/{image_id}",
controller=resource,
action="queue_image",
conditions=dict(method=["PUT"]))
mapper.connect("/v1/queued_images",
controller=resource,
action="get_queued_images",
conditions=dict(method=["GET"]))
mapper.connect("/v1/queued_images/{image_id}",
controller=resource,
action="delete_queued_image",
conditions=dict(method=["DELETE"]))
mapper.connect("/v1/queued_images",
controller=resource,
action="delete_queued_images",
conditions=dict(method=["DELETE"]))
self._mapper = mapper
self._resource = resource
LOG.info(_LI("Initialized image cache management middleware"))
super(CacheManageFilter, self).__init__(app)
def process_request(self, request):
# Map request to our resource object if we can handle it
match = self._mapper.match(request.path_info, request.environ)
if match:
request.environ['wsgiorg.routing_args'] = (None, match)
return self._resource(request)
# Pass off downstream if we don't match the request path
else:
return None
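
A stripped-down sketch of the routes.Mapper dispatch this middleware relies on, runnable with only the routes library installed (the controller string here is a stub, not a real resource):

import routes

mapper = routes.Mapper()
mapper.connect("/v1/cached_images", controller="cache",
               action="get_cached_images",
               conditions=dict(method=["GET"]))
mapper.connect("/v1/queued_images/{image_id}", controller="cache",
               action="queue_image",
               conditions=dict(method=["PUT"]))

# The method condition is read from the WSGI environ, as in process_request.
match = mapper.match('/v1/queued_images/abc-123',
                     {'REQUEST_METHOD': 'PUT'})
print(match)  # {'controller': 'cache', 'action': 'queue_image',
              #  'image_id': 'abc-123'}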


@@ -1,66 +0,0 @@
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Use gzip compression if the client accepts it.
"""
import re
from oslo_log import log as logging
from daisy.common import wsgi
from daisy import i18n
LOG = logging.getLogger(__name__)
_LI = i18n._LI
class GzipMiddleware(wsgi.Middleware):
re_zip = re.compile(r'\bgzip\b')
def __init__(self, app):
LOG.info(_LI("Initialized gzip middleware"))
super(GzipMiddleware, self).__init__(app)
def process_response(self, response):
request = response.request
accept_encoding = request.headers.get('Accept-Encoding', '')
if self.re_zip.search(accept_encoding):
# NOTE(flaper87): Webob removes the content-md5 when
# app_iter is called. We'll keep it and reset it later
checksum = response.headers.get("Content-MD5")
# NOTE(flaper87): We'll use lazy for images so
# that they can be compressed without reading
# the whole content in memory. Notice that using
# lazy will set response's content-length to 0.
content_type = response.headers["Content-Type"]
lazy = content_type == "application/octet-stream"
# NOTE(flaper87): Webob takes care of the compression
# process; it will replace the body either with a
# compressed body or a generator - used for lazy
# compression - depending on the lazy value.
#
# Webob itself will set the Content-Encoding header.
response.encode_content(lazy=lazy)
if checksum:
response.headers['Content-MD5'] = checksum
return response
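
The lazy/non-lazy distinction above can be seen with webob alone; a small sketch (bodies are illustrative):

import webob

resp = webob.Response(body=b'not an image, just bytes ' * 100)
resp.headers['Content-Type'] = 'text/plain'
resp.encode_content(lazy=False)  # compresses the body in place
print(resp.headers['Content-Encoding'])  # gzip
print(resp.content_length)  # length of the compressed body

lazy_resp = webob.Response(app_iter=iter([b'chunk1', b'chunk2']))
lazy_resp.encode_content(lazy=True)  # wraps app_iter in a gzip generator
print(lazy_resp.content_length)  # no longer known up front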


@@ -1,109 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A filter middleware that inspects the requested URI for a version string
and/or Accept headers and attempts to negotiate an API controller to
return
"""
from oslo_config import cfg
from oslo_log import log as logging
from daisy.api import versions
from daisy.common import wsgi
from daisy import i18n
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ = i18n._
_LW = i18n._LW
class VersionNegotiationFilter(wsgi.Middleware):
def __init__(self, app):
self.versions_app = versions.Controller()
super(VersionNegotiationFilter, self).__init__(app)
def process_request(self, req):
"""Try to find a version first in the accept header, then the URL"""
msg = _("Determining version of request: %(method)s %(path)s"
" Accept: %(accept)s")
args = {'method': req.method, 'path': req.path, 'accept': req.accept}
LOG.debug(msg % args)
# If the request is for /versions, just return the versions container
# TODO(bcwaldon): deprecate this behavior
if req.path_info_peek() == "versions":
return self.versions_app
accept = str(req.accept)
if accept.startswith('application/vnd.openstack.images-'):
LOG.debug("Using media-type versioning")
token_loc = len('application/vnd.openstack.images-')
req_version = accept[token_loc:]
else:
LOG.debug("Using url versioning")
# Remove version in url so it doesn't conflict later
req_version = self._pop_path_info(req)
try:
version = self._match_version_string(req_version)
except ValueError:
LOG.warn(_LW("Unknown version. Returning version choices."))
return self.versions_app
req.environ['api.version'] = version
req.path_info = ''.join(('/v', str(version), req.path_info))
LOG.debug("Matched version: v%d", version)
LOG.debug('new path %s', req.path_info)
return None
def _match_version_string(self, subject):
"""
Given a string, tries to match a major and/or
minor version number.
:param subject: The string to check
:returns: the major version number found in the subject
:raises: ValueError if no acceptable version could be found
"""
if subject in ('v1', 'v1.0', 'v1.1') and CONF.enable_v1_api:
major_version = 1
elif subject in ('v2', 'v2.0', 'v2.1', 'v2.2') and CONF.enable_v2_api:
major_version = 2
else:
raise ValueError()
return major_version
def _pop_path_info(self, req):
"""
'Pops' off the next segment of PATH_INFO, returns the popped
segment. Do NOT push it onto SCRIPT_NAME.
"""
path = req.path_info
if not path:
return None
while path.startswith('/'):
path = path[1:]
idx = path.find('/')
if idx == -1:
idx = len(path)
r = path[:idx]
req.path_info = path[idx:]
return r
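
Both negotiation paths reduce to simple string handling; a self-contained sketch of the media-type branch and the URL-popping branch:

def version_from_accept(accept):
    """Mirror the media-type versioning branch of process_request."""
    prefix = 'application/vnd.openstack.images-'
    if accept.startswith(prefix):
        return accept[len(prefix):]
    return None

def pop_path_info(path):
    """Mirror _pop_path_info: split off the first path segment."""
    path = path.lstrip('/')
    idx = path.find('/')
    if idx == -1:
        idx = len(path)
    return path[:idx], path[idx:]

print(version_from_accept('application/vnd.openstack.images-v2'))  # v2
print(pop_path_info('/v1/images/abc'))  # ('v1', '/images/abc')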


@@ -1,224 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Initialize network configuration for neutron
"""
import time
from oslo_log import log as logging
import daisy.registry.client.v1.api as registry
from webob.exc import HTTPBadRequest
from neutronclient.v2_0 import client as clientv20
from daisy.common import exception
LOG = logging.getLogger(__name__)
class network(object):
"""
network config
"""
def __init__(self, req, neutron_host, keystone_host, cluster_id):
registry.configure_registry_client()
auth_url = 'http://' + keystone_host + ':35357/v2.0'
end_url = 'http://' + neutron_host + ':9696'
params = {'username': 'admin',
'ca_cert': None,
'tenant_name': 'admin',
'insecure': False,
'auth_url': auth_url,
'timeout': 30,
'password': 'keystone',
'endpoint_url': end_url,
'auth_strategy': 'keystone'
}
self.cluster_id = cluster_id
self.neutron = clientv20.Client(**params)
try:
cluster = registry.get_cluster_metadata(req.context, cluster_id)
except exception.Invalid as e:
LOG.exception(e.msg)
raise HTTPBadRequest(explanation=e.msg, request=req)
LOG.info("<<<CLUSTER:%s,NEUTRON HOST:%s,KEYSTOEN:%s>>>",
cluster, neutron_host, keystone_host)
if 'logic_networks' in cluster and cluster[
'logic_networks'] is not None:
self.nets = cluster['logic_networks']
# self._flat_network_uniqueness_check()
if 'routers' in cluster and cluster['routers'] is not None:
self.routers = cluster['routers']
else:
self.routers = []
self._network_check()
self.name_mappings = {}
self._network_config()
def _router_create(self, name):
body = {}
body['router'] = {"name": name, "admin_state_up": True}
router = self.neutron.create_router(body)
return router['router']['id']
def _subnet_create(self, net_id, **kwargs):
body = {}
body['subnet'] = {'enable_dhcp': True,
'network_id': net_id,
'ip_version': 4
}
body['subnet'].update(kwargs)
LOG.info("<<<BODY:%s>>>", body)
subnet = self.neutron.create_subnet(body)
return subnet['subnet']['id']
def _router_link(self):
for router in self.routers:
router_id = self._router_create(router['name'])
if 'external_logic_network' in router:
body = {
'network_id': self.name_mappings[
router['external_logic_network']]}
self.neutron.add_gateway_router(router_id, body)
if 'subnets' in router:
for i in router['subnets']:
body = {'subnet_id': self.name_mappings[i]}
self.neutron.add_interface_router(router_id, body)
def _net_subnet_same_router_check(self, ex_network, subnet):
for router in self.routers:
if 'external_logic_network' in router and router[
'external_logic_network'] == ex_network:
if 'subnets' in router:
for i in router['subnets']:
if i == subnet:
return True
return False
def _subnet_check_and_create(self, net_id, subnet):
kwargs = {}
key_list = ['name', 'cidr', 'floating_ranges', 'dns_nameservers']
for i in key_list:
if i not in subnet:
raise exception.Invalid()
kwargs['name'] = subnet['name']
kwargs['cidr'] = subnet['cidr']
if len(subnet['dns_nameservers']) != 0:
kwargs['dns_nameservers'] = subnet['dns_nameservers']
kwargs['allocation_pools'] = []
if len(subnet['floating_ranges']) != 0:
for pool in subnet['floating_ranges']:
if len(pool) != 2:
raise exception.Invalid()
else:
alloc_pool = {}
alloc_pool['start'] = pool[0]
alloc_pool['end'] = pool[1]
kwargs['allocation_pools'].append(alloc_pool)
if 'gateway' in subnet and subnet['gateway'] is not None:
kwargs['gateway_ip'] = subnet['gateway']
subnet_id = self._subnet_create(net_id, **kwargs)
return subnet_id
def _network_check(self):
execute_times = 0
while True:
try:
nets = self.neutron.list_networks()
except Exception:
LOG.info("cannot connect to neutron server, sleeping 5s before retrying")
time.sleep(5)
execute_times += 1
if execute_times >= 60:
LOG.error("failed to connect to neutron server, giving up")
break
else:
LOG.info("connected to neutron server successfully")
if 'networks' in nets and len(nets['networks']) > 0:
raise exception.Invalid()
break
def _flat_network_uniqueness_check(self):
flat_mapping = []
for net in self.nets:
if net['physnet_name'] in flat_mapping:
raise exception.Invalid()
else:
if net['segmentation_type'].strip() == 'flat':
flat_mapping.append(net['physnet_name'])
def _network_config(self):
for net in self.nets:
body = {}
if net['type'] == 'external':
body['network'] = {
'name': net['name'],
'router:external': True,
'provider:network_type': net['segmentation_type']}
if net['segmentation_type'].strip() == 'flat':
body['network']['provider:physical_network'] = net[
'physnet_name']
elif net['segmentation_type'].strip() == 'vxlan':
if 'segmentation_id' in net and net[
'segmentation_id'] is not None:
body['network']['provider:segmentation_id'] = net[
'segmentation_id']
else:
if 'segmentation_id' in net and net[
'segmentation_id'] is not None:
body['network']['provider:segmentation_id'] = net[
'segmentation_id']
body['network']['provider:physical_network'] = net[
'physnet_name']
body['network']['shared'] = bool(net['shared'])
external = self.neutron.create_network(body)
self.name_mappings[net['name']] = external['network']['id']
last_create_subnet = []
for subnet in net['subnets']:
if self._net_subnet_same_router_check(
net['name'], subnet['name']):
last_create_subnet.append(subnet)
else:
subnet_id = self._subnet_check_and_create(
external['network']['id'], subnet)
self.name_mappings[subnet['name']] = subnet_id
for subnet in last_create_subnet:
subnet_id = self._subnet_check_and_create(
external['network']['id'], subnet)
self.name_mappings[subnet['name']] = subnet_id
else:
body['network'] = {
'name': net['name'],
'provider:network_type': net['segmentation_type']}
if net['segmentation_type'].strip() == 'vlan':
body['network']['provider:physical_network'] = net[
'physnet_name']
if 'segmentation_id' in net and net[
'segmentation_id'] is not None:
body['network']['provider:segmentation_id'] = net[
'segmentation_id']
body['network']['shared'] = bool(net['shared'])
inner = self.neutron.create_network(body)
self.name_mappings[net['name']] = inner['network']['id']
for subnet in net['subnets']:
subnet_id = self._subnet_check_and_create(
inner['network']['id'], subnet)
self.name_mappings[subnet['name']] = subnet_id
self._router_link()
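
For readers who do not want to trace the branches above, these are the shapes of the bodies this class hands to python-neutronclient; all values here are illustrative:

# Router creation body (_router_create)
router_body = {'router': {'name': 'router1', 'admin_state_up': True}}

# External flat network body (_network_config, 'external' branch)
external_net_body = {'network': {
    'name': 'ext-net',
    'router:external': True,
    'provider:network_type': 'flat',
    'provider:physical_network': 'physnet1',
    'shared': False,
}}

# Subnet body (_subnet_create plus _subnet_check_and_create kwargs)
subnet_body = {'subnet': {
    'enable_dhcp': True,
    'network_id': 'NET_ID',
    'ip_version': 4,
    'name': 'ext-subnet',
    'cidr': '192.168.1.0/24',
    'allocation_pools': [{'start': '192.168.1.100',
                          'end': '192.168.1.200'}],
    'gateway_ip': '192.168.1.1',
}}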


@@ -1,126 +0,0 @@
# Copyright 2013 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from daisy.common import exception
import daisy.domain.proxy
class ProtectedImageFactoryProxy(daisy.domain.proxy.ImageFactory):
def __init__(self, image_factory, context, property_rules):
self.image_factory = image_factory
self.context = context
self.property_rules = property_rules
kwargs = {'context': self.context,
'property_rules': self.property_rules}
super(ProtectedImageFactoryProxy, self).__init__(
image_factory,
proxy_class=ProtectedImageProxy,
proxy_kwargs=kwargs)
def new_image(self, **kwargs):
extra_props = kwargs.pop('extra_properties', {})
extra_properties = {}
for key in extra_props.keys():
if self.property_rules.check_property_rules(key, 'create',
self.context):
extra_properties[key] = extra_props[key]
else:
raise exception.ReservedProperty(property=key)
return super(ProtectedImageFactoryProxy, self).new_image(
extra_properties=extra_properties, **kwargs)
class ProtectedImageRepoProxy(daisy.domain.proxy.Repo):
def __init__(self, image_repo, context, property_rules):
self.context = context
self.image_repo = image_repo
self.property_rules = property_rules
proxy_kwargs = {'context': self.context}
super(ProtectedImageRepoProxy, self).__init__(
image_repo, item_proxy_class=ProtectedImageProxy,
item_proxy_kwargs=proxy_kwargs)
def get(self, image_id):
return ProtectedImageProxy(self.image_repo.get(image_id),
self.context, self.property_rules)
def list(self, *args, **kwargs):
images = self.image_repo.list(*args, **kwargs)
return [ProtectedImageProxy(image, self.context, self.property_rules)
for image in images]
class ProtectedImageProxy(daisy.domain.proxy.Image):
def __init__(self, image, context, property_rules):
self.image = image
self.context = context
self.property_rules = property_rules
self.image.extra_properties = ExtraPropertiesProxy(
self.context,
self.image.extra_properties,
self.property_rules)
super(ProtectedImageProxy, self).__init__(self.image)
class ExtraPropertiesProxy(daisy.domain.ExtraProperties):
def __init__(self, context, extra_props, property_rules):
self.context = context
self.property_rules = property_rules
extra_properties = {}
for key in extra_props.keys():
if self.property_rules.check_property_rules(key, 'read',
self.context):
extra_properties[key] = extra_props[key]
super(ExtraPropertiesProxy, self).__init__(extra_properties)
def __getitem__(self, key):
if self.property_rules.check_property_rules(key, 'read', self.context):
return dict.__getitem__(self, key)
else:
raise KeyError
def __setitem__(self, key, value):
# NOTE(isethi): Exceptions are raised only for actions update, delete
# and create, where the user proactively interacts with the properties.
# A user cannot request to read a specific property, hence reads do
# not raise an exception.
try:
if self.__getitem__(key) is not None:
if self.property_rules.check_property_rules(key, 'update',
self.context):
return dict.__setitem__(self, key, value)
else:
raise exception.ReservedProperty(property=key)
except KeyError:
if self.property_rules.check_property_rules(key, 'create',
self.context):
return dict.__setitem__(self, key, value)
else:
raise exception.ReservedProperty(property=key)
def __delitem__(self, key):
if key not in super(ExtraPropertiesProxy, self).keys():
raise KeyError
if self.property_rules.check_property_rules(key, 'delete',
self.context):
return dict.__delitem__(self, key)
else:
raise exception.ReservedProperty(property=key)
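
All four proxies above defer to a property_rules object with a single check_property_rules(key, action, context) method; a toy implementation of that contract (AllowPrefixRules is illustrative, not part of the tree):

class AllowPrefixRules(object):
    """Permit any action, but only on properties with a given prefix."""

    def __init__(self, prefix):
        self.prefix = prefix

    def check_property_rules(self, key, action, context):
        # A real rules engine also dispatches on action and context roles.
        return key.startswith(self.prefix)

rules = AllowPrefixRules('x_owner_')
ctx = object()  # stand-in for a RequestContext
print(rules.check_property_rules('x_owner_foo', 'update', ctx))  # True
print(rules.check_property_rules('x_admin_foo', 'create', ctx))  # False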


@@ -1,73 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from daisy import i18n
LOG = logging.getLogger(__name__)
_LE = i18n._LE
class TaskExecutor(object):
"""Base class for Asynchronous task executors. It does not support the
execution mechanism.
Provisions the extensible classes with necessary variables to utilize
important Glance modules like, context, task_repo, image_repo,
image_factory.
Note:
It also gives abstraction for the standard pre-processing and
post-processing operations to be executed by a task. These may include
validation checks, security checks, introspection, error handling etc.
The aim is to give developers an abstract sense of the execution
pipeline logic.
Args:
context: daisy.context.RequestContext object for AuthZ and AuthN
checks
task_repo: daisy.db.TaskRepo object which acts as a translator for
daisy.domain.Task and daisy.domain.TaskStub objects
into ORM semantics
image_repo: daisy.db.ImageRepo object which acts as a translator for
daisy.domain.Image object into ORM semantics
image_factory: daisy.domain.ImageFactory object to be used for
creating new images for certain types of tasks viz. import, cloning
"""
def __init__(self, context, task_repo, image_repo, image_factory):
self.context = context
self.task_repo = task_repo
self.image_repo = image_repo
self.image_factory = image_factory
def begin_processing(self, task_id):
task = self.task_repo.get(task_id)
task.begin_processing()
self.task_repo.save(task)
# start running
self._run(task_id, task.type)
def _run(self, task_id, task_type):
task = self.task_repo.get(task_id)
msg = _LE("This execution of Tasks is not setup. Please consult the "
"project documentation for more information on the "
"executors available.")
LOG.error(msg)
task.fail(_LE("Internal error occurred while trying to process task."))
self.task_repo.save(task)
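
Concrete executors only override _run; a minimal sketch of a subclass, assuming the TaskExecutor above is importable and that task objects expose succeed()/fail() as the flows later in this commit assume:

class InlineTaskExecutor(TaskExecutor):
    """Illustrative executor that runs every task synchronously."""

    def _run(self, task_id, task_type):
        task = self.task_repo.get(task_id)
        # ... do the real work for task_type here ...
        task.succeed({'image_id': None})
        self.task_repo.save(task)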


@@ -1,441 +0,0 @@
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import glance_store as store_api
from glance_store import backend
from oslo_config import cfg
import six
from stevedore import named
from taskflow.patterns import linear_flow as lf
from taskflow import retry
from taskflow import task
from daisy.common import exception
from daisy.common.scripts.image_import import main as image_import
from daisy.common.scripts import utils as script_utils
from daisy.common import utils as common_utils
from daisy import i18n
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
CONF = cfg.CONF
class _CreateImage(task.Task):
default_provides = 'image_id'
def __init__(self, task_id, task_type, task_repo, image_repo,
image_factory):
self.task_id = task_id
self.task_type = task_type
self.task_repo = task_repo
self.image_repo = image_repo
self.image_factory = image_factory
super(_CreateImage, self).__init__(
name='%s-CreateImage-%s' % (task_type, task_id))
def execute(self):
task = script_utils.get_task(self.task_repo, self.task_id)
if task is None:
return
task_input = script_utils.unpack_task_input(task)
image = image_import.create_image(
self.image_repo, self.image_factory,
task_input.get('image_properties'), self.task_id)
LOG.debug("Task %(task_id)s created image %(image_id)s" %
{'task_id': task.task_id, 'image_id': image.image_id})
return image.image_id
def revert(self, *args, **kwargs):
# TODO(flaper87): Define the revert rules for images on failures.
# Deleting the image may not be what we want since users could upload
# the image data in a separate step. However, it really depends on
# when the failure happened. I guess we should check if data has been
# written, although at that point failures are (should be) unexpected,
# at least image-workflow wise.
pass
class _ImportToFS(task.Task):
default_provides = 'file_path'
def __init__(self, task_id, task_type, task_repo, uri):
self.task_id = task_id
self.task_type = task_type
self.task_repo = task_repo
self.uri = uri
super(_ImportToFS, self).__init__(
name='%s-ImportToFS-%s' % (task_type, task_id))
if CONF.task.work_dir is None:
msg = (_("%(task_id)s of %(task_type)s not configured "
"properly. Missing work dir: %(work_dir)s") %
{'task_id': self.task_id,
'task_type': self.task_type,
'work_dir': CONF.task.work_dir})
raise exception.BadTaskConfiguration(msg)
self.store = self._build_store()
def _build_store(self):
# NOTE(flaper87): Due to the nice glance_store api (#sarcasm), we're
# forced to build our own config object, register the required options
# (and by required I mean *ALL* of them, even the ones we don't want),
# and create our own store instance by calling a private function.
# This is certainly unfortunate but it's the best we can do until the
# glance_store refactor is done. A good thing is that glance_store is
# under our team's management and it gates on Glance so changes to
# this API will (should?) break task's tests.
conf = cfg.ConfigOpts()
backend.register_opts(conf)
conf.set_override('filesystem_store_datadir',
CONF.task.work_dir,
group='glance_store')
# NOTE(flaper87): Do not even try to judge me for this... :(
# With the glance_store refactor, this code will change, until
# that happens, we don't have a better option and this is the
# least worst one, IMHO.
store = backend._load_store(conf, 'file')
if store is None:
msg = (_("%(task_id)s of %(task_type)s not configured "
"properly. Could not load the filesystem store") %
{'task_id': self.task_id, 'task_type': self.task_type})
raise exception.BadTaskConfiguration(msg)
store.configure()
return store
def execute(self, image_id):
"""Create temp file into store and return path to it
:param image_id: Glance Image ID
"""
# NOTE(flaper87): We've decided to use a separate `work_dir` for
# this task - and tasks coming after this one - as a way to expect
# users to configure a local store for pre-import works on the image
# to happen.
#
# While using any path should be "technically" fine, it's not what
# we recommend as the best solution. For more details on this, please
# refer to the comment in the `_ImportToStore.execute` method.
data = script_utils.get_image_data_iter(self.uri)
# NOTE(jokke): Using .tasks_import to ease debugging. The file name
# is specific so we know exactly where it's coming from.
tmp_id = "%s.tasks_import" % image_id
path = self.store.add(tmp_id, data, 0, context=None)[0]
return path
def revert(self, image_id, result=None, **kwargs):
# NOTE(flaper87): If result is None, it probably
# means this task failed. Otherwise, we would have
# a result from its execution.
if result is None:
return
if os.path.exists(result.split("file://")[-1]):
store_api.delete_from_backend(result)
class _DeleteFromFS(task.Task):
def __init__(self, task_id, task_type):
self.task_id = task_id
self.task_type = task_type
super(_DeleteFromFS, self).__init__(
name='%s-DeleteFromFS-%s' % (task_type, task_id))
def execute(self, file_path):
"""Remove file from the backend
:param file_path: path to the file being deleted
"""
store_api.delete_from_backend(file_path)
class _ImportToStore(task.Task):
def __init__(self, task_id, task_type, image_repo, uri):
self.task_id = task_id
self.task_type = task_type
self.image_repo = image_repo
self.uri = uri
super(_ImportToStore, self).__init__(
name='%s-ImportToStore-%s' % (task_type, task_id))
def execute(self, image_id, file_path=None):
"""Bringing the introspected image to back end store
:param image_id: Glance Image ID
:param file_path: path to the image file
"""
# NOTE(flaper87): There are a couple of interesting bits in the
# interaction between this task and the `_ImportToFS` one. I'll try
# to cover them in this comment.
#
# NOTE(flaper87):
# `_ImportToFS` downloads the image to a dedicated `work_dir` which
# needs to be configured in advance (please refer to the config option
# docs for more info). The motivation behind this is also explained in
# the `_ImportToFS.execute` method.
#
# Due to the fact that we have an `_ImportToFS` task which downloads
# the image data already, we need to be as smart as we can in this task
# to avoid downloading the data several times and reducing the copy or
# write times. There are several scenarios where the interaction
# between this task and `_ImportToFS` could be improved. All these
# scenarios assume the `_ImportToFS` task has been executed before
# and/or in a more abstract scenario, that `file_path` is being
# provided.
#
# Scenario 1: FS Store is Remote, introspection enabled,
# conversion disabled
#
# In this scenario, the user would benefit from having the scratch path
# being the same path as the fs store. Only one write would happen and
# an extra read will happen in order to introspect the image. Note that
# this read is just for the image headers and not the entire file.
#
# Scenario 2: FS Store is remote, introspection enabled,
# conversion enabled
#
# In this scenario, the user would benefit from having a *local* store
# into which the image can be converted. This will require downloading
# the image locally, converting it and then copying the converted image
# to the remote store.
#
# Scenario 3: FS Store is local, introspection enabled,
# conversion disabled
# Scenario 4: FS Store is local, introspection enabled,
# conversion enabled
#
# In both these scenarios the user shouldn't care whether the FS
# store path and the work dir are the same; in fact, they would
# probably benefit from the scratch path and the FS store being the
# same, from a performance perspective. Space wise, regardless
# of the scenario, the user will have to account for it in
# advance.
#
# Let's get to it and identify the different scenarios in the
# implementation.
image = self.image_repo.get(image_id)
image.status = 'saving'
self.image_repo.save(image)
# NOTE(flaper87): Let's dance... and fall
#
# Unfortunately, because of the way our domain layers work and
# the checks done in the FS store, we can't simply rename the file
# and set the location. To do that, we'd have to duplicate the logic
# of each and every one of the domain factories (quota, location, etc)
# and we'd also need to hack the FS store to prevent it from raising
# a "duplication path" error. I'd rather have this task copying the
# image bits one more time than duplicating all that logic.
#
# Since I don't think this should be the definitive solution, I'm
# leaving the code below as a reference for what should happen here
# once the FS store and domain code will be able to handle this case.
#
# if file_path is None:
# image_import.set_image_data(image, self.uri, None)
# return
# NOTE(flaper87): Don't assume the image was stored in the
# work_dir. Think in the case this path was provided by another task.
# Also, let's try to neither assume things nor create "logic"
# dependencies between this task and `_ImportToFS`
#
# base_path = os.path.dirname(file_path.split("file://")[-1])
# NOTE(flaper87): Hopefully just scenarios #3 and #4. I say
# hopefully because nothing prevents the user from using the same
# FS store path as a work dir.
#
# image_path = os.path.join(base_path, image_id)
#
# if (base_path == CONF.glance_store.filesystem_store_datadir or
# base_path in CONF.glance_store.filesystem_store_datadirs):
# os.rename(file_path, image_path)
#
# image_import.set_image_data(image, image_path, None)
image_import.set_image_data(image, file_path or self.uri, None)
class _SaveImage(task.Task):
def __init__(self, task_id, task_type, image_repo):
self.task_id = task_id
self.task_type = task_type
self.image_repo = image_repo
super(_SaveImage, self).__init__(
name='%s-SaveImage-%s' % (task_type, task_id))
def execute(self, image_id):
"""Transition image status to active
:param image_id: Glance Image ID
"""
new_image = self.image_repo.get(image_id)
if new_image.status == 'saving':
# NOTE(flaper87): THIS IS WRONG!
# we should be doing atomic updates to avoid
# race conditions. This happens in other places
# too.
new_image.status = 'active'
self.image_repo.save(new_image)
class _CompleteTask(task.Task):
def __init__(self, task_id, task_type, task_repo):
self.task_id = task_id
self.task_type = task_type
self.task_repo = task_repo
super(_CompleteTask, self).__init__(
name='%s-CompleteTask-%s' % (task_type, task_id))
def execute(self, image_id):
"""Finishing the task flow
:param image_id: Glance Image ID
"""
task = script_utils.get_task(self.task_repo, self.task_id)
if task is None:
return
try:
task.succeed({'image_id': image_id})
except Exception as e:
# NOTE: The message string contains "Error" in it to indicate
# in the task.message that it's an error message for the user.
# TODO(nikhil): need to bring back save_and_reraise_exception when
# necessary
err_msg = ("Error: " + six.text_type(type(e)) + ': ' +
common_utils.exception_to_str(e))
log_msg = err_msg + _LE("Task ID %s") % task.task_id
LOG.exception(log_msg)
task.fail(err_msg)
finally:
self.task_repo.save(task)
LOG.info(_LI("%(task_id)s of %(task_type)s completed") %
{'task_id': self.task_id, 'task_type': self.task_type})
def _get_import_flows(**kwargs):
# NOTE(flaper87): Until we have a better infrastructure to enable
# and disable tasks plugins, hard-code the tasks we know exist,
# instead of loading everything from the namespace. This guarantees
# both, the load order of these plugins and the fact that no random
# plugins will be added/loaded until we feel comfortable with this.
# Future patches will keep using NamedExtensionManager but they'll
# rely on a config option to control this process.
extensions = named.NamedExtensionManager('daisy.flows.import',
names=['convert',
'introspect'],
name_order=True,
invoke_on_load=True,
invoke_kwds=kwargs)
for ext in extensions.extensions:
yield ext.obj
def get_flow(**kwargs):
"""Return task flow
:param task_id: Task ID
:param task_type: Type of the task
:param task_repo: Task repo
:param image_repo: Image repository used
:param image_factory: Glance Image Factory
:param uri: uri for the image file
"""
task_id = kwargs.get('task_id')
task_type = kwargs.get('task_type')
task_repo = kwargs.get('task_repo')
image_repo = kwargs.get('image_repo')
image_factory = kwargs.get('image_factory')
uri = kwargs.get('uri')
flow = lf.Flow(task_type, retry=retry.AlwaysRevert()).add(
_CreateImage(task_id, task_type, task_repo, image_repo, image_factory))
import_to_store = _ImportToStore(task_id, task_type, image_repo, uri)
try:
# NOTE(flaper87): ImportToLocal and DeleteFromLocal shouldn't be here.
# Ideally, we should have the different import flows doing this for us
# and this function should clean up duplicated tasks. For example, say
# 2 flows need to have a local copy of the image - ImportToLocal - in
# order to be able to complete the task - i.e. Introspect. In that
# case, the introspect.get_flow call should add both, ImportToLocal and
# DeleteFromLocal, to the flow and this function will reduce the
# duplicated calls to those tasks by creating a linear flow that
# ensures those are called before the other tasks. For now, I'm
# keeping them here, though.
limbo = lf.Flow(task_type).add(_ImportToFS(task_id,
task_type,
task_repo,
uri))
for subflow in _get_import_flows(**kwargs):
limbo.add(subflow)
# NOTE(flaper87): We have hard-coded 2 tasks,
# if there aren't more than 2, it means that
# no subtask has been registered.
if len(limbo) > 1:
flow.add(limbo)
# NOTE(flaper87): Until this implementation gets smarter,
# make sure ImportToStore is called *after* the imported
# flow stages. If not, the image will be set to saving state
# invalidating tasks like Introspection or Convert.
flow.add(import_to_store)
# NOTE(flaper87): Since this is an "optional" task but required
# when `limbo` is executed, we're adding it in its own subflow
# to isolate it from the rest of the flow.
delete_flow = lf.Flow(task_type).add(_DeleteFromFS(task_id,
task_type))
flow.add(delete_flow)
else:
flow.add(import_to_store)
except exception.BadTaskConfiguration:
# NOTE(flaper87): If something goes wrong with the load of
# import tasks, make sure we go on.
flow.add(import_to_store)
flow.add(
_SaveImage(task_id, task_type, image_repo),
_CompleteTask(task_id, task_type, task_repo)
)
return flow
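
The provides/requires plumbing used by get_flow() can be reproduced with taskflow alone; a runnable sketch where one task provides 'image_id' and the next consumes it, as _CreateImage and _ImportToStore do above:

from taskflow import engines, task
from taskflow.patterns import linear_flow as lf

class CreateImage(task.Task):
    default_provides = 'image_id'

    def execute(self):
        return 'abc-123'  # stand-in for image_import.create_image(...)

class ImportToStore(task.Task):
    def execute(self, image_id):
        print('importing %s' % image_id)

flow = lf.Flow('import').add(CreateImage(), ImportToStore())
engines.run(flow)  # prints: importing abc-123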


@@ -1,94 +0,0 @@
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from taskflow.patterns import linear_flow as lf
from taskflow import task
from daisy import i18n
_ = i18n._
_LI = i18n._LI
_LE = i18n._LE
_LW = i18n._LW
LOG = logging.getLogger(__name__)
convert_task_opts = [
cfg.StrOpt('conversion_format',
default=None,
choices=('qcow2', 'raw'),
help=_("The format to which images will be automatically "
"converted. " "Can be 'qcow2' or 'raw'.")),
]
CONF = cfg.CONF
# NOTE(flaper87): Registering under the taskflow_executor section
# for now. It seems a waste to have a whole section dedicated to a
# single task with a single option.
CONF.register_opts(convert_task_opts, group='taskflow_executor')
class _Convert(task.Task):
conversion_missing_warned = False
def __init__(self, task_id, task_type, image_repo):
self.task_id = task_id
self.task_type = task_type
self.image_repo = image_repo
super(_Convert, self).__init__(
name='%s-Convert-%s' % (task_type, task_id))
def execute(self, image_id, file_path):
# NOTE(flaper87): A format must be explicitly
# specified. There's no "sane" default for this
# because the dest format may work differently depending
# on the environment OpenStack is running in.
conversion_format = CONF.taskflow_executor.conversion_format
if conversion_format is None:
if not _Convert.conversion_missing_warned:
msg = (_LW('The conversion format is None, please add a value '
'for it in the config file for this task to '
'work: %s') %
self.task_id)
LOG.warn(msg)
_Convert.conversion_missing_warned = True
return
# TODO(flaper87): Check whether the image is in the desired
# format already. Probably using `qemu-img` just like the
# `Introspection` task.
dest_path = "%s.converted" % file_path
stdout, stderr = putils.trycmd('qemu-img', 'convert', '-O',
conversion_format, file_path, dest_path,
log_errors=putils.LOG_ALL_ERRORS)
if stderr:
raise RuntimeError(stderr)
def get_flow(**kwargs):
task_id = kwargs.get('task_id')
task_type = kwargs.get('task_type')
image_repo = kwargs.get('image_repo')
return lf.Flow(task_type).add(
_Convert(task_id, task_type, image_repo),
)
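
The conversion step itself is a plain qemu-img invocation; an equivalent standalone sketch using the stdlib instead of oslo processutils (paths are illustrative):

import subprocess

def convert_image(file_path, conversion_format):
    dest_path = '%s.converted' % file_path
    proc = subprocess.Popen(
        ['qemu-img', 'convert', '-O', conversion_format,
         file_path, dest_path],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    _stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError(stderr)
    return dest_path

# convert_image('/var/lib/daisy/work_dir/abc.tasks_import', 'qcow2')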


@@ -1,89 +0,0 @@
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from oslo_concurrency import processutils as putils
from oslo_utils import excutils
from taskflow.patterns import linear_flow as lf
from daisy.async import utils
from daisy import i18n
_LE = i18n._LE
_LI = i18n._LI
LOG = logging.getLogger(__name__)
class _Introspect(utils.OptionalTask):
"""Taskflow to pull the embedded metadata out of image file"""
def __init__(self, task_id, task_type, image_repo):
self.task_id = task_id
self.task_type = task_type
self.image_repo = image_repo
super(_Introspect, self).__init__(
name='%s-Introspect-%s' % (task_type, task_id))
def execute(self, image_id, file_path):
"""Does the actual introspection
:param image_id: Glance image ID
:param file_path: Path to the file being introspected
"""
try:
stdout, stderr = putils.trycmd('qemu-img', 'info',
'--output=json', file_path,
log_errors=putils.LOG_ALL_ERRORS)
except OSError as exc:
# NOTE(flaper87): errno == 2 means the executable file
# was not found. For now, log an error and move forward
# until we have a better way to enable/disable optional
# tasks.
if exc.errno != 2:
with excutils.save_and_reraise_exception():
msg = (_LE('Failed to execute introspection '
'%(task_id)s: %(exc)s') %
{'task_id': self.task_id, 'exc': exc.message})
LOG.error(msg)
return
if stderr:
raise RuntimeError(stderr)
metadata = json.loads(stdout)
new_image = self.image_repo.get(image_id)
new_image.virtual_size = metadata.get('virtual-size', 0)
new_image.disk_format = metadata.get('format')
self.image_repo.save(new_image)
LOG.debug("%(task_id)s: Introspection successful: %(file)s" %
{'task_id': self.task_id, 'file': file_path})
return new_image
def get_flow(**kwargs):
task_id = kwargs.get('task_id')
task_type = kwargs.get('task_type')
image_repo = kwargs.get('image_repo')
LOG.debug("Flow: %(task_type)s with ID %(id)s on %(repo)s" %
{'task_type': task_type, 'id': task_id, 'repo': image_repo})
return lf.Flow(task_type).add(
_Introspect(task_id, task_type, image_repo),
)
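
What the task pulls out of qemu-img is ordinary JSON; a sketch with a trimmed example of real `qemu-img info --output=json` output:

import json

stdout = '''{
    "virtual-size": 41126400,
    "format": "qcow2",
    "actual-size": 13168640
}'''

metadata = json.loads(stdout)
print(metadata.get('virtual-size', 0))  # 41126400 -> image.virtual_size
print(metadata.get('format'))           # qcow2    -> image.disk_format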


@@ -1,131 +0,0 @@
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from stevedore import driver
from taskflow import engines
from taskflow.listeners import logging as llistener
from taskflow.types import futures
from taskflow.utils import eventlet_utils
import daisy.async
from daisy.common.scripts import utils as script_utils
from daisy import i18n
_ = i18n._
_LE = i18n._LE
LOG = logging.getLogger(__name__)
_deprecated_opt = cfg.DeprecatedOpt('eventlet_executor_pool_size',
group='task')
taskflow_executor_opts = [
cfg.StrOpt('engine_mode',
default='parallel',
choices=('serial', 'parallel'),
help=_("The mode in which the engine will run. "
"Can be 'serial' or 'parallel'.")),
cfg.IntOpt('max_workers',
default=10,
help=_("The number of parallel activities executed at the "
"same time by the engine. The value can be greater "
"than one when the engine mode is 'parallel'."),
deprecated_opts=[_deprecated_opt])
]
CONF = cfg.CONF
CONF.register_opts(taskflow_executor_opts, group='taskflow_executor')
class TaskExecutor(daisy.async.TaskExecutor):
def __init__(self, context, task_repo, image_repo, image_factory):
self.context = context
self.task_repo = task_repo
self.image_repo = image_repo
self.image_factory = image_factory
self.engine_conf = {
'engine': CONF.taskflow_executor.engine_mode,
}
self.engine_kwargs = {}
if CONF.taskflow_executor.engine_mode == 'parallel':
self.engine_kwargs['max_workers'] = (
CONF.taskflow_executor.max_workers)
super(TaskExecutor, self).__init__(context, task_repo, image_repo,
image_factory)
@contextlib.contextmanager
def _executor(self):
if CONF.taskflow_executor.engine_mode != 'parallel':
yield None
else:
max_workers = CONF.taskflow_executor.max_workers
if eventlet_utils.EVENTLET_AVAILABLE:
yield futures.GreenThreadPoolExecutor(max_workers=max_workers)
else:
yield futures.ThreadPoolExecutor(max_workers=max_workers)
def _get_flow(self, task):
try:
task_input = script_utils.unpack_task_input(task)
uri = script_utils.validate_location_uri(
task_input.get('import_from'))
kwds = {
'uri': uri,
'task_id': task.task_id,
'task_type': task.type,
'context': self.context,
'task_repo': self.task_repo,
'image_repo': self.image_repo,
'image_factory': self.image_factory
}
return driver.DriverManager('daisy.flows', task.type,
invoke_on_load=True,
invoke_kwds=kwds).driver
except RuntimeError:
raise NotImplementedError()
def _run(self, task_id, task_type):
LOG.debug('Taskflow executor picked up the execution of task ID '
'%(task_id)s of task type '
'%(task_type)s' % {'task_id': task_id,
'task_type': task_type})
task = script_utils.get_task(self.task_repo, task_id)
if task is None:
# NOTE: This happens if task is not found in the database. In
# such cases, there is no way to update the task status so,
# it's ignored here.
return
flow = self._get_flow(task)
try:
with self._executor() as executor:
engine = engines.load(flow, self.engine_conf,
executor=executor, **self.engine_kwargs)
with llistener.DynamicLoggingListener(engine, log=LOG):
engine.run()
except Exception as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to execute task %(task_id)s: %(exc)s') %
{'task_id': task_id, 'exc': exc.message})
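# Minimal sketch of the engine machinery used by _run() above (illustrative
# only; the demo flow and task are stand-ins, not part of this module):
#
#   from taskflow import task
#   from taskflow.patterns import linear_flow as lf
#
#   class _Echo(task.Task):
#       def execute(self):
#           LOG.debug('demo task ran')
#
#   flow = lf.Flow('demo').add(_Echo('echo'))
#   engine = engines.load(flow, {'engine': 'parallel'},
#                         executor=futures.ThreadPoolExecutor(max_workers=2))
#   engine.run()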


@ -1,66 +0,0 @@
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from taskflow import task
from daisy import i18n
LOG = logging.getLogger(__name__)
_LW = i18n._LW
class OptionalTask(task.Task):
def __init__(self, *args, **kwargs):
super(OptionalTask, self).__init__(*args, **kwargs)
self.execute = self._catch_all(self.execute)
def _catch_all(self, func):
# NOTE(flaper87): Read this comment before calling the MI6
# Here's the thing, there's no nice way to define "optional"
# tasks. That is, tasks whose failure shouldn't affect the execution
# of the flow. The only current "sane" way to do this, is by catching
# everything and logging. This seems harmless from a taskflow
# perspective but it is not. There are some issues related to this
# "workaround":
#
# - Task's states will shamelessly lie to us saying the task succeeded.
#
# - No revert procedure will be triggered, which means optional tasks,
# for now, mustn't cause any side-effects because they won't be able to
# clean them up. If these tasks depend on other tasks that do cause side
# effects, a task that cleans up those side effects must be registered as
# well. For example, _ImportToFS, _MyDumbTask, _DeleteFromFS.
#
# - Ideally, optional tasks shouldn't `provide` new values unless they
# are part of an optional flow. Due to the decoration of the execute
# method, these tasks will need to define the provided methods at
# class level using `default_provides`.
#
#
# The taskflow team is working on improving this and on something that
# will provide the ability of defining optional tasks. For now, to lie
# ourselves we must.
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exc:
msg = (_LW("An optional task has failed, "
"the failure was: %s") %
exc.message)
LOG.warn(msg)
return wrapper
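# Illustrative sketch (the task and helper below are hypothetical, not part
# of the original module): because execute() is wrapped by _catch_all(), an
# optional task has to declare anything it provides at class level through
# default_provides, as the NOTE above explains.
class _OptionalChecksum(OptionalTask):
    default_provides = 'checksum'

    def execute(self, file_path):
        # If this raises, the failure is logged as a warning and the
        # surrounding flow keeps running; no revert is triggered.
        return _compute_checksum(file_path)  # hypothetical helper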


@ -1,30 +0,0 @@
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from daisy import listener
from daisy.openstack.common import service as os_service
from daisy import service
def main():
service.prepare_service()
launcher = os_service.ProcessLauncher()
launcher.launch_service(
listener.ListenerService(),
workers=service.get_workers('listener'))
launcher.wait()
if __name__ == "__main__":
main()


@ -1,59 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Glance Image Cache Invalid Cache Entry and Stalled Image cleaner
This is meant to be run as a periodic task from cron.
If something goes wrong while we're caching an image (for example the fetch
times out, or an exception is raised), we create an 'invalid' entry. These
entries are left around for debugging purposes. However, after some period of
time, we want to clean these up.
Also, if an incomplete image hangs around past the image_cache_stall_time
period, we automatically sweep it up.
"""
import os
import sys
from oslo_log import log as logging
from daisy.common import config
from daisy.image_cache import cleaner
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
CONF = config.CONF
logging.register_options(CONF)
def main():
try:
config.parse_cache_args()
logging.setup(CONF, 'glance')
app = cleaner.Cleaner()
app.run()
except RuntimeError as e:
sys.exit("ERROR: %s" % e)


@ -1,514 +0,0 @@
#!/usr/bin/env python
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A simple cache management utility for daisy.
"""
from __future__ import print_function
import functools
import optparse
import os
import sys
import time
from oslo_utils import timeutils
from daisy.common import utils
from daisy.common import exception
import daisy.image_cache.client
from daisy.version import version_info as version
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
SUCCESS = 0
FAILURE = 1
def catch_error(action):
"""Decorator to provide sensible default error handling for actions."""
def wrap(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
ret = func(*args, **kwargs)
return SUCCESS if ret is None else ret
except exception.NotFound:
options = args[0]
print("Cache management middleware not enabled on host %s" %
options.host)
return FAILURE
except exception.Forbidden:
print("Not authorized to make this request.")
return FAILURE
except Exception as e:
options = args[0]
if options.debug:
raise
print("Failed to %s. Got error:" % action)
pieces = utils.exception_to_str(e).split('\n')
for piece in pieces:
print(piece)
return FAILURE
return wrapper
return wrap
@catch_error('show cached images')
def list_cached(options, args):
"""%(prog)s list-cached [options]
List all images currently cached.
"""
client = get_client(options)
images = client.get_cached_images()
if not images:
print("No cached images.")
return SUCCESS
print("Found %d cached images..." % len(images))
pretty_table = utils.PrettyTable()
pretty_table.add_column(36, label="ID")
pretty_table.add_column(19, label="Last Accessed (UTC)")
pretty_table.add_column(19, label="Last Modified (UTC)")
# 1 TB takes 13 characters to display: len(str(2**40)) == 13
pretty_table.add_column(14, label="Size", just="r")
pretty_table.add_column(10, label="Hits", just="r")
print(pretty_table.make_header())
for image in images:
last_modified = image['last_modified']
last_modified = timeutils.iso8601_from_timestamp(last_modified)
last_accessed = image['last_accessed']
if last_accessed == 0:
last_accessed = "N/A"
else:
last_accessed = timeutils.iso8601_from_timestamp(last_accessed)
print(pretty_table.make_row(
image['image_id'],
last_accessed,
last_modified,
image['size'],
image['hits']))
@catch_error('show queued images')
def list_queued(options, args):
"""%(prog)s list-queued [options]
List all images currently queued for caching.
"""
client = get_client(options)
images = client.get_queued_images()
if not images:
print("No queued images.")
return SUCCESS
print("Found %d queued images..." % len(images))
pretty_table = utils.PrettyTable()
pretty_table.add_column(36, label="ID")
print(pretty_table.make_header())
for image in images:
print(pretty_table.make_row(image))
@catch_error('queue the specified image for caching')
def queue_image(options, args):
"""%(prog)s queue-image <IMAGE_ID> [options]
Queues an image for caching
"""
if len(args) == 1:
image_id = args.pop()
else:
print("Please specify one and only ID of the image you wish to ")
print("queue from the cache as the first argument")
return FAILURE
if (not options.force and
not user_confirm("Queue image %(image_id)s for caching?" %
{'image_id': image_id}, default=False)):
return SUCCESS
client = get_client(options)
client.queue_image_for_caching(image_id)
if options.verbose:
print("Queued image %(image_id)s for caching" %
{'image_id': image_id})
return SUCCESS
@catch_error('delete the specified cached image')
def delete_cached_image(options, args):
"""
%(prog)s delete-cached-image <IMAGE_ID> [options]
Deletes an image from the cache
"""
if len(args) == 1:
image_id = args.pop()
else:
print("Please specify one and only ID of the image you wish to ")
print("delete from the cache as the first argument")
return FAILURE
if (not options.force and
not user_confirm("Delete cached image %(image_id)s?" %
{'image_id': image_id}, default=False)):
return SUCCESS
client = get_client(options)
client.delete_cached_image(image_id)
if options.verbose:
print("Deleted cached image %(image_id)s" % {'image_id': image_id})
return SUCCESS
@catch_error('Delete all cached images')
def delete_all_cached_images(options, args):
"""%(prog)s delete-all-cached-images [options]
Remove all images from the cache.
"""
if (not options.force and
not user_confirm("Delete all cached images?", default=False)):
return SUCCESS
client = get_client(options)
num_deleted = client.delete_all_cached_images()
if options.verbose:
print("Deleted %(num_deleted)s cached images" %
{'num_deleted': num_deleted})
return SUCCESS
@catch_error('delete the specified queued image')
def delete_queued_image(options, args):
"""
%(prog)s delete-queued-image <IMAGE_ID> [options]
Deletes an image from the cache queue
"""
if len(args) == 1:
image_id = args.pop()
else:
print("Please specify one and only ID of the image you wish to ")
print("delete from the cache as the first argument")
return FAILURE
if (not options.force and
not user_confirm("Delete queued image %(image_id)s?" %
{'image_id': image_id}, default=False)):
return SUCCESS
client = get_client(options)
client.delete_queued_image(image_id)
if options.verbose:
print("Deleted queued image %(image_id)s" % {'image_id': image_id})
return SUCCESS
@catch_error('Delete all queued images')
def delete_all_queued_images(options, args):
"""%(prog)s delete-all-queued-images [options]
Remove all images from the cache queue.
"""
if (not options.force and
not user_confirm("Delete all queued images?", default=False)):
return SUCCESS
client = get_client(options)
num_deleted = client.delete_all_queued_images()
if options.verbose:
print("Deleted %(num_deleted)s queued images" %
{'num_deleted': num_deleted})
return SUCCESS
def get_client(options):
"""Return a new client object to a Glance server.
specified by the --host and --port options
supplied to the CLI
"""
return daisy.image_cache.client.get_client(
host=options.host,
port=options.port,
username=options.os_username,
password=options.os_password,
tenant=options.os_tenant_name,
auth_url=options.os_auth_url,
auth_strategy=options.os_auth_strategy,
auth_token=options.os_auth_token,
region=options.os_region_name,
insecure=options.insecure)
def env(*vars, **kwargs):
"""Search for the first defined of possibly many env vars.
Returns the first environment variable defined in vars, or
returns the default defined in kwargs.
"""
for v in vars:
value = os.environ.get(v, None)
if value:
return value
return kwargs.get('default', '')
def create_options(parser):
"""Set up the CLI and config-file options that may be
parsed and program commands.
:param parser: The option parser
"""
parser.add_option('-v', '--verbose', default=False, action="store_true",
help="Print more verbose output.")
parser.add_option('-d', '--debug', default=False, action="store_true",
help="Print debugging output.")
parser.add_option('-H', '--host', metavar="ADDRESS", default="0.0.0.0",
help="Address of Glance API host. "
"Default: %default.")
parser.add_option('-p', '--port', dest="port", metavar="PORT",
type=int, default=9292,
help="Port the Glance API host listens on. "
"Default: %default.")
parser.add_option('-k', '--insecure', dest="insecure",
default=False, action="store_true",
help="Explicitly allow glance to perform \"insecure\" "
"SSL (https) requests. The server's certificate will "
"not be verified against any certificate authorities. "
"This option should be used with caution.")
parser.add_option('-f', '--force', dest="force", metavar="FORCE",
default=False, action="store_true",
help="Prevent select actions from requesting "
"user confirmation.")
parser.add_option('--os-auth-token',
dest='os_auth_token',
default=env('OS_AUTH_TOKEN'),
help='Defaults to env[OS_AUTH_TOKEN].')
parser.add_option('-A', '--os_auth_token', '--auth_token',
dest='os_auth_token',
help=optparse.SUPPRESS_HELP)
parser.add_option('--os-username',
dest='os_username',
default=env('OS_USERNAME'),
help='Defaults to env[OS_USERNAME].')
parser.add_option('-I', '--os_username',
dest='os_username',
help=optparse.SUPPRESS_HELP)
parser.add_option('--os-password',
dest='os_password',
default=env('OS_PASSWORD'),
help='Defaults to env[OS_PASSWORD].')
parser.add_option('-K', '--os_password',
dest='os_password',
help=optparse.SUPPRESS_HELP)
parser.add_option('--os-region-name',
dest='os_region_name',
default=env('OS_REGION_NAME'),
help='Defaults to env[OS_REGION_NAME].')
parser.add_option('-R', '--os_region_name',
dest='os_region_name',
help=optparse.SUPPRESS_HELP)
parser.add_option('--os-tenant-id',
dest='os_tenant_id',
default=env('OS_TENANT_ID'),
help='Defaults to env[OS_TENANT_ID].')
parser.add_option('--os_tenant_id',
dest='os_tenant_id',
help=optparse.SUPPRESS_HELP)
parser.add_option('--os-tenant-name',
dest='os_tenant_name',
default=env('OS_TENANT_NAME'),
help='Defaults to env[OS_TENANT_NAME].')
parser.add_option('-T', '--os_tenant_name',
dest='os_tenant_name',
help=optparse.SUPPRESS_HELP)
parser.add_option('--os-auth-url',
default=env('OS_AUTH_URL'),
help='Defaults to env[OS_AUTH_URL].')
parser.add_option('-N', '--os_auth_url',
dest='os_auth_url',
help=optparse.SUPPRESS_HELP)
parser.add_option('-S', '--os_auth_strategy', dest="os_auth_strategy",
metavar="STRATEGY",
help="Authentication strategy (keystone or noauth).")
def parse_options(parser, cli_args):
"""
Returns the parsed CLI options, command to run and its arguments, merged
with any same-named options found in a configuration file
:param parser: The option parser
"""
if not cli_args:
cli_args.append('-h') # Show options in usage output...
(options, args) = parser.parse_args(cli_args)
# HACK(sirp): Make the parser available to the print_help method
# print_help is a command, so it only accepts (options, args); we could
# one-off have it take (parser, options, args), however, for now, I think
# this little hack will suffice
options.__parser = parser
if not args:
parser.print_usage()
sys.exit(0)
command_name = args.pop(0)
command = lookup_command(parser, command_name)
return (options, command, args)
def print_help(options, args):
"""
Print help specific to a command
"""
if len(args) != 1:
sys.exit("Please specify a command")
parser = options.__parser
command_name = args.pop()
command = lookup_command(parser, command_name)
print(command.__doc__ % {'prog': os.path.basename(sys.argv[0])})
def lookup_command(parser, command_name):
BASE_COMMANDS = {'help': print_help}
CACHE_COMMANDS = {
'list-cached': list_cached,
'list-queued': list_queued,
'queue-image': queue_image,
'delete-cached-image': delete_cached_image,
'delete-all-cached-images': delete_all_cached_images,
'delete-queued-image': delete_queued_image,
'delete-all-queued-images': delete_all_queued_images,
}
commands = {}
for command_set in (BASE_COMMANDS, CACHE_COMMANDS):
commands.update(command_set)
try:
command = commands[command_name]
except KeyError:
parser.print_usage()
sys.exit("Unknown command: %(cmd_name)s" % {'cmd_name': command_name})
return command
def user_confirm(prompt, default=False):
"""Yes/No question dialog with user.
:param prompt: question/statement to present to user (string)
:param default: boolean value to return if empty string
is received as response to prompt
"""
if default:
prompt_default = "[Y/n]"
else:
prompt_default = "[y/N]"
answer = raw_input("%s %s " % (prompt, prompt_default))
if answer == "":
return default
else:
return answer.lower() in ("yes", "y")
def main():
usage = """
%prog <command> [options] [args]
Commands:
help <command> Output help for one of the commands below
list-cached List all images currently cached
list-queued List all images currently queued for caching
queue-image Queue an image for caching
delete-cached-image Purges an image from the cache
delete-all-cached-images Removes all images from the cache
delete-queued-image Deletes an image from the cache queue
delete-all-queued-images Deletes all images from the cache queue
"""
version_string = version.cached_version_string()
oparser = optparse.OptionParser(version=version_string,
usage=usage.strip())
create_options(oparser)
(options, command, args) = parse_options(oparser, sys.argv[1:])
try:
start_time = time.time()
result = command(options, args)
end_time = time.time()
if options.verbose:
print("Completed in %-0.4f sec." % (end_time - start_time))
sys.exit(result)
except (RuntimeError, NotImplementedError) as e:
print("ERROR: ", e)
if __name__ == '__main__':
main()


@ -1,62 +0,0 @@
#!/usr/bin/env python
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Glance Image Cache Pre-fetcher
This is meant to be run from the command line after queueing
images to be prefetched.
"""
import os
import sys
import glance_store
from oslo_log import log as logging
from daisy.common import config
from daisy.image_cache import prefetcher
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
CONF = config.CONF
logging.register_options(CONF)
def main():
try:
config.parse_cache_args()
logging.setup(CONF, 'glance')
glance_store.register_opts(config.CONF)
glance_store.create_stores(config.CONF)
glance_store.verify_default_store()
app = prefetcher.Prefetcher()
app.run()
except RuntimeError as e:
sys.exit("ERROR: %s" % e)
if __name__ == '__main__':
main()


@ -1,52 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Glance Image Cache Pruner
This is meant to be run as a periodic task, perhaps every half-hour.
"""
import os
import sys
from oslo_log import log as logging
from daisy.common import config
from daisy.image_cache import pruner
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
CONF = config.CONF
logging.register_options(CONF)
def main():
try:
config.parse_cache_args()
logging.setup(CONF, 'glance')
app = pruner.Pruner()
app.run()
except RuntimeError as e:
sys.exit("ERROR: %s" % e)


@ -1,52 +0,0 @@
# Copyright 2015 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_config import cfg
from oslo_log import log as logging
import stevedore
from daisy.common import config
from daisy import i18n
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_LE = i18n._LE
def main():
try:
logging.register_options(CONF)
cfg_files = cfg.find_config_files(project='glance',
prog='glance-api')
cfg_files.extend(cfg.find_config_files(project='glance',
prog='glance-search'))
config.parse_args(default_config_files=cfg_files)
logging.setup(CONF, 'glance')
namespace = 'daisy.search.index_backend'
ext_manager = stevedore.extension.ExtensionManager(
namespace, invoke_on_load=True)
for ext in ext_manager.extensions:
try:
ext.obj.setup()
except Exception as e:
LOG.error(_LE("Failed to setup index extension "
"%(ext)s: %(e)s") % {'ext': ext.name,
'e': e})
except RuntimeError as e:
sys.exit("ERROR: %s" % e)


@ -1,725 +0,0 @@
#!/usr/bin/env python
# Copyright 2012 Michael Still and Canonical Inc
# Copyright 2014 SoftLayer Technologies, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import httplib
import os
import sys
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_log import log as logging
import six.moves.urllib.parse as urlparse
from webob import exc
from daisy.common import config
from daisy.common import exception
from daisy.common import utils
from daisy import i18n
LOG = logging.getLogger(__name__)
_ = i18n._
_LI = i18n._LI
_LE = i18n._LE
_LW = i18n._LW
# NOTE: positional arguments <args> will be parsed before <command> until
# this bug is corrected https://bugs.launchpad.net/oslo.config/+bug/1392428
cli_opts = [
cfg.IntOpt('chunksize',
short='c',
default=65536,
help="Amount of data to transfer per HTTP write."),
cfg.StrOpt('dontreplicate',
short='D',
default=('created_at date deleted_at location updated_at'),
help="List of fields to not replicate."),
cfg.BoolOpt('metaonly',
short='m',
default=False,
help="Only replicate metadata, not images."),
cfg.StrOpt('token',
short='t',
default='',
help=("Pass in your authentication token if you have "
"one. If you use this option the same token is "
"used for both the master and the slave.")),
cfg.StrOpt('mastertoken',
short='M',
default='',
help=("Pass in your authentication token if you have "
"one. This is the token used for the master.")),
cfg.StrOpt('slavetoken',
short='S',
default='',
help=("Pass in your authentication token if you have "
"one. This is the token used for the slave.")),
cfg.StrOpt('command',
positional=True,
help="Command to be given to replicator"),
cfg.ListOpt('args',
positional=True,
help="Arguments for the command"),
]
CONF = cfg.CONF
CONF.register_cli_opts(cli_opts)
logging.register_options(CONF)
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
COMMANDS = """Commands:
help <command> Output help for one of the commands below
compare What is missing from the slave glance?
dump Dump the contents of a glance instance to local disk.
livecopy Load the contents of one glance instance into another.
load Load the contents of a local directory into daisy.
size Determine the size of a glance instance if dumped to disk.
"""
IMAGE_ALREADY_PRESENT_MESSAGE = _('The image %s is already present on '
'the slave, but our check for it did '
'not find it. This indicates that we '
'do not have permissions to see all '
'the images on the slave server.')
class ImageService(object):
def __init__(self, conn, auth_token):
"""Initialize the ImageService.
conn: an httplib.HTTPConnection to the glance server
auth_token: authentication token to pass in the x-auth-token header
"""
self.auth_token = auth_token
self.conn = conn
def _http_request(self, method, url, headers, body,
ignore_result_body=False):
"""Perform an HTTP request against the server.
method: the HTTP method to use
url: the URL to request (not including server portion)
headers: headers for the request
body: body to send with the request
ignore_result_body: the body of the result will be ignored
Returns: an httplib response object
"""
if self.auth_token:
headers.setdefault('x-auth-token', self.auth_token)
LOG.debug('Request: %(method)s http://%(server)s:%(port)s'
'%(url)s with headers %(headers)s'
% {'method': method,
'server': self.conn.host,
'port': self.conn.port,
'url': url,
'headers': repr(headers)})
self.conn.request(method, url, body, headers)
response = self.conn.getresponse()
headers = self._header_list_to_dict(response.getheaders())
code = response.status
code_description = httplib.responses[code]
LOG.debug('Response: %(code)s %(status)s %(headers)s'
% {'code': code,
'status': code_description,
'headers': repr(headers)})
if code == 400:
raise exc.HTTPBadRequest(
explanation=response.read())
if code == 500:
raise exc.HTTPInternalServerError(
explanation=response.read())
if code == 401:
raise exc.HTTPUnauthorized(
explanation=response.read())
if code == 403:
raise exc.HTTPForbidden(
explanation=response.read())
if code == 409:
raise exc.HTTPConflict(
explanation=response.read())
if ignore_result_body:
# NOTE: because we are pipelining requests through a single HTTP
# connection, httplib requires that we read the response body
# before we can make another request. If the caller knows they
# don't care about the body, they can ask us to do that for them.
response.read()
return response
def get_images(self):
"""Return a detailed list of images.
Yields a series of images as dicts containing metadata.
"""
params = {'is_public': None}
while True:
url = '/v1/images/detail'
query = urlparse.urlencode(params)
if query:
url += '?%s' % query
response = self._http_request('GET', url, {}, '')
result = jsonutils.loads(response.read())
if not result or 'images' not in result or not result['images']:
return
for image in result.get('images', []):
params['marker'] = image['id']
yield image
def get_image(self, image_uuid):
"""Fetch image data from daisy.
image_uuid: the id of an image
Returns: an httplib Response object where the body is the image.
"""
url = '/v1/images/%s' % image_uuid
return self._http_request('GET', url, {}, '')
@staticmethod
def _header_list_to_dict(headers):
"""Expand a list of headers into a dictionary.
headers: a list of [(key, value), (key, value), (key, value)]
Returns: a dictionary representation of the list
"""
d = {}
for (header, value) in headers:
if header.startswith('x-image-meta-property-'):
prop = header.replace('x-image-meta-property-', '')
d.setdefault('properties', {})
d['properties'][prop] = value
else:
d[header.replace('x-image-meta-', '')] = value
return d
def get_image_meta(self, image_uuid):
"""Return the metadata for a single image.
image_uuid: the id of an image
Returns: image metadata as a dictionary
"""
url = '/v1/images/%s' % image_uuid
response = self._http_request('HEAD', url, {}, '',
ignore_result_body=True)
return self._header_list_to_dict(response.getheaders())
@staticmethod
def _dict_to_headers(d):
"""Convert a dictionary into one suitable for a HTTP request.
d: a dictionary
Returns: the same dictionary, with x-image-meta added to every key
"""
h = {}
for key in d:
if key == 'properties':
for subkey in d[key]:
if d[key][subkey] is None:
h['x-image-meta-property-%s' % subkey] = ''
else:
h['x-image-meta-property-%s' % subkey] = d[key][subkey]
else:
h['x-image-meta-%s' % key] = d[key]
return h
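    # Illustrative round trip (not in the original source): the two helpers
    # above invert each other under the Glance v1 header scheme:
    #
    #   >>> ImageService._dict_to_headers(
    #   ...     {'id': 'abc', 'properties': {'os': 'linux'}})
    #   {'x-image-meta-id': 'abc', 'x-image-meta-property-os': 'linux'}
    #   >>> ImageService._header_list_to_dict(
    #   ...     [('x-image-meta-id', 'abc'),
    #   ...      ('x-image-meta-property-os', 'linux')])
    #   {'id': 'abc', 'properties': {'os': 'linux'}}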
def add_image(self, image_meta, image_data):
"""Upload an image.
image_meta: image metadata as a dictionary
image_data: image data as an object with a read() method
Returns: a tuple of (http response headers, http response body)
"""
url = '/v1/images'
headers = self._dict_to_headers(image_meta)
headers['Content-Type'] = 'application/octet-stream'
headers['Content-Length'] = int(image_meta['size'])
response = self._http_request('POST', url, headers, image_data)
headers = self._header_list_to_dict(response.getheaders())
LOG.debug('Image post done')
body = response.read()
return headers, body
def add_image_meta(self, image_meta):
"""Update image metadata.
image_meta: image metadata as a dictionary
Returns: a tuple of (http response headers, http response body)
"""
url = '/v1/images/%s' % image_meta['id']
headers = self._dict_to_headers(image_meta)
headers['Content-Type'] = 'application/octet-stream'
response = self._http_request('PUT', url, headers, '')
headers = self._header_list_to_dict(response.getheaders())
LOG.debug('Image post done')
body = response.read()
return headers, body
def get_image_service():
"""Get a copy of the image service.
This is done like this to make it easier to mock out ImageService.
"""
return ImageService
def replication_size(options, args):
"""%(prog)s size <server:port>
Determine the size of a glance instance if dumped to disk.
server:port: the location of the glance instance.
"""
# Make sure server info is provided
if len(args) < 1:
raise TypeError(_("Too few arguments."))
server, port = utils.parse_valid_host_port(args.pop())
total_size = 0
count = 0
imageservice = get_image_service()
client = imageservice(httplib.HTTPConnection(server, port),
options.slavetoken)
for image in client.get_images():
LOG.debug('Considering image: %(image)s' % {'image': image})
if image['status'] == 'active':
total_size += int(image['size'])
count += 1
print(_('Total size is %(size)d bytes across %(img_count)d images') %
{'size': total_size,
'img_count': count})
def replication_dump(options, args):
"""%(prog)s dump <server:port> <path>
Dump the contents of a glance instance to local disk.
server:port: the location of the glance instance.
path: a directory on disk to contain the data.
"""
# Make sure server and path are provided
if len(args) < 2:
raise TypeError(_("Too few arguments."))
path = args.pop()
server, port = utils.parse_valid_host_port(args.pop())
imageservice = get_image_service()
client = imageservice(httplib.HTTPConnection(server, port),
options.mastertoken)
for image in client.get_images():
LOG.debug('Considering: %s' % image['id'])
data_path = os.path.join(path, image['id'])
if not os.path.exists(data_path):
LOG.info(_LI('Storing: %s') % image['id'])
# Dump glance information
with open(data_path, 'w') as f:
f.write(jsonutils.dumps(image))
if image['status'] == 'active' and not options.metaonly:
# Now fetch the image. The metadata returned in headers here
# is the same as that which we got from the detailed images
# request earlier, so we can ignore it here. Note that we also
# only dump active images.
LOG.debug('Image %s is active' % image['id'])
image_response = client.get_image(image['id'])
with open(data_path + '.img', 'wb') as f:
while True:
chunk = image_response.read(options.chunksize)
if not chunk:
break
f.write(chunk)
def _dict_diff(a, b):
"""A one way dictionary diff.
a: a dictionary
b: a dictionary
Returns: True if the dictionaries are different
"""
# Only things the master has which the slave lacks matter
if set(a.keys()) - set(b.keys()):
LOG.debug('metadata diff -- master has extra keys: %(keys)s'
% {'keys': ' '.join(set(a.keys()) - set(b.keys()))})
return True
for key in a:
if str(a[key]) != str(b[key]):
LOG.debug('metadata diff -- value differs for key '
'%(key)s: master "%(master_value)s" vs '
'slave "%(slave_value)s"' %
{'key': key,
'master_value': a[key],
'slave_value': b[key]})
return True
return False
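# Illustrative examples (not in the original source): the diff is one way,
# so keys present only on the slave are ignored:
#
#   >>> _dict_diff({'size': '10'}, {'size': '10', 'checksum': 'c'})
#   False
#   >>> _dict_diff({'size': '10', 'name': 'a'}, {'size': '10'})
#   True
#   >>> _dict_diff({'size': '10'}, {'size': '20'})
#   True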
def replication_load(options, args):
"""%(prog)s load <server:port> <path>
Load the contents of a local directory into daisy.
server:port: the location of the glance instance.
path: a directory on disk containing the data.
"""
# Make sure server and path are provided
if len(args) < 2:
raise TypeError(_("Too few arguments."))
path = args.pop()
server, port = utils.parse_valid_host_port(args.pop())
imageservice = get_image_service()
client = imageservice(httplib.HTTPConnection(server, port),
options.slavetoken)
updated = []
for ent in os.listdir(path):
if utils.is_uuid_like(ent):
image_uuid = ent
LOG.info(_LI('Considering: %s') % image_uuid)
meta_file_name = os.path.join(path, image_uuid)
with open(meta_file_name) as meta_file:
meta = jsonutils.loads(meta_file.read())
# Remove keys which don't make sense for replication
for key in options.dontreplicate.split(' '):
if key in meta:
LOG.debug('Stripping %(header)s from saved '
'metadata', {'header': key})
del meta[key]
if _image_present(client, image_uuid):
# NOTE(mikal): Perhaps we just need to update the metadata?
# Note that we don't attempt to change an image file once it
# has been uploaded.
LOG.debug('Image %s already present', image_uuid)
headers = client.get_image_meta(image_uuid)
for key in options.dontreplicate.split(' '):
if key in headers:
LOG.debug('Stripping %(header)s from slave '
'metadata', {'header': key})
del headers[key]
if _dict_diff(meta, headers):
LOG.info(_LI('Image %s metadata has changed') %
image_uuid)
headers, body = client.add_image_meta(meta)
_check_upload_response_headers(headers, body)
updated.append(meta['id'])
else:
if not os.path.exists(os.path.join(path, image_uuid + '.img')):
LOG.debug('%s dump is missing image data, skipping' %
image_uuid)
continue
# Upload the image itself
with open(os.path.join(path, image_uuid + '.img')) as img_file:
try:
headers, body = client.add_image(meta, img_file)
_check_upload_response_headers(headers, body)
updated.append(meta['id'])
except exc.HTTPConflict:
LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE)
% image_uuid) # noqa
return updated
def replication_livecopy(options, args):
"""%(prog)s livecopy <fromserver:port> <toserver:port>
Load the contents of one glance instance into another.
fromserver:port: the location of the master glance instance.
toserver:port: the location of the slave glance instance.
"""
# Make sure from-server and to-server are provided
if len(args) < 2:
raise TypeError(_("Too few arguments."))
imageservice = get_image_service()
slave_server, slave_port = utils.parse_valid_host_port(args.pop())
slave_conn = httplib.HTTPConnection(slave_server, slave_port)
slave_client = imageservice(slave_conn, options.slavetoken)
master_server, master_port = utils.parse_valid_host_port(args.pop())
master_conn = httplib.HTTPConnection(master_server, master_port)
master_client = imageservice(master_conn, options.mastertoken)
updated = []
for image in master_client.get_images():
LOG.debug('Considering %(id)s' % {'id': image['id']})
for key in options.dontreplicate.split(' '):
if key in image:
LOG.debug('Stripping %(header)s from master metadata',
{'header': key})
del image[key]
if _image_present(slave_client, image['id']):
# NOTE(mikal): Perhaps we just need to update the metadata?
# Note that we don't attempt to change an image file once it
# has been uploaded.
headers = slave_client.get_image_meta(image['id'])
if headers['status'] == 'active':
for key in options.dontreplicate.split(' '):
if key in image:
LOG.debug('Stripping %(header)s from master '
'metadata', {'header': key})
del image[key]
if key in headers:
LOG.debug('Stripping %(header)s from slave '
'metadata', {'header': key})
del headers[key]
if _dict_diff(image, headers):
LOG.info(_LI('Image %s metadata has changed') %
image['id'])
headers, body = slave_client.add_image_meta(image)
_check_upload_response_headers(headers, body)
updated.append(image['id'])
elif image['status'] == 'active':
LOG.info(_LI('Image %s is being synced') % image['id'])
if not options.metaonly:
image_response = master_client.get_image(image['id'])
try:
headers, body = slave_client.add_image(image,
image_response)
_check_upload_response_headers(headers, body)
updated.append(image['id'])
except exc.HTTPConflict:
LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE) % image['id']) # noqa
return updated
def replication_compare(options, args):
"""%(prog)s compare <fromserver:port> <toserver:port>
Compare the contents of fromserver with those of toserver.
fromserver:port: the location of the master glance instance.
toserver:port: the location of the slave glance instance.
"""
# Make sure from-server and to-server are provided
if len(args) < 2:
raise TypeError(_("Too few arguments."))
imageservice = get_image_service()
slave_server, slave_port = utils.parse_valid_host_port(args.pop())
slave_conn = httplib.HTTPConnection(slave_server, slave_port)
slave_client = imageservice(slave_conn, options.slavetoken)
master_server, master_port = utils.parse_valid_host_port(args.pop())
master_conn = httplib.HTTPConnection(master_server, master_port)
master_client = imageservice(master_conn, options.mastertoken)
differences = {}
for image in master_client.get_images():
if _image_present(slave_client, image['id']):
headers = slave_client.get_image_meta(image['id'])
for key in options.dontreplicate.split(' '):
if key in image:
LOG.debug('Stripping %(header)s from master metadata',
{'header': key})
del image[key]
if key in headers:
LOG.debug('Stripping %(header)s from slave metadata',
{'header': key})
del headers[key]
for key in image:
if image[key] != headers.get(key, None):
LOG.warn(_LW('%(image_id)s: field %(key)s differs '
'(source is %(master_value)s, destination '
'is %(slave_value)s)')
% {'image_id': image['id'],
'key': key,
'master_value': image[key],
'slave_value': headers.get(key, 'undefined')})
differences[image['id']] = 'diff'
else:
LOG.debug('%(image_id)s is identical'
% {'image_id': image['id']})
elif image['status'] == 'active':
LOG.warn(_LW('Image %s entirely missing from the destination')
% image['id'])
differences[image['id']] = 'missing'
return differences
def _check_upload_response_headers(headers, body):
"""Check that the headers of an upload are reasonable.
headers: the headers from the upload
body: the body from the upload
"""
if 'status' not in headers:
try:
d = jsonutils.loads(body)
if 'image' in d and 'status' in d['image']:
return
except Exception:
raise exception.UploadException(body)
def _image_present(client, image_uuid):
"""Check if an image is present in daisy.
client: the ImageService
image_uuid: the image uuid to check
Returns: True if the image is present
"""
headers = client.get_image_meta(image_uuid)
return 'status' in headers
def print_help(options, args):
"""Print help specific to a command.
options: the parsed command line options
args: the command line
"""
if len(args) != 1:
print(COMMANDS)
sys.exit(1)
command_name = args.pop()
command = lookup_command(command_name)
print(command.__doc__ % {'prog': os.path.basename(sys.argv[0])})
def lookup_command(command_name):
"""Lookup a command.
command_name: the command name
Returns: a method which implements that command
"""
BASE_COMMANDS = {'help': print_help}
REPLICATION_COMMANDS = {'compare': replication_compare,
'dump': replication_dump,
'livecopy': replication_livecopy,
'load': replication_load,
'size': replication_size}
commands = {}
for command_set in (BASE_COMMANDS, REPLICATION_COMMANDS):
commands.update(command_set)
try:
command = commands[command_name]
except KeyError:
sys.exit(_("Unknown command: %s") % command_name)
return command
def main():
"""The main function."""
try:
config.parse_args()
except RuntimeError as e:
sys.exit("ERROR: %s" % utils.exception_to_str(e))
# Setup logging
logging.setup(CONF, 'glance')
if CONF.token:
CONF.slavetoken = CONF.token
CONF.mastertoken = CONF.token
command = lookup_command(CONF.command)
try:
command(CONF, CONF.args)
except TypeError as e:
LOG.error(_LE(command.__doc__) % {'prog': command.__name__}) # noqa
sys.exit("ERROR: %s" % utils.exception_to_str(e))
except ValueError as e:
LOG.error(_LE(command.__doc__) % {'prog': command.__name__}) # noqa
sys.exit("ERROR: %s" % utils.exception_to_str(e))
if __name__ == '__main__':
main()


@ -1,93 +0,0 @@
#!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Glance Catalog Search Server
"""
import os
import sys
import eventlet
from daisy.common import utils
from oslo_config import cfg
from oslo_log import log as logging
import osprofiler.notifier
import osprofiler.web
from daisy.common import config
from daisy.common import exception
from daisy.common import wsgi
from daisy import notifier
# Monkey patch socket, time, select, threads
eventlet.patcher.monkey_patch(socket=True, time=True, select=True,
thread=True, os=True)
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
CONF = cfg.CONF
CONF.import_group("profiler", "daisy.common.wsgi")
logging.register_options(CONF)
KNOWN_EXCEPTIONS = (RuntimeError,
exception.WorkerCreationFailure)
def fail(e):
global KNOWN_EXCEPTIONS
return_code = KNOWN_EXCEPTIONS.index(type(e)) + 1
sys.stderr.write("ERROR: %s\n" % utils.exception_to_str(e))
sys.exit(return_code)
def main():
try:
config.parse_args()
wsgi.set_eventlet_hub()
logging.setup(CONF, 'glance')
if cfg.CONF.profiler.enabled:
_notifier = osprofiler.notifier.create("Messaging",
notifier.messaging, {},
notifier.get_transport(),
"glance", "search",
cfg.CONF.bind_host)
osprofiler.notifier.set(_notifier)
else:
osprofiler.web.disable()
server = wsgi.Server()
server.start(config.load_paste_app('glance-search'),
default_port=9393)
server.wait()
except KNOWN_EXCEPTIONS as e:
fail(e)
if __name__ == '__main__':
main()


@ -194,11 +194,6 @@ def parse_args(args=None, usage=None, default_config_files=None):
default_config_files=default_config_files)
def parse_cache_args(args=None):
config_files = cfg.find_config_files(project='daisy', prog='daisy-cache')
parse_args(args=args, default_config_files=config_files)
def _get_deployment_flavor(flavor=None):
"""
Retrieve the paste_deploy.flavor config item, formatted appropriately


@ -33,7 +33,6 @@ import functools
import os
import platform
import re
import stevedore
import subprocess
import sys
import uuid
@ -822,13 +821,6 @@ def stash_conf_values():
return conf
def get_search_plugins():
namespace = 'daisy.search.index_backend'
ext_manager = stevedore.extension.ExtensionManager(
namespace, invoke_on_load=True)
return ext_manager.extensions
def get_host_min_mac(host_interfaces):
if not isinstance(host_interfaces, list):
host_interfaces = eval(host_interfaces)


@ -1,65 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Metadata setup commands."""
import threading
from oslo_config import cfg
from oslo_db import options as db_options
from stevedore import driver
from daisy.db.sqlalchemy import api as db_api
_IMPL = None
_LOCK = threading.Lock()
db_options.set_defaults(cfg.CONF)
def get_backend():
global _IMPL
if _IMPL is None:
with _LOCK:
if _IMPL is None:
_IMPL = driver.DriverManager(
"daisy.database.metadata_backend",
cfg.CONF.database.backend).driver
return _IMPL
def load_metadefs():
"""Read metadefinition files and insert data into the database"""
return get_backend().db_load_metadefs(engine=db_api.get_engine(),
metadata_path=None,
merge=False,
prefer_new=False,
overwrite=False)
def unload_metadefs():
"""Unload metadefinitions from database"""
return get_backend().db_unload_metadefs(engine=db_api.get_engine())
def export_metadefs():
"""Export metadefinitions from database to files"""
return get_backend().db_export_metadefs(engine=db_api.get_engine(),
metadata_path=None)


@ -1,90 +0,0 @@
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo import messaging
from oslo_log import log as logging
import stevedore
from daisy import i18n
from daisy.openstack.common import service as os_service
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
class NotificationEndpoint(object):
def __init__(self):
self.plugins = get_plugins()
self.notification_target_map = dict()
for plugin in self.plugins:
try:
event_list = plugin.obj.get_notification_supported_events()
for event in event_list:
self.notification_target_map[event.lower()] = plugin.obj
except Exception as e:
LOG.error(_LE("Failed to retrieve supported notification"
" events from search plugins "
"%(ext)s: %(e)s") %
{'ext': plugin.name, 'e': e})
def info(self, ctxt, publisher_id, event_type, payload, metadata):
event_type_l = event_type.lower()
if event_type_l in self.notification_target_map:
plugin = self.notification_target_map[event_type_l]
handler = plugin.get_notification_handler()
handler.process(
ctxt,
publisher_id,
event_type,
payload,
metadata)
class ListenerService(os_service.Service):
def __init__(self, *args, **kwargs):
super(ListenerService, self).__init__(*args, **kwargs)
self.listeners = []
def start(self):
super(ListenerService, self).start()
transport = messaging.get_transport(cfg.CONF)
targets = [
messaging.Target(topic="notifications", exchange="daisy")
]
endpoints = [
NotificationEndpoint()
]
listener = messaging.get_notification_listener(
transport,
targets,
endpoints)
listener.start()
self.listeners.append(listener)
def stop(self):
for listener in self.listeners:
listener.stop()
listener.wait()
super(ListenerService, self).stop()
def get_plugins():
namespace = 'daisy.search.index_backend'
ext_manager = stevedore.extension.ExtensionManager(
namespace, invoke_on_load=True)
return ext_manager.extensions


@ -18,11 +18,8 @@ import itertools
import daisy.api.middleware.context
import daisy.api.versions
import daisy.common.config
import daisy.common.property_utils
import daisy.common.rpc
import daisy.common.wsgi
import daisy.image_cache
import daisy.image_cache.drivers.sqlite
import daisy.notifier
import daisy.registry
import daisy.registry.client
@ -31,7 +28,6 @@ import daisy.registry.client.v1.api
__all__ = [
'list_api_opts',
'list_registry_opts',
'list_cache_opts',
'list_manage_opts'
]
@ -41,13 +37,10 @@ _api_opts = [
daisy.api.middleware.context.context_opts,
daisy.api.versions.versions_opts,
daisy.common.config.common_opts,
daisy.common.property_utils.property_opts,
daisy.common.rpc.rpc_opts,
daisy.common.wsgi.bind_opts,
daisy.common.wsgi.eventlet_opts,
daisy.common.wsgi.socket_opts,
daisy.image_cache.drivers.sqlite.sqlite_opts,
daisy.image_cache.image_cache_opts,
daisy.notifier.notifier_opts,
daisy.registry.registry_addr_opts,
daisy.registry.client.registry_client_ctx_opts,
@ -65,14 +58,6 @@ _registry_opts = [
daisy.common.wsgi.eventlet_opts))),
('paste_deploy', daisy.common.config.paste_deploy_opts)
]
_cache_opts = [
(None, list(itertools.chain(
daisy.common.config.common_opts,
daisy.image_cache.drivers.sqlite.sqlite_opts,
daisy.image_cache.image_cache_opts,
daisy.registry.registry_addr_opts,
daisy.registry.client.registry_client_ctx_opts))),
]
_manage_opts = [
(None, [])
]
@ -105,13 +90,6 @@ def list_registry_opts():
return [(g, copy.deepcopy(o)) for g, o in _registry_opts]
def list_cache_opts():
"""Return a list of oslo_config options available in Glance Cache
service.
"""
return [(g, copy.deepcopy(o)) for g, o in _cache_opts]
def list_manage_opts():
"""Return a list of oslo_config options available in Glance manage."""
return [(g, copy.deepcopy(o)) for g, o in _manage_opts]


@ -1,77 +0,0 @@
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import elasticsearch
from elasticsearch import helpers
from oslo_config import cfg
from daisy.common import utils
search_opts = [
cfg.ListOpt('hosts', default=['127.0.0.1:9200'],
help='List of nodes where Elasticsearch instances are '
'running. A single node should be defined as an IP '
'address and port number.'),
]
CONF = cfg.CONF
CONF.register_opts(search_opts, group='elasticsearch')
def get_api():
es_hosts = CONF.elasticsearch.hosts
es_api = elasticsearch.Elasticsearch(hosts=es_hosts)
return es_api
class CatalogSearchRepo(object):
def __init__(self, context, es_api):
self.context = context
self.es_api = es_api
self.plugins = utils.get_search_plugins() or []
self.plugins_info_dict = self._get_plugin_info()
def search(self, index, doc_type, query, fields, offset, limit,
ignore_unavailable=True):
return self.es_api.search(
index=index,
doc_type=doc_type,
body=query,
_source_include=fields,
from_=offset,
size=limit,
ignore_unavailable=ignore_unavailable)
def index(self, default_index, default_type, actions):
return helpers.bulk(
client=self.es_api,
index=default_index,
doc_type=default_type,
actions=actions)
def plugins_info(self):
return self.plugins_info_dict
def _get_plugin_info(self):
plugin_info = dict()
plugin_info['plugins'] = []
for plugin in self.plugins:
info = dict()
info['type'] = plugin.obj.get_document_type()
info['index'] = plugin.obj.get_index_name()
plugin_info['plugins'].append(info)
return plugin_info
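# Illustrative usage sketch (assumptions: a reachable Elasticsearch node and
# an index populated by the search plugins; the index, doc_type and field
# names below are examples only):
#
#   repo = CatalogSearchRepo(context, get_api())
#   hits = repo.search(index='glance',
#                      doc_type='image',
#                      query={'query': {'match': {'name': 'cirros'}}},
#                      fields=['id', 'name'],
#                      offset=0,
#                      limit=10)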


@ -1,66 +0,0 @@
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from daisy.common import wsgi
from daisy.search.api.v0_1 import search
class API(wsgi.Router):
"""WSGI router for Glance Catalog Search v0_1 API requests."""
def __init__(self, mapper):
reject_method_resource = wsgi.Resource(wsgi.RejectMethodController())
search_catalog_resource = search.create_resource()
mapper.connect('/search',
controller=search_catalog_resource,
action='search',
conditions={'method': ['GET']})
mapper.connect('/search',
controller=search_catalog_resource,
action='search',
conditions={'method': ['POST']})
mapper.connect('/search',
controller=reject_method_resource,
action='reject',
allowed_methods='GET, POST',
conditions={'method': ['PUT', 'DELETE',
'PATCH', 'HEAD']})
mapper.connect('/search/plugins',
controller=search_catalog_resource,
action='plugins_info',
conditions={'method': ['GET']})
mapper.connect('/search/plugins',
controller=reject_method_resource,
action='reject',
allowed_methods='GET',
conditions={'method': ['POST', 'PUT', 'DELETE',
'PATCH', 'HEAD']})
mapper.connect('/index',
controller=search_catalog_resource,
action='index',
conditions={'method': ['POST']})
mapper.connect('/index',
controller=reject_method_resource,
action='reject',
allowed_methods='POST',
conditions={'method': ['GET', 'PUT', 'DELETE',
'PATCH', 'HEAD']})
super(API, self).__init__(mapper)
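
Given these routes, a search is a GET or POST against /search. A hedged
example using python-requests; the endpoint, port, and token are placeholders:

import json
import requests

body = {'index': 'glance', 'type': 'image',
        'query': {'match': {'name': 'cirros'}}, 'limit': 5}
resp = requests.post('http://127.0.0.1:9393/search',  # port is a placeholder
                     headers={'X-Auth-Token': '<token>',
                              'Content-Type': 'application/json'},
                     data=json.dumps(body))
print(resp.json())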

View File

@ -1,382 +0,0 @@
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo_config import cfg
from oslo_log import log as logging
import six
import webob.exc
from daisy.api import policy
from daisy.common import exception
from daisy.common import utils
from daisy.common import wsgi
import daisy.db
import daisy.gateway
from daisy import i18n
import daisy.notifier
import daisy.schema
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
CONF = cfg.CONF
class SearchController(object):
def __init__(self, plugins=None, es_api=None, policy_enforcer=None):
self.es_api = es_api or daisy.search.get_api()
self.policy = policy_enforcer or policy.Enforcer()
self.gateway = daisy.gateway.Gateway(
es_api=self.es_api,
policy_enforcer=self.policy)
self.plugins = plugins or []
def search(self, req, query, index, doc_type=None, fields=None, offset=0,
limit=10):
if fields is None:
fields = []
try:
search_repo = self.gateway.get_catalog_search_repo(req.context)
result = search_repo.search(index,
doc_type,
query,
fields,
offset,
limit,
True)
for plugin in self.plugins:
result = plugin.obj.filter_result(result, req.context)
return result
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Duplicate as e:
raise webob.exc.HTTPConflict(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
def plugins_info(self, req):
try:
search_repo = self.gateway.get_catalog_search_repo(req.context)
return search_repo.plugins_info()
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
def index(self, req, actions, default_index=None, default_type=None):
try:
search_repo = self.gateway.get_catalog_search_repo(req.context)
success, errors = search_repo.index(
default_index,
default_type,
actions)
return {
'success': success,
'failed': len(errors),
'errors': errors,
}
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Duplicate as e:
raise webob.exc.HTTPConflict(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
class RequestDeserializer(wsgi.JSONRequestDeserializer):
_disallowed_properties = ['self', 'schema']
def __init__(self, plugins, schema=None):
super(RequestDeserializer, self).__init__()
self.plugins = plugins
def _get_request_body(self, request):
output = super(RequestDeserializer, self).default(request)
if 'body' not in output:
msg = _('Body expected in request.')
raise webob.exc.HTTPBadRequest(explanation=msg)
return output['body']
@classmethod
def _check_allowed(cls, query):
for key in cls._disallowed_properties:
if key in query:
msg = _("Attribute '%s' is read-only.") % key
raise webob.exc.HTTPForbidden(explanation=msg)
def _get_available_indices(self):
return list(set([p.obj.get_index_name() for p in self.plugins]))
def _get_available_types(self):
return list(set([p.obj.get_document_type() for p in self.plugins]))
def _validate_index(self, index):
available_indices = self._get_available_indices()
if index not in available_indices:
msg = _("Index '%s' is not supported.") % index
raise webob.exc.HTTPBadRequest(explanation=msg)
return index
def _validate_doc_type(self, doc_type):
available_types = self._get_available_types()
if doc_type not in available_types:
msg = _("Document type '%s' is not supported.") % doc_type
raise webob.exc.HTTPBadRequest(explanation=msg)
return doc_type
def _validate_offset(self, offset):
try:
offset = int(offset)
except ValueError:
msg = _("offset param must be an integer")
raise webob.exc.HTTPBadRequest(explanation=msg)
if offset < 0:
msg = _("offset param must be positive")
raise webob.exc.HTTPBadRequest(explanation=msg)
return offset
def _validate_limit(self, limit):
try:
limit = int(limit)
except ValueError:
msg = _("limit param must be an integer")
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 1:
msg = _("limit param must be positive")
raise webob.exc.HTTPBadRequest(explanation=msg)
return limit
def _validate_actions(self, actions):
if not actions:
msg = _("actions param cannot be empty")
raise webob.exc.HTTPBadRequest(explanation=msg)
output = []
allowed_action_types = ['create', 'update', 'delete', 'index']
for action in actions:
action_type = action.get('action', 'index')
document_id = action.get('id')
document_type = action.get('type')
index_name = action.get('index')
data = action.get('data', {})
script = action.get('script')
if index_name is not None:
index_name = self._validate_index(index_name)
if document_type is not None:
document_type = self._validate_doc_type(document_type)
if action_type not in allowed_action_types:
msg = _("Invalid action type: '%s'") % action_type
raise webob.exc.HTTPBadRequest(explanation=msg)
elif (action_type in ['create', 'update', 'index'] and
not any([data, script])):
msg = (_("Action type '%s' requires data or script param.") %
action_type)
raise webob.exc.HTTPBadRequest(explanation=msg)
elif action_type in ['update', 'delete'] and not document_id:
msg = (_("Action type '%s' requires ID of the document.") %
action_type)
raise webob.exc.HTTPBadRequest(explanation=msg)
bulk_action = {
'_op_type': action_type,
'_id': document_id,
'_index': index_name,
'_type': document_type,
}
if script:
data_field = 'params'
bulk_action['script'] = script
elif action_type == 'update':
data_field = 'doc'
else:
data_field = '_source'
bulk_action[data_field] = data
output.append(bulk_action)
return output
def _get_query(self, context, query, doc_types):
is_admin = context.is_admin
if is_admin:
query_params = {
'query': {
'query': query
}
}
else:
filtered_query_list = []
for plugin in self.plugins:
try:
doc_type = plugin.obj.get_document_type()
rbac_filter = plugin.obj.get_rbac_filter(context)
except Exception as e:
LOG.error(_LE("Failed to retrieve RBAC filters "
"from search plugin "
"%(ext)s: %(e)s") %
{'ext': plugin.name, 'e': e})
# skip plugins whose RBAC filter could not be retrieved, so the
# unbound doc_type/rbac_filter names are never referenced below
continue
if doc_type in doc_types:
filter_query = {
"query": query,
"filter": rbac_filter
}
filtered_query = {
'filtered': filter_query
}
filtered_query_list.append(filtered_query)
query_params = {
'query': {
'query': {
"bool": {
"should": filtered_query_list
},
}
}
}
return query_params
def search(self, request):
body = self._get_request_body(request)
self._check_allowed(body)
query = body.pop('query', None)
indices = body.pop('index', None)
doc_types = body.pop('type', None)
fields = body.pop('fields', None)
offset = body.pop('offset', None)
limit = body.pop('limit', None)
highlight = body.pop('highlight', None)
if not indices:
indices = self._get_available_indices()
elif not isinstance(indices, (list, tuple)):
indices = [indices]
if not doc_types:
doc_types = self._get_available_types()
elif not isinstance(doc_types, (list, tuple)):
doc_types = [doc_types]
query_params = self._get_query(request.context, query, doc_types)
query_params['index'] = [self._validate_index(index)
for index in indices]
query_params['doc_type'] = [self._validate_doc_type(doc_type)
for doc_type in doc_types]
if fields is not None:
query_params['fields'] = fields
if offset is not None:
query_params['offset'] = self._validate_offset(offset)
if limit is not None:
query_params['limit'] = self._validate_limit(limit)
if highlight is not None:
query_params['query']['highlight'] = highlight
return query_params
def index(self, request):
body = self._get_request_body(request)
self._check_allowed(body)
default_index = body.pop('default_index', None)
if default_index is not None:
default_index = self._validate_index(default_index)
default_type = body.pop('default_type', None)
if default_type is not None:
default_type = self._validate_doc_type(default_type)
actions = self._validate_actions(body.pop('actions', None))
if not all([default_index, default_type]):
for action in actions:
if not any([action['_index'], default_index]):
msg = (_("Action index is missing and no default "
"index has been set."))
raise webob.exc.HTTPBadRequest(explanation=msg)
if not any([action['_type'], default_type]):
msg = (_("Action document type is missing and no default "
"type has been set."))
raise webob.exc.HTTPBadRequest(explanation=msg)
query_params = {
'default_index': default_index,
'default_type': default_type,
'actions': actions,
}
return query_params
class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, schema=None):
super(ResponseSerializer, self).__init__()
self.schema = schema
def search(self, response, query_result):
body = json.dumps(query_result, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def plugins_info(self, response, query_result):
body = json.dumps(query_result, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def index(self, response, query_result):
body = json.dumps(query_result, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def create_resource():
"""Search resource factory method"""
plugins = utils.get_search_plugins()
deserializer = RequestDeserializer(plugins)
serializer = ResponseSerializer()
controller = SearchController(plugins)
return wsgi.Resource(controller, deserializer, serializer)
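
To make the validation rules in _validate_actions above concrete, a
hypothetical body for POST /index might look like this ('update' and
'delete' require an id; 'create', 'update' and 'index' require data or a
script):

index_body = {
    'default_index': 'glance',
    'default_type': 'image',
    'actions': [
        {'action': 'create', 'id': 'abc-123', 'data': {'name': 'cirros'}},
        {'action': 'update', 'id': 'abc-123', 'data': {'name': 'cirros-0.3'}},
        {'action': 'delete', 'id': 'def-456'},
    ],
}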

View File

@ -1,140 +0,0 @@
# Copyright 2015 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from elasticsearch import helpers
import six
import daisy.search
@six.add_metaclass(abc.ABCMeta)
class IndexBase(object):
chunk_size = 200
def __init__(self):
self.engine = daisy.search.get_api()
self.index_name = self.get_index_name()
self.document_type = self.get_document_type()
def setup(self):
"""Comprehensively install search engine index and put data into it."""
self.setup_index()
self.setup_mapping()
self.setup_data()
def setup_index(self):
"""Create the index if it doesn't exist and update its settings."""
index_exists = self.engine.indices.exists(self.index_name)
if not index_exists:
self.engine.indices.create(index=self.index_name)
index_settings = self.get_settings()
if index_settings:
self.engine.indices.put_settings(index=self.index_name,
body=index_settings)
return index_exists
def setup_mapping(self):
"""Update index document mapping."""
index_mapping = self.get_mapping()
if index_mapping:
self.engine.indices.put_mapping(index=self.index_name,
doc_type=self.document_type,
body=index_mapping)
def setup_data(self):
"""Insert all objects from database into search engine."""
object_list = self.get_objects()
documents = []
for obj in object_list:
document = self.serialize(obj)
documents.append(document)
self.save_documents(documents)
def save_documents(self, documents, id_field='id'):
"""Send list of serialized documents into search engine."""
actions = []
for document in documents:
action = {
'_id': document.get(id_field),
'_source': document,
}
actions.append(action)
helpers.bulk(
client=self.engine,
index=self.index_name,
doc_type=self.document_type,
chunk_size=self.chunk_size,
actions=actions)
@abc.abstractmethod
def get_objects(self):
"""Get list of all objects which will be indexed into search engine."""
@abc.abstractmethod
def serialize(self, obj):
"""Serialize database object into valid search engine document."""
@abc.abstractmethod
def get_index_name(self):
"""Get name of the index."""
@abc.abstractmethod
def get_document_type(self):
"""Get name of the document type."""
@abc.abstractmethod
def get_rbac_filter(self, request_context):
"""Get rbac filter as es json filter dsl."""
def filter_result(self, result, request_context):
"""Filter the outgoing search result."""
return result
def get_settings(self):
"""Get an index settings."""
return {}
def get_mapping(self):
"""Get an index mapping."""
return {}
def get_notification_handler(self):
"""Get the notification handler which implements NotificationBase."""
return None
def get_notification_supported_events(self):
"""Get the list of suppported event types."""
return []
@six.add_metaclass(abc.ABCMeta)
class NotificationBase(object):
def __init__(self, engine, index_name, document_type):
self.engine = engine
self.index_name = index_name
self.document_type = document_type
@abc.abstractmethod
def process(self, ctxt, publisher_id, event_type, payload, metadata):
"""Process the incoming notification message."""

View File

@ -1,163 +0,0 @@
# Copyright 2015 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.orm import joinedload
from oslo_utils import timeutils
from daisy.api import policy
from daisy.common import property_utils
import daisy.db
from daisy.db.sqlalchemy import models
from daisy.search.plugins import base
from daisy.search.plugins import images_notification_handler
class ImageIndex(base.IndexBase):
def __init__(self, db_api=None, policy_enforcer=None):
super(ImageIndex, self).__init__()
self.db_api = db_api or daisy.db.get_api()
self.policy = policy_enforcer or policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.property_rules = property_utils.PropertyRules(self.policy)
self._image_base_properties = [
'checksum', 'created_at', 'container_format', 'disk_format', 'id',
'min_disk', 'min_ram', 'name', 'size', 'virtual_size', 'status',
'tags', 'updated_at', 'visibility', 'protected', 'owner',
'members']
def get_index_name(self):
return 'glance'
def get_document_type(self):
return 'image'
def get_mapping(self):
return {
'dynamic': True,
'properties': {
'id': {'type': 'string', 'index': 'not_analyzed'},
'name': {'type': 'string'},
'description': {'type': 'string'},
'tags': {'type': 'string'},
'disk_format': {'type': 'string'},
'container_format': {'type': 'string'},
'size': {'type': 'long'},
'virtual_size': {'type': 'long'},
'status': {'type': 'string'},
'visibility': {'type': 'string'},
'checksum': {'type': 'string'},
'min_disk': {'type': 'long'},
'min_ram': {'type': 'long'},
'owner': {'type': 'string', 'index': 'not_analyzed'},
'protected': {'type': 'boolean'},
'members': {'type': 'string', 'index': 'not_analyzed'},
"created_at": {'type': 'date'},
"updated_at": {'type': 'date'}
},
}
def get_rbac_filter(self, request_context):
return [
{
"and": [
{
'or': [
{
'term': {
'owner': request_context.owner
}
},
{
'term': {
'visibility': 'public'
}
},
{
'term': {
'members': request_context.tenant
}
}
]
},
{
'type': {
'value': self.get_document_type()
}
}
]
}
]
def filter_result(self, result, request_context):
if property_utils.is_property_protection_enabled():
hits = result['hits']['hits']
for hit in hits:
if hit['_type'] == self.get_document_type():
source = hit['_source']
for key in source.keys():
if key not in self._image_base_properties:
if not self.property_rules.check_property_rules(
key, 'read', request_context):
del hit['_source'][key]
return result
def get_objects(self):
session = self.db_api.get_session()
images = session.query(models.Image).options(
joinedload('properties'), joinedload('members'), joinedload('tags')
).filter_by(deleted=False)
return images
def serialize(self, obj):
visibility = 'public' if obj.is_public else 'private'
members = []
for member in obj.members:
if member.status == 'accepted' and member.deleted == 0:
members.append(member.member)
document = {
'id': obj.id,
'name': obj.name,
'tags': obj.tags,
'disk_format': obj.disk_format,
'container_format': obj.container_format,
'size': obj.size,
'virtual_size': obj.virtual_size,
'status': obj.status,
'visibility': visibility,
'checksum': obj.checksum,
'min_disk': obj.min_disk,
'min_ram': obj.min_ram,
'owner': obj.owner,
'protected': obj.protected,
'members': members,
'created_at': timeutils.isotime(obj.created_at),
'updated_at': timeutils.isotime(obj.updated_at)
}
for image_property in obj.properties:
document[image_property.name] = image_property.value
return document
def get_notification_handler(self):
return images_notification_handler.ImageHandler(
self.engine,
self.get_index_name(),
self.get_document_type()
)
def get_notification_supported_events(self):
return ['image.create', 'image.update', 'image.delete']
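
For a non-admin request, the filter returned by get_rbac_filter() above
limits hits to images that are owned by the caller, shared with it, or
public. For a hypothetical tenant 'tenant-a' it evaluates to:

rbac_filter = [{
    'and': [
        {'or': [
            {'term': {'owner': 'tenant-a'}},
            {'term': {'visibility': 'public'}},
            {'term': {'members': 'tenant-a'}},
        ]},
        {'type': {'value': 'image'}},
    ],
}]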

View File

@ -1,83 +0,0 @@
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
import oslo_messaging
from daisy.common import utils
from daisy.search.plugins import base
LOG = logging.getLogger(__name__)
class ImageHandler(base.NotificationBase):
def __init__(self, *args, **kwargs):
super(ImageHandler, self).__init__(*args, **kwargs)
self.image_delete_keys = ['deleted_at', 'deleted',
'is_public', 'properties']
def process(self, ctxt, publisher_id, event_type, payload, metadata):
try:
actions = {
"image.create": self.create,
"image.update": self.update,
"image.delete": self.delete
}
actions[event_type](payload)
return oslo_messaging.NotificationResult.HANDLED
except Exception as e:
LOG.error(utils.exception_to_str(e))
def create(self, payload):
id = payload['id']
payload = self.format_image(payload)
self.engine.create(
index=self.index_name,
doc_type=self.document_type,
body=payload,
id=id
)
def update(self, payload):
id = payload['id']
payload = self.format_image(payload)
doc = {"doc": payload}
self.engine.update(
index=self.index_name,
doc_type=self.document_type,
body=doc,
id=id
)
def delete(self, payload):
id = payload['id']
self.engine.delete(
index=self.index_name,
doc_type=self.document_type,
id=id
)
def format_image(self, payload):
visibility = 'public' if payload['is_public'] else 'private'
payload['visibility'] = visibility
# default must be a dict, not the string '{}', or update() would fail
payload.update(payload.get('properties', {}))
for key in payload.keys():
if key in self.image_delete_keys:
del payload[key]
return payload
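
To make the payload munging concrete, an illustrative run of format_image()
on an image.create payload (field values are made up):

payload = {'id': 'abc-123', 'is_public': True, 'deleted': False,
           'deleted_at': None, 'properties': {'os_distro': 'ubuntu'}}
handler = ImageHandler(engine=None, index_name='glance',
                       document_type='image')  # engine unused by this method
doc = handler.format_image(payload)
# doc == {'id': 'abc-123', 'visibility': 'public', 'os_distro': 'ubuntu'}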

View File

@ -1,259 +0,0 @@
# Copyright 2015 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import six
import daisy.db
from daisy.db.sqlalchemy import models_metadef as models
from daisy.search.plugins import base
from daisy.search.plugins import metadefs_notification_handler
class MetadefIndex(base.IndexBase):
def __init__(self):
super(MetadefIndex, self).__init__()
self.db_api = daisy.db.get_api()
def get_index_name(self):
return 'glance'
def get_document_type(self):
return 'metadef'
def get_mapping(self):
property_mapping = {
'dynamic': True,
'type': 'nested',
'properties': {
'property': {'type': 'string', 'index': 'not_analyzed'},
'type': {'type': 'string'},
'title': {'type': 'string'},
'description': {'type': 'string'},
}
}
mapping = {
'_id': {
'path': 'namespace',
},
'properties': {
'display_name': {'type': 'string'},
'description': {'type': 'string'},
'namespace': {'type': 'string', 'index': 'not_analyzed'},
'owner': {'type': 'string', 'index': 'not_analyzed'},
'visibility': {'type': 'string', 'index': 'not_analyzed'},
'resource_types': {
'type': 'nested',
'properties': {
'name': {'type': 'string'},
'prefix': {'type': 'string'},
'properties_target': {'type': 'string'},
},
},
'objects': {
'type': 'nested',
'properties': {
'id': {'type': 'string', 'index': 'not_analyzed'},
'name': {'type': 'string'},
'description': {'type': 'string'},
'properties': property_mapping,
}
},
'properties': property_mapping,
'tags': {
'type': 'nested',
'properties': {
'name': {'type': 'string'},
}
}
},
}
return mapping
def get_rbac_filter(self, request_context):
# TODO(krykowski): Define base get_rbac_filter in IndexBase class
# which will provide some common subset of query pieces.
# Something like:
# def get_common_context_pieces(self, request_context):
# return [{'term': {'owner': request_context.owner,
# 'type': {'value': self.get_document_type()}}]
return [
{
"and": [
{
'or': [
{
'term': {
'owner': request_context.owner
}
},
{
'term': {
'visibility': 'public'
}
}
]
},
{
'type': {
'value': self.get_document_type()
}
}
]
}
]
def get_objects(self):
session = self.db_api.get_session()
namespaces = session.query(models.MetadefNamespace).all()
resource_types = session.query(models.MetadefResourceType).all()
resource_types_map = {r.id: r.name for r in resource_types}
for namespace in namespaces:
namespace.resource_types = self.get_namespace_resource_types(
namespace.id, resource_types_map)
namespace.objects = self.get_namespace_objects(namespace.id)
namespace.properties = self.get_namespace_properties(namespace.id)
namespace.tags = self.get_namespace_tags(namespace.id)
return namespaces
def get_namespace_resource_types(self, namespace_id, resource_types):
session = self.db_api.get_session()
namespace_resource_types = session.query(
models.MetadefNamespaceResourceType
).filter_by(namespace_id=namespace_id)
resource_associations = [{
'prefix': r.prefix,
'properties_target': r.properties_target,
'name': resource_types[r.resource_type_id],
} for r in namespace_resource_types]
return resource_associations
def get_namespace_properties(self, namespace_id):
session = self.db_api.get_session()
properties = session.query(
models.MetadefProperty
).filter_by(namespace_id=namespace_id)
return list(properties)
def get_namespace_objects(self, namespace_id):
session = self.db_api.get_session()
namespace_objects = session.query(
models.MetadefObject
).filter_by(namespace_id=namespace_id)
return list(namespace_objects)
def get_namespace_tags(self, namespace_id):
session = self.db_api.get_session()
namespace_tags = session.query(
models.MetadefTag
).filter_by(namespace_id=namespace_id)
return list(namespace_tags)
def serialize(self, obj):
object_docs = [self.serialize_object(ns_obj) for ns_obj in obj.objects]
property_docs = [self.serialize_property(prop.name, prop.json_schema)
for prop in obj.properties]
resource_type_docs = [self.serialize_namespace_resource_type(rt)
for rt in obj.resource_types]
tag_docs = [self.serialize_tag(tag) for tag in obj.tags]
namespace_doc = self.serialize_namespace(obj)
namespace_doc.update({
'objects': object_docs,
'properties': property_docs,
'resource_types': resource_type_docs,
'tags': tag_docs,
})
return namespace_doc
def serialize_namespace(self, namespace):
return {
'namespace': namespace.namespace,
'display_name': namespace.display_name,
'description': namespace.description,
'visibility': namespace.visibility,
'protected': namespace.protected,
'owner': namespace.owner,
}
def serialize_object(self, obj):
obj_properties = obj.json_schema
property_docs = []
for name, schema in six.iteritems(obj_properties):
property_doc = self.serialize_property(name, schema)
property_docs.append(property_doc)
document = {
'name': obj.name,
'description': obj.description,
'properties': property_docs,
}
return document
def serialize_property(self, name, schema):
document = copy.deepcopy(schema)
document['property'] = name
if 'default' in document:
document['default'] = str(document['default'])
if 'enum' in document:
document['enum'] = map(str, document['enum'])
return document
def serialize_namespace_resource_type(self, ns_resource_type):
return {
'name': ns_resource_type['name'],
'prefix': ns_resource_type['prefix'],
'properties_target': ns_resource_type['properties_target']
}
def serialize_tag(self, tag):
return {
'name': tag.name
}
def get_notification_handler(self):
return metadefs_notification_handler.MetadefHandler(
self.engine,
self.get_index_name(),
self.get_document_type()
)
def get_notification_supported_events(self):
return [
"metadef_namespace.create",
"metadef_namespace.update",
"metadef_namespace.delete",
"metadef_object.create",
"metadef_object.update",
"metadef_object.delete",
"metadef_property.create",
"metadef_property.update",
"metadef_property.delete",
"metadef_tag.create",
"metadef_tag.update",
"metadef_tag.delete",
"metadef_resource_type.create",
"metadef_resource_type.delete",
"metadef_namespace.delete_properties",
"metadef_namespace.delete_objects",
"metadef_namespace.delete_tags"
]
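
The serialize() chain above flattens a namespace and its children into a
single document; a hypothetical result for one namespace:

namespace_doc = {
    'namespace': 'OS::Compute::Demo',
    'display_name': 'Demo',
    'description': 'Example namespace',
    'visibility': 'public',
    'protected': False,
    'owner': 'admin',
    'resource_types': [{'name': 'OS::Nova::Server', 'prefix': 'hw_',
                        'properties_target': None}],
    'objects': [],
    'properties': [{'property': 'hw_cpu_cores', 'type': 'integer',
                    'title': 'vCPUs', 'description': 'Number of cores'}],
    'tags': [{'name': 'demo'}],
}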

View File

@ -1,251 +0,0 @@
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from oslo_log import log as logging
import oslo_messaging
from daisy.common import utils
from daisy.search.plugins import base
LOG = logging.getLogger(__name__)
class MetadefHandler(base.NotificationBase):
def __init__(self, *args, **kwargs):
super(MetadefHandler, self).__init__(*args, **kwargs)
self.namespace_delete_keys = ['deleted_at', 'deleted', 'created_at',
'updated_at', 'namespace_old']
self.property_delete_keys = ['deleted', 'deleted_at',
'name_old', 'namespace', 'name']
def process(self, ctxt, publisher_id, event_type, payload, metadata):
try:
actions = {
"metadef_namespace.create": self.create_ns,
"metadef_namespace.update": self.update_ns,
"metadef_namespace.delete": self.delete_ns,
"metadef_object.create": self.create_obj,
"metadef_object.update": self.update_obj,
"metadef_object.delete": self.delete_obj,
"metadef_property.create": self.create_prop,
"metadef_property.update": self.update_prop,
"metadef_property.delete": self.delete_prop,
"metadef_resource_type.create": self.create_rs,
"metadef_resource_type.delete": self.delete_rs,
"metadef_tag.create": self.create_tag,
"metadef_tag.update": self.update_tag,
"metadef_tag.delete": self.delete_tag,
"metadef_namespace.delete_properties": self.delete_props,
"metadef_namespace.delete_objects": self.delete_objects,
"metadef_namespace.delete_tags": self.delete_tags
}
actions[event_type](payload)
return oslo_messaging.NotificationResult.HANDLED
except Exception as e:
LOG.error(utils.exception_to_str(e))
def run_create(self, id, payload):
self.engine.create(
index=self.index_name,
doc_type=self.document_type,
body=payload,
id=id
)
def run_update(self, id, payload, script=False):
if script:
self.engine.update(
index=self.index_name,
doc_type=self.document_type,
body=payload,
id=id)
else:
doc = {"doc": payload}
self.engine.update(
index=self.index_name,
doc_type=self.document_type,
body=doc,
id=id)
def run_delete(self, id):
self.engine.delete(
index=self.index_name,
doc_type=self.document_type,
id=id
)
def create_ns(self, payload):
id = payload['namespace']
self.run_create(id, self.format_namespace(payload))
def update_ns(self, payload):
id = payload['namespace_old']
self.run_update(id, self.format_namespace(payload))
def delete_ns(self, payload):
id = payload['namespace']
self.run_delete(id)
def create_obj(self, payload):
id = payload['namespace']
object = self.format_object(payload)
self.create_entity(id, "objects", object)
def update_obj(self, payload):
id = payload['namespace']
object = self.format_object(payload)
self.update_entity(id, "objects", object,
payload['name_old'], "name")
def delete_obj(self, payload):
id = payload['namespace']
self.delete_entity(id, "objects", payload['name'], "name")
def create_prop(self, payload):
id = payload['namespace']
property = self.format_property(payload)
self.create_entity(id, "properties", property)
def update_prop(self, payload):
id = payload['namespace']
property = self.format_property(payload)
self.update_entity(id, "properties", property,
payload['name_old'], "property")
def delete_prop(self, payload):
id = payload['namespace']
self.delete_entity(id, "properties", payload['name'], "property")
def create_rs(self, payload):
id = payload['namespace']
resource_type = dict()
resource_type['name'] = payload['name']
if payload['prefix']:
resource_type['prefix'] = payload['prefix']
if payload['properties_target']:
resource_type['properties_target'] = payload['properties_target']
self.create_entity(id, "resource_types", resource_type)
def delete_rs(self, payload):
id = payload['namespace']
self.delete_entity(id, "resource_types", payload['name'], "name")
def create_tag(self, payload):
id = payload['namespace']
tag = dict()
tag['name'] = payload['name']
self.create_entity(id, "tags", tag)
def update_tag(self, payload):
id = payload['namespace']
tag = dict()
tag['name'] = payload['name']
self.update_entity(id, "tags", tag, payload['name_old'], "name")
def delete_tag(self, payload):
id = payload['namespace']
self.delete_entity(id, "tags", payload['name'], "name")
def delete_props(self, payload):
self.delete_field(payload, "properties")
def delete_objects(self, payload):
self.delete_field(payload, "objects")
def delete_tags(self, payload):
self.delete_field(payload, "tags")
def create_entity(self, id, entity, entity_data):
script = ("if (ctx._source.containsKey('%(entity)s'))"
"{ctx._source.%(entity)s += entity_item }"
"else {ctx._source.%(entity)s=entity_list};" %
{"entity": entity})
params = {
"entity_item": entity_data,
"entity_list": [entity_data]
}
payload = {"script": script, "params": params}
self.run_update(id, payload=payload, script=True)
def update_entity(self, id, entity, entity_data, entity_id, field_name):
entity_id = entity_id.lower()
script = ("obj=null; for(entity_item :ctx._source.%(entity)s)"
"{if(entity_item['%(field_name)s'].toLowerCase() "
" == entity_id ) obj=entity_item;};"
"if(obj!=null)ctx._source.%(entity)s.remove(obj);"
"if (ctx._source.containsKey('%(entity)s'))"
"{ctx._source.%(entity)s += entity_item; }"
"else {ctx._source.%(entity)s=entity_list;}" %
{"entity": entity, "field_name": field_name})
params = {
"entity_item": entity_data,
"entity_list": [entity_data],
"entity_id": entity_id
}
payload = {"script": script, "params": params}
self.run_update(id, payload=payload, script=True)
def delete_entity(self, id, entity, entity_id, field_name):
entity_id = entity_id.lower()
script = ("obj=null; for(entity_item :ctx._source.%(entity)s)"
"{if(entity_item['%(field_name)s'].toLowerCase() "
" == entity_id ) obj=entity_item;};"
"if(obj!=null)ctx._source.%(entity)s.remove(obj);" %
{"entity": entity, "field_name": field_name})
params = {
"entity_id": entity_id
}
payload = {"script": script, "params": params}
self.run_update(id, payload=payload, script=True)
def delete_field(self, payload, field):
id = payload['namespace']
script = ("if (ctx._source.containsKey('%(field)s'))"
"{ctx._source.remove('%(field)s')}") % {"field": field}
payload = {"script": script}
self.run_update(id, payload=payload, script=True)
def format_namespace(self, payload):
for key in self.namespace_delete_keys:
if key in payload.keys():
del payload[key]
return payload
def format_object(self, payload):
formatted_object = dict()
formatted_object['name'] = payload['name']
formatted_object['description'] = payload['description']
if payload['required']:
formatted_object['required'] = payload['required']
formatted_object['properties'] = []
for property in payload['properties']:
formatted_property = self.format_property(property)
formatted_object['properties'].append(formatted_property)
return formatted_object
def format_property(self, payload):
prop_data = dict()
prop_data['property'] = payload['name']
for key, value in six.iteritems(payload):
if key not in self.property_delete_keys and value:
prop_data[key] = value
return prop_data
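
As an illustration, create_entity(id, 'tags', {'name': 'demo'}) sends an
update-by-script request along these lines (Elasticsearch 1.x-style script,
as rendered by the template above):

update_body = {
    'script': "if (ctx._source.containsKey('tags'))"
              "{ctx._source.tags += entity_item }"
              "else {ctx._source.tags=entity_list};",
    'params': {'entity_item': {'name': 'demo'},
               'entity_list': [{'name': 'demo'}]},
}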

View File

@ -1,107 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012-2014 eNovance <licensing@enovance.com>
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import sys
from oslo_config import cfg
from oslo import i18n
import oslo.messaging
from oslo_log import log
CONF = cfg.CONF
OPTS = [
cfg.StrOpt('host',
default=socket.gethostname(),
help='Name of this node, which must be valid in an AMQP '
'key. Can be an opaque identifier. For ZeroMQ only, must '
'be a valid host name, FQDN, or IP address.'),
cfg.IntOpt('listener_workers',
default=1,
help='Number of workers for notification service. A single '
'notification agent is enabled by default.'),
cfg.IntOpt('http_timeout',
default=600,
help='Timeout seconds for HTTP requests. Set it to None to '
'disable timeout.'),
]
CONF.register_opts(OPTS)
CLI_OPTS = [
cfg.StrOpt('os-username',
deprecated_group="DEFAULT",
default=os.environ.get('OS_USERNAME', 'daisy'),
help='User name to use for OpenStack service access.'),
cfg.StrOpt('os-password',
deprecated_group="DEFAULT",
secret=True,
default=os.environ.get('OS_PASSWORD', 'admin'),
help='Password to use for OpenStack service access.'),
cfg.StrOpt('os-tenant-id',
deprecated_group="DEFAULT",
default=os.environ.get('OS_TENANT_ID', ''),
help='Tenant ID to use for OpenStack service access.'),
cfg.StrOpt('os-tenant-name',
deprecated_group="DEFAULT",
default=os.environ.get('OS_TENANT_NAME', 'admin'),
help='Tenant name to use for OpenStack service access.'),
cfg.StrOpt('os-cacert',
default=os.environ.get('OS_CACERT'),
help='Certificate chain for SSL validation.'),
cfg.StrOpt('os-auth-url',
deprecated_group="DEFAULT",
default=os.environ.get('OS_AUTH_URL',
'http://localhost:5000/v2.0'),
help='Auth URL to use for OpenStack service access.'),
cfg.StrOpt('os-region-name',
deprecated_group="DEFAULT",
default=os.environ.get('OS_REGION_NAME'),
help='Region name to use for OpenStack service endpoints.'),
cfg.StrOpt('os-endpoint-type',
default=os.environ.get('OS_ENDPOINT_TYPE', 'publicURL'),
help='Type of endpoint in Identity service catalog to use for '
'communication with OpenStack services.'),
cfg.BoolOpt('insecure',
default=False,
help='Disables X.509 certificate validation when an '
'SSL connection to Identity Service is established.'),
]
CONF.register_cli_opts(CLI_OPTS, group="service_credentials")
LOG = log.getLogger(__name__)
_DEFAULT_LOG_LEVELS = ['keystonemiddleware=WARN', 'stevedore=WARN']
class WorkerException(Exception):
"""Exception for errors relating to service workers."""
def get_workers(name):
return 1
def prepare_service(argv=None):
i18n.enable_lazy()
log.set_defaults(_DEFAULT_LOG_LEVELS)
log.register_options(CONF)
if argv is None:
argv = sys.argv
CONF(argv[1:], project='daisy-search')
log.setup(cfg.CONF, 'daisy-search')
oslo.messaging.set_transport_defaults('daisy')
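
An entry point would call prepare_service() before spawning listeners. A
sketch, assuming this module is importable as daisy.search.service:

import sys

from daisy.search import service  # module path is an assumption

def main():
    service.prepare_service(sys.argv)
    workers = service.get_workers('listener')  # currently always 1
    # ... spawn 'workers' notification listeners here ...

if __name__ == '__main__':
    main()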

View File

@ -1,60 +0,0 @@
..
Copyright 2013 OpenStack Foundation
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Common Image Properties
=======================
When adding an image to Glance, you may specify some common image properties
that may prove useful to consumers of your image.
This document explains the names of these properties and the expected values.
The common image properties are also described in a JSON schema, found in
etc/schema-image.json in the Glance source code.
**architecture**
----------------
Operating system architecture as specified in
http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html
**instance_uuid**
-----------------
The ID of the instance used to create this image.
**kernel_id**
-------------
The ID of image stored in Glance that should be used as the kernel when booting
an AMI-style image.
**ramdisk_id**
--------------
The ID of image stored in Glance that should be used as the ramdisk when
booting an AMI-style image.
**os_distro**
-------------
The common name of the operating system distribution as specified in
http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html
**os_version**
--------------
The operating system version as specified by the distributor.
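For example, such properties can be attached with the glance client's
``--property`` flag (a hedged example; the image ID is a placeholder)::

   glance image-update --property architecture=x86_64 \
                       --property os_distro=ubuntu <IMAGE_ID>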

View File

@ -1,47 +0,0 @@
====================
daisy-cache-cleaner
====================
----------------------------------------------------------------
daisy Image Cache Invalid Cache Entry and Stalled Image cleaner
----------------------------------------------------------------
:Author: daisy@lists.launchpad.net
:Date: 2014-01-16
:Copyright: OpenStack LLC
:Version: 2014.1
:Manual section: 1
:Manual group: cloud computing
SYNOPSIS
========
daisy-cache-cleaner [options]
DESCRIPTION
===========
This is meant to be run as a periodic task from cron.
If something goes wrong while we're caching an image (for example the fetch
times out, or an exception is raised), we create an 'invalid' entry. These
entries are left around for debugging purposes. However, after some period of
time, we want to clean these up.
Also, if an incomplete image hangs around past the image_cache_stall_time
period, we automatically sweep it up.
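For instance, a crontab entry along these lines would run the cleaner hourly
(the binary path is an assumption)::

   0 * * * * /usr/bin/daisy-cache-cleaner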
OPTIONS
=======
**General options**
.. include:: general_options.rst
FILES
======
**/etc/daisy/daisy-cache.conf**
Default configuration file for the daisy Cache
.. include:: footer.rst

View File

@ -1,88 +0,0 @@
===================
daisy-cache-manage
===================
------------------------
Cache management utility
------------------------
:Author: daisy@lists.launchpad.net
:Date: 2014-01-16
:Copyright: OpenStack LLC
:Version: 2014.1
:Manual section: 1
:Manual group: cloud computing
SYNOPSIS
========
daisy-cache-manage <command> [options] [args]
COMMANDS
========
**help <command>**
Output help for one of the commands below
**list-cached**
List all images currently cached
**list-queued**
List all images currently queued for caching
**queue-image**
Queue an image for caching
**delete-cached-image**
Purges an image from the cache
**delete-all-cached-images**
Removes all images from the cache
**delete-queued-image**
Deletes an image from the cache queue
**delete-all-queued-images**
Deletes all images from the cache queue
OPTIONS
=======
**--version**
show program's version number and exit
**-h, --help**
show this help message and exit
**-v, --verbose**
Print more verbose output
**-d, --debug**
Print more verbose output
**-H ADDRESS, --host=ADDRESS**
Address of daisy API host.
Default: 0.0.0.0
**-p PORT, --port=PORT**
Port the daisy API host listens on.
Default: 9292
**-k, --insecure**
Explicitly allow daisy to perform "insecure" SSL
(https) requests. The server's certificate will not be
verified against any certificate authorities. This
option should be used with caution.
**-A TOKEN, --auth_token=TOKEN**
Authentication token to use to identify the client to the daisy server
**-f, --force**
Prevent select actions from requesting user confirmation
**-S STRATEGY, --os-auth-strategy=STRATEGY**
Authentication strategy (keystone or noauth)
.. include:: openstack_options.rst
.. include:: footer.rst

View File

@ -1,40 +0,0 @@
=======================
daisy-cache-prefetcher
=======================
------------------------------
daisy Image Cache Pre-fetcher
------------------------------
:Author: daisy@lists.launchpad.net
:Date: 2014-01-16
:Copyright: OpenStack LLC
:Version: 2014.1
:Manual section: 1
:Manual group: cloud computing
SYNOPSIS
========
daisy-cache-prefetcher [options]
DESCRIPTION
===========
This is meant to be run from the command line after queueing
images to be prefetched.
OPTIONS
=======
**General options**
.. include:: general_options.rst
FILES
=====
**/etc/daisy/daisy-cache.conf**
Default configuration file for the daisy Cache
.. include:: footer.rst

View File

@ -1,41 +0,0 @@
===================
daisy-cache-pruner
===================
-------------------
daisy cache pruner
-------------------
:Author: daisy@lists.launchpad.net
:Date: 2014-01-16
:Copyright: OpenStack LLC
:Version: 2014.1
:Manual section: 1
:Manual group: cloud computing
SYNOPSIS
========
daisy-cache-pruner [options]
DESCRIPTION
===========
Prunes images from the daisy cache when the space exceeds the value
set in the image_cache_max_size configuration option. This is meant
to be run as a periodic task, perhaps every half-hour.
OPTIONS
========
**General options**
.. include:: general_options.rst
FILES
=====
**/etc/daisy/daisy-cache.conf**
Default configuration file for the daisy Cache
.. include:: footer.rst

View File

@ -1,90 +0,0 @@
=================
daisy-replicator
=================
---------------------------------------------
Replicate images across multiple data centers
---------------------------------------------
:Author: daisy@lists.launchpad.net
:Date: 2014-01-16
:Copyright: OpenStack LLC
:Version: 2014.1
:Manual section: 1
:Manual group: cloud computing
SYNOPSIS
========
daisy-replicator <command> [options] [args]
DESCRIPTION
===========
daisy-replicator is a utility that can be used to populate a new daisy
server using the images stored in an existing daisy server. The images
in the replicated daisy server preserve the uuids, metadata, and image
data from the original.
COMMANDS
========
**help <command>**
Output help for one of the commands below
**compare**
What is missing from the slave daisy?
**dump**
Dump the contents of a daisy instance to local disk.
**livecopy**
Load the contents of one daisy instance into another.
**load**
Load the contents of a local directory into daisy.
**size**
Determine the size of a daisy instance if dumped to disk.
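For example, to mirror one daisy server into another (hosts and tokens are
placeholders)::

   daisy-replicator livecopy fromserver:9292 toserver:9292 \
       -M <mastertoken> -S <slavetoken>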
OPTIONS
=======
**-h, --help**
Show this help message and exit
**-c CHUNKSIZE, --chunksize=CHUNKSIZE**
Amount of data to transfer per HTTP write
**-d, --debug**
Print debugging information
**-D DONTREPLICATE, --dontreplicate=DONTREPLICATE**
List of fields to not replicate
**-m, --metaonly**
Only replicate metadata, not images
**-l LOGFILE, --logfile=LOGFILE**
Path of file to log to
**-s, --syslog**
Log to syslog instead of a file
**-t TOKEN, --token=TOKEN**
Pass in your authentication token if you have one. If
you use this option the same token is used for both
the master and the slave.
**-M MASTERTOKEN, --mastertoken=MASTERTOKEN**
Pass in your authentication token if you have one.
This is the token used for the master.
**-S SLAVETOKEN, --slavetoken=SLAVETOKEN**
Pass in your authentication token if you have one.
This is the token used for the slave.
**-v, --verbose**
Print more verbose output
.. include:: footer.rst

View File

@ -1,149 +0,0 @@
..
Copyright 2013 OpenStack Foundation
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Property Protections
====================
There are two types of image properties in Glance:
* Core Properties, as specified by the image schema.
* Meta Properties, which are arbitrary key/value pairs that can be added to an
image.
Access to meta properties through Glance's public API calls may be
restricted to certain sets of users, using a property protections configuration
file.
This document explains exactly how property protections are configured and what
they apply to.
Constructing a Property Protections Configuration File
------------------------------------------------------
A property protections configuration file follows the format of the Glance API
configuration file, which consists of sections, led by a ``[section]`` header
and followed by ``name = value`` entries. Each section header is a regular
expression matching a set of properties to be protected.
.. note::
Section headers must compile to a valid regular expression, otherwise the
glance api service will not start. Regular expressions are handled by
Python's re module, which is Perl-like.
Each section describes four key-value pairs, where the key is one of
``create/read/update/delete``, and the value is a comma separated list of user
roles that are permitted to perform that operation in the Glance API. **If any of
the keys are not specified, then the glance api service will not start
successfully.**
In the list of user roles, ``@`` means all roles and ``!`` means no role.
**If both @ and ! are specified for the same rule then the glance api service
will not start**
.. note::
Only one policy rule is allowed per property operation. **If multiple are
specified, then the glance api service will not start.**
The path to the file should be specified in the ``[DEFAULT]`` section of
``glance-api.conf`` as follows.
::
property_protection_file=/path/to/file
If this config value is not specified, property protections are not enforced.
**If the path is invalid, glance api service will not start successfully.**
The file may use either roles or policies to describe the property protections.
The config value should be specified in the ``[DEFAULT]`` section of
``glance-api.conf`` as follows.
::
property_protection_rule_format=<roles|policies>
The default value for ``property_protection_rule_format`` is ``roles``.
Property protections are applied in the order specified in the configuration
file. This means that if, for example, you specify a section with ``[.*]`` at
the top of the file, all following sections will be ignored.
If a property does not match any of the given rules, all operations will be
disabled for all roles.
If an operation is misspelled or omitted, that operation will be disabled for
all roles.
Disallowing ``read`` operations will also disallow ``update/delete`` operations.
A successful HTTP request will return status ``200 OK``. If the user is not
permitted to perform the requested action, ``403 Forbidden`` will be returned.
V1 API X-glance-registry-Purge-props
------------------------------------
Property protections will still be honoured if
``X-glance-registry-Purge-props`` is set to ``True``. That is, if you request
to modify properties with this header set to ``True``, you will not be able to
delete or update properties for which you do not have the relevant permissions.
Properties which are not included in the request and for which you do have
delete permissions will still be removed.
Examples
--------
**Example 1**. Limit all property interactions to admin only.
::
[.*]
create = admin
read = admin
update = admin
delete = admin
**Example 2**. Allow both admins and users with the billing role to read
and modify properties prefixed with ``x_billing_code_``. Allow admins to
read and modify any properties.
::
[^x_billing_code_.*]
create = admin,billing
read = admin, billing
update = admin,billing
delete = admin,billing
[.*]
create = admin
read = admin
update = admin
delete = admin
**Example 3**. Limit all property interactions to admin only using policy
rule context_is_admin defined in policy.json.
::
[.*]
create = context_is_admin
read = context_is_admin
update = context_is_admin
delete = context_is_admin

View File

@ -1,265 +0,0 @@
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
#verbose = False
# Show debugging output in logs (sets DEBUG log level output)
#debug = False
# Log to this file. Make sure you do not set the same log file for both the API
# and registry servers!
#
# If `log_file` is omitted and `use_syslog` is false, then log messages are
# sent to stdout as a fallback.
log_file = /var/log/daisy/image-cache.log
# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
#use_syslog = False
# Directory that the Image Cache writes data to
image_cache_dir = /var/lib/daisy/image-cache/
# Number of seconds after which we should consider an incomplete image to be
# stalled and eligible for reaping
image_cache_stall_time = 86400
# The upper limit (the maximum size of accumulated cache in bytes) beyond
# which pruner, if running, starts cleaning the images cache.
image_cache_max_size = 10737418240
# Address to find the registry server
registry_host = 0.0.0.0
# Port the registry server is listening on
registry_port = 9191
# Auth settings if using Keystone
# auth_url = http://127.0.0.1:5000/v2.0/
# admin_tenant_name = %SERVICE_TENANT_NAME%
# admin_user = %SERVICE_USER%
# admin_password = %SERVICE_PASSWORD%
# List of which store classes and store class locations are
# currently known to glance at startup.
# known_stores = glance.store.filesystem.Store,
# glance.store.http.Store,
# glance.store.rbd.Store,
# glance.store.s3.Store,
# glance.store.swift.Store,
# glance.store.sheepdog.Store,
# glance.store.cinder.Store,
# glance.store.vmware_datastore.Store,
# ============ Filesystem Store Options ========================
# Directory that the Filesystem backend store
# writes image data to
filesystem_store_datadir = /var/lib/glance/images/
# ============ Swift Store Options =============================
# Version of the authentication service to use
# Valid versions are '2' for keystone and '1' for swauth and rackspace
swift_store_auth_version = 2
# Address where the Swift authentication service lives
# Valid schemes are 'http://' and 'https://'
# If no scheme specified, default to 'https://'
# For swauth, use something like '127.0.0.1:8080/v1.0/'
swift_store_auth_address = 127.0.0.1:5000/v2.0/
# User to authenticate against the Swift authentication service
# If you use Swift authentication service, set it to 'account':'user'
# where 'account' is a Swift storage account and 'user'
# is a user in that account
swift_store_user = jdoe:jdoe
# Auth key for the user authenticating against the
# Swift authentication service
swift_store_key = a86850deb2742ec3cb41518e26aa2d89
# Container within the account that the account should use
# for storing images in Swift
swift_store_container = glance
# Do we create the container if it does not exist?
swift_store_create_container_on_put = False
# What size, in MB, should Glance start chunking image files
# and do a large object manifest in Swift? By default, this is
# the maximum object size in Swift, which is 5GB
swift_store_large_object_size = 5120
# This file contains references for each of the configured
# Swift accounts/backing stores. If used, this option can prevent
# credentials being stored in the database. Using Swift references
# is disabled if this config is left blank.
#swift_store_config_file = glance-swift.conf
# The reference to the default Swift parameters to use for adding new images.
#default_swift_reference = 'ref1'
# When doing a large object manifest, what size, in MB, should
# Glance write chunks to Swift? This amount of data is written
# to a temporary disk buffer during the process of chunking
# the image file, and the default is 200MB
swift_store_large_object_chunk_size = 200
# If set, the configured endpoint will be used. If None, the storage URL
# from the auth response will be used. The location of an object is
# obtained by appending the container and object to the configured URL.
#
# swift_store_endpoint = https://www.example.com/v1/not_a_container
swift_store_endpoint = None
# If set to True enables multi-tenant storage mode which causes Glance images
# to be stored in tenant specific Swift accounts.
#swift_store_multi_tenant = False
# A list of swift ACL strings that will be applied as both read and
# write ACLs to the containers created by Glance in multi-tenant
# mode. This grants the specified tenants/users read and write access
# to all newly created image objects. The standard swift ACL string
# formats are allowed, including:
# <tenant_id>:<username>
# <tenant_name>:<username>
# *:<username>
# Multiple ACLs can be combined using a comma separated list, for
# example: swift_store_admin_tenants = service:glance,*:admin
#swift_store_admin_tenants =
# The region of the swift endpoint to be used for single tenant. This setting
# is only necessary if the tenant has multiple swift endpoints.
#swift_store_region =
# If set to False, disables SSL layer compression of https swift requests.
# Setting to 'False' may improve performance for images which are already
# in a compressed format, eg qcow2. If set to True, enables SSL layer
# compression (provided it is supported by the target swift proxy).
#swift_store_ssl_compression = True
# The number of times a Swift download will be retried before the
# request fails
#swift_store_retry_get_count = 0
# Bypass SSL verification for Swift
#swift_store_auth_insecure = False
# The path to a CA certificate bundle file to use for SSL verification when
# communicating with Swift.
#swift_store_cacert =
# ============ S3 Store Options =============================
# Address where the S3 authentication service lives
# Valid schemes are 'http://' and 'https://'
# If no scheme specified, default to 'http://'
s3_store_host = s3.amazonaws.com
# User to authenticate against the S3 authentication service
s3_store_access_key = <20-char AWS access key>
# Auth key for the user authenticating against the
# S3 authentication service
s3_store_secret_key = <40-char AWS secret key>
# Bucket within the account that Glance should use
# for storing images in S3. Note that S3 has a flat namespace,
# so you need a unique bucket name for your glance images. An
# easy way to do this is append your AWS access key to "glance".
# S3 buckets in AWS *must* be lowercased, so remember to lowercase
# your AWS access key if you use it in your bucket name below!
s3_store_bucket = <lowercased 20-char aws access key>glance
# Do we create the bucket if it does not exist?
s3_store_create_bucket_on_put = False
# When sending images to S3, the data will first be written to a
# temporary buffer on disk. By default the platform's temporary directory
# will be used. If required, an alternative directory can be specified here.
# s3_store_object_buffer_dir = /path/to/dir
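# Example of the bucket-naming guidance above, using the well-known AWS
# documentation sample access key (illustrative values only):
# s3_store_access_key = AKIAIOSFODNN7EXAMPLE
# s3_store_bucket = akiaiosfodnn7exampleglance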
# ============ Cinder Store Options ===========================
# Info to match when looking for cinder in the service catalog
# Format is : separated values of the form:
# <service_type>:<service_name>:<endpoint_type> (string value)
#cinder_catalog_info = volume:cinder:publicURL
# Override service catalog lookup with template for cinder endpoint
# e.g. http://localhost:8776/v1/%(project_id)s (string value)
#cinder_endpoint_template = <None>
# Region name of this node (string value)
#os_region_name = <None>
# Location of CA certificates file to use for cinder client requests
# (string value)
#cinder_ca_certificates_file = <None>
# Number of cinderclient retries on failed http calls (integer value)
#cinder_http_retries = 3
# Allow to perform insecure SSL requests to cinder (boolean value)
#cinder_api_insecure = False
# ============ VMware Datastore Store Options =====================
# ESX/ESXi or vCenter Server target system.
# The server value can be an IP address or a DNS name
# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com
#vmware_server_host = <None>
# Server username (string value)
#vmware_server_username = <None>
# Server password (string value)
#vmware_server_password = <None>
# Inventory path to a datacenter (string value)
# Value optional when vmware_server_host is an ESX/ESXi host: if specified
# should be `ha-datacenter`.
#vmware_datacenter_path = <None>
# Datastore associated with the datacenter (string value)
#vmware_datastore_name = <None>
# The number of times we retry on failures
# e.g., socket error, etc (integer value)
#vmware_api_retry_count = 10
# The interval used for polling remote tasks
# invoked on VMware ESX/VC server in seconds (integer value)
#vmware_task_poll_interval = 5
# Absolute path of the folder containing the images in the datastore
# (string value)
#vmware_store_image_dir = /openstack_glance
# Allow to perform insecure SSL requests to the target system (boolean value)
#vmware_api_insecure = False
# ================= Security Options ==========================
# AES key for encrypting store 'location' metadata, including
# -- if used -- Swift or S3 credentials
# Should be set to a random string of length 16, 24 or 32 bytes
# metadata_encryption_key = <16, 24 or 32 char registry metadata key>
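# A 32-character key of this form can be generated with, for example,
# "openssl rand -hex 16" (16 random bytes rendered as 32 hex characters);
# the exact command is illustrative -- any cryptographically secure
# random source works.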
# =============== Policy Options ==============================
[oslo_policy]
# The JSON file that defines policies.
# Deprecated group/name - [DEFAULT]/policy_file
#policy_file = policy.json
# Default rule. Enforced when a requested rule is not found.
# Deprecated group/name - [DEFAULT]/policy_default_rule
#policy_default_rule = default
# Directories where policy configuration files are stored.
# They can be relative to any directory in the search path
# defined by the config_dir option, or absolute paths.
# The file defined by policy_file must exist for these
# directories to be searched.
# Deprecated group/name - [DEFAULT]/policy_dirs
#policy_dirs = policy.d

View File

@ -1,23 +0,0 @@
# Use this pipeline for no auth - DEFAULT
[pipeline:glance-search]
pipeline = unauthenticated-context rootapp
[pipeline:glance-search-keystone]
pipeline = authtoken context rootapp
[composite:rootapp]
paste.composite_factory = glance.api:root_app_factory
/v0.1: apiv0_1app
[app:apiv0_1app]
paste.app_factory = glance.search.api.v0_1.router:API.factory
[filter:unauthenticated-context]
paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
delay_auth_decision = true
[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory

View File

@ -1,116 +0,0 @@
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
#verbose = False
# Show debugging output in logs (sets DEBUG log level output)
debug = True
# Address to bind the search server
bind_host = 0.0.0.0
# Port to bind the server to
bind_port = 9393
# Log to this file. Make sure you do not set the same log file for both the API
# and registry servers!
#
# If `log_file` is omitted and `use_syslog` is false, then log messages are
# sent to stdout as a fallback.
log_file = /var/log/daisy/search.log
# Backlog requests when creating socket
backlog = 4096
# TCP_KEEPIDLE value in seconds when creating socket.
# Not supported on OS X.
#tcp_keepidle = 600
# Property Protections config file
# This file contains the rules for property protections and the roles/policies
# associated with it.
# If this config value is not specified, by default, property protections
# won't be enforced.
# If a value is specified and the file is not found, then the glance-search
# service will not start.
#property_protection_file =
# Specify whether 'roles' or 'policies' are used in the
# property_protection_file.
# The default value for property_protection_rule_format is 'roles'.
#property_protection_rule_format = roles
# http_keepalive option. If False, server will return the header
# "Connection: close", If True, server will return "Connection: Keep-Alive"
# in its responses. In order to close the client socket connection
# explicitly after the response is sent and read successfully by the client,
# you simply have to set this option to False when you create a wsgi server.
#http_keepalive = True
# ================= Syslog Options ============================
# Send logs to syslog (/dev/log) instead of to file specified
# by `log_file`
#use_syslog = False
# Facility to use. If unset defaults to LOG_USER.
#syslog_log_facility = LOG_LOCAL0
# ================= SSL Options ===============================
# Certificate file to use when starting API server securely
#cert_file = /path/to/certfile
# Private key file to use when starting API server securely
#key_file = /path/to/keyfile
# CA certificate file to use to verify connecting clients
#ca_file = /path/to/cafile
# =============== Policy Options ==================================
# The JSON file that defines policies.
policy_file = search-policy.json
# Default rule. Enforced when a requested rule is not found.
#policy_default_rule = default
# Directories where policy configuration files are stored.
# They can be relative to any directory in the search path
# defined by the config_dir option, or absolute paths.
# The file defined by policy_file must exist for these
# directories to be searched.
#policy_dirs = policy.d
[paste_deploy]
# Name of the paste configuration file that defines the available pipelines
# config_file = glance-search-paste.ini
# Partial name of a pipeline in your paste configuration file with the
# service name removed. For example, if your paste section name is
# [pipeline:glance-registry-keystone], you would configure the flavor below
# as 'keystone'.
#flavor=
#
[database]
# The SQLAlchemy connection string used to connect to the
# database (string value)
# Deprecated group/name - [DEFAULT]/sql_connection
# Deprecated group/name - [DATABASE]/sql_connection
# Deprecated group/name - [sql]/connection
#connection = <None>
[keystone_authtoken]
identity_uri = http://127.0.0.1:35357
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
revocation_cache_time = 10
# =============== ElasticSearch Options =======================
[elasticsearch]
# List of nodes where Elasticsearch instances are running. A single node
# should be defined as an IP address and port number.
# The default is ['127.0.0.1:9200']
#hosts = ['127.0.0.1:9200']
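# Example (illustrative addresses from the documentation range): a
# two-node cluster would be listed as
# hosts = ['192.0.2.10:9200', '192.0.2.11:9200']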

View File

@ -1,21 +0,0 @@
# glance-swift.conf.sample
#
# This file is an example config file when
# multiple swift accounts/backing stores are enabled.
#
# Specify the reference name in []
# For each section, specify the auth_address, user and key.
#
# WARNING:
# * If any of auth_address, user or key is not specified,
# the glance-api's swift store will fail to configure
[ref1]
user = tenant:user1
key = key1
auth_address = auth123@example.com
[ref2]
user = user2
key = key2
auth_address = http://auth345@example.com
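# With references like the two above defined, glance-api.conf selects one
# of them via its default_swift_reference option, e.g.
# default_swift_reference = 'ref1'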

View File

@ -1,4 +0,0 @@
This directory contains predefined namespaces for Glance Metadata Definitions
Catalog. Files from this directory can be loaded into the database using
db_load_metadefs command for glance-manage. Similarly you can unload the
definitions using db_unload_metadefs command.
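For example (the path is illustrative; adjust to your installation):

    glance-manage db_load_metadefs /etc/glance/metadefs
    glance-manage db_unload_metadefs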

View File

@ -1,21 +0,0 @@
{
"namespace": "OS::Compute::AggregateDiskFilter",
"display_name": "Disk Allocation per Host",
"description": "Properties related to the Nova scheduler filter AggregateDiskFilter. Filters aggregate hosts based on the available disk space compared to the requested disk space. Hosts in the aggregate with not enough usable disk will be filtered out. The filter must be enabled in the Nova scheduler to use these properties.",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Nova::Aggregate"
}
],
"properties": {
"disk_allocation_ratio": {
"title": "Disk Subscription Ratio",
"description": "Allows the host to be under and over subscribed for the amount of disk space requested for an instance. A ratio greater than 1.0 allows for over subscription (hosts may have less usable disk space than requested). A ratio less than 1.0 allows for under subscription.",
"type": "number",
"readonly": false
}
},
"objects": []
}
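The "properties" entries in these definition files are ordinary JSON Schema fragments, so a candidate value can be sanity-checked before it is applied. A minimal sketch, assuming the third-party jsonschema package is installed:

from jsonschema import validate, ValidationError

# Schema fragment lifted from "disk_allocation_ratio" above.
schema = {"type": "number"}

validate(1.5, schema)        # passes: a valid over-subscription ratio
try:
    validate("2x", schema)   # fails: not a number
except ValidationError as err:
    print(err.message)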

View File

@ -1,23 +0,0 @@
{
"namespace": "OS::Compute::AggregateIoOpsFilter",
"display_name": "IO Ops per Host",
"description": "Properties related to the Nova scheduler filter AggregateIoOpsFilter. Filters aggregate hosts based on the number of instances currently changing state. Hosts in the aggregate with too many instances changing state will be filtered out. The filter must be enabled in the Nova scheduler to use these properties.",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Nova::Aggregate"
}
],
"properties": {
"max_io_ops_per_host": {
"title": "Maximum IO Operations per Host",
"description": "Prevents hosts in the aggregate that have this many or more instances currently in build, resize, snapshot, migrate, rescue or unshelve to be scheduled for new instances.",
"type": "integer",
"readonly": false,
"default": 8,
"minimum": 1
}
},
"objects": []
}

View File

@ -1,21 +0,0 @@
{
"namespace": "OS::Compute::AggregateNumInstancesFilter",
"display_name": "Instances per Host",
"description": "Properties related to the Nova scheduler filter AggregateNumInstancesFilter. Filters aggregate hosts by the number of running instances on it. Hosts in the aggregate with too many instances will be filtered out. The filter must be enabled in the Nova scheduler to use these properties.", "visibility": "public",
"protected": false,
"resource_type_associations": [
{
"name": "OS::Nova::Aggregate"
}
],
"properties": {
"max_instances_per_host": {
"title": "Max Instances Per Host",
"description": "Maximum number of instances allowed to run on a host in the aggregate.",
"type": "integer",
"readonly": false,
"minimum": 0
}
},
"objects": []
}

View File

@ -1,21 +0,0 @@
{
"namespace": "OS::Compute::GuestShutdownBehavior",
"display_name": "Shutdown Behavior",
"description": "These properties allow modifying the shutdown behavior for stop, rescue, resize, and shelve operations.",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Glance::Image"
}
],
"properties": {
"os_shutdown_timeout": {
"title": "Shutdown timeout",
"description": "By default, guests will be given 60 seconds to perform a graceful shutdown. After that, the VM is powered off. This property allows overriding the amount of time (unit: seconds) to allow a guest OS to cleanly shut down before power off. A value of 0 (zero) means the guest will be powered off immediately with no opportunity for guest OS clean-up.",
"type": "integer",
"minimum": 0
}
},
"objects": []
}

View File

@ -1,185 +0,0 @@
{
"namespace": "OS::Compute::HostCapabilities",
"display_name": "Compute Host Capabilities",
"description": "Hardware capabilities provided by the compute host. This provides the ability to fine tune the hardware specification required when an instance is requested. The ComputeCapabilitiesFilter should be enabled in the Nova scheduler to use these properties. When enabled, this filter checks that the capabilities provided by the compute host satisfy any extra specifications requested. Only hosts that can provide the requested capabilities will be eligible for hosting the instance.",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Nova::Flavor",
"prefix": "capabilities:"
},
{
"name": "OS::Nova::Aggregate",
"prefix": "aggregate_instance_extra_specs:"
}
],
"properties": {
"cpu_info:vendor": {
"title": "Vendor",
"description": "Specifies the CPU manufacturer.",
"operators": ["<or>"],
"type": "string",
"enum": [
"Intel",
"AMD"
]
},
"cpu_info:model": {
"title": "Model",
"description": "Specifies the CPU model. Use this property to ensure that your vm runs on a a specific cpu model.",
"operators": ["<or>"],
"type": "string",
"enum": [
"Conroe",
"Core2Duo",
"Penryn",
"Nehalem",
"Westmere",
"SandyBridge",
"IvyBridge",
"Haswell",
"Broadwell",
"Delhi",
"Seoul",
"Abu Dhabi",
"Interlagos",
"Kabini",
"Valencia",
"Zurich",
"Budapest",
"Barcelona",
"Suzuka",
"Shanghai",
"Istanbul",
"Lisbon",
"Magny-Cours",
"Valencia",
"Cortex-A57",
"Cortex-A53",
"Cortex-A12",
"Cortex-A17",
"Cortex-A15",
"Coretx-A7",
"X-Gene"
]
},
"cpu_info:arch": {
"title": "Architecture",
"description": "Specifies the CPU architecture. Use this property to specify the architecture supported by the hypervisor.",
"operators": ["<or>"],
"type": "string",
"enum": [
"x86",
"x86_64",
"i686",
"ia64",
"ARMv8-A",
"ARMv7-A"
]
},
"cpu_info:topology:cores": {
"title": "cores",
"description": "Number of cores.",
"type": "integer",
"readonly": false,
"default": 1
},
"cpu_info:topology:threads": {
"title": "threads",
"description": "Number of threads.",
"type": "integer",
"readonly": false,
"default": 1
},
"cpu_info:topology:sockets": {
"title": "sockets",
"description": "Number of sockets.",
"type": "integer",
"readonly": false,
"default": 1
},
"cpu_info:features": {
"title": "Features",
"description": "Specifies CPU flags/features. Using this property you can specify the required set of instructions supported by a vm.",
"operators": ["<or>", "<all-in>"],
"type": "array",
"items": {
"type": "string",
"enum": [
"aes",
"vme",
"de",
"pse",
"tsc",
"msr",
"pae",
"mce",
"cx8",
"apic",
"sep",
"mtrr",
"pge",
"mca",
"cmov",
"pat",
"pse36",
"clflush",
"dts",
"acpi",
"mmx",
"fxsr",
"sse",
"sse2",
"ss",
"ht",
"tm",
"ia64",
"pbe",
"rdtscp",
"pni",
"pclmulqdq",
"dtes64",
"monitor",
"ds_cpl",
"vmx",
"smx",
"est",
"tm2",
"ssse3",
"cid",
"fma",
"cx16",
"xtpr",
"pdcm",
"pcid",
"dca",
"sse4_1",
"sse4_2",
"x2apic",
"movbe",
"popcnt",
"tsc_deadline_timer",
"xsave",
"avx",
"f16c",
"rdrand",
"fsgsbase",
"bmi1",
"hle",
"avx2",
"smep",
"bmi2",
"erms",
"invpcid",
"rtm",
"mpx",
"rdseed",
"adx",
"smap"
]
}
}
},
"objects": []
}
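The resource_type_associations above mean that one definition is consumed under different key prefixes depending on the resource type. A sketch of the resulting flavor extra specs (values are illustrative; the "<all-in>" and "<or>" operators come from the property definitions above):

extra_specs = {
    # "capabilities:" prefix from the OS::Nova::Flavor association
    "capabilities:cpu_info:features": "<all-in> aes avx",
    # "aggregate_instance_extra_specs:" prefix from the aggregate association
    "aggregate_instance_extra_specs:cpu_info:vendor": "Intel",
}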

View File

@ -1,41 +0,0 @@
{
"namespace": "OS::Compute::Hypervisor",
"display_name": "Hypervisor Selection",
"description": "OpenStack Compute supports many hypervisors, although most installations use only one hypervisor. For installations with multiple supported hypervisors, you can schedule different hypervisors using the ImagePropertiesFilter. This filters compute nodes that satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties.",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Glance::Image"
}
],
"properties": {
"hypervisor_type": {
"title": "Hypervisor Type",
"description": "Hypervisor type required by the image. Used with the ImagePropertiesFilter. \n\n KVM - Kernel-based Virtual Machine. LXC - Linux Containers (through libvirt). QEMU - Quick EMUlator. UML - User Mode Linux. hyperv - Microsoft® hyperv. vmware - VMware® vsphere. Baremetal - physical provisioning. For more information, see: http://docs.openstack.org/trunk/config-reference/content/section_compute-hypervisors.html",
"type": "string",
"enum": [
"baremetal",
"hyperv",
"kvm",
"lxc",
"qemu",
"uml",
"vmware",
"xen"
]
},
"vm_mode": {
"title": "VM Mode",
"description": "The virtual machine mode. This represents the host/guest ABI (application binary interface) used for the virtual machine. Used with the ImagePropertiesFilter. \n\n hvm — Fully virtualized - This is the virtual machine mode (vm_mode) used by QEMU and KVM. \n\n xen - Xen 3.0 paravirtualized. \n\n uml — User Mode Linux paravirtualized. \n\n exe — Executables in containers. This is the mode used by LXC.",
"type": "string",
"enum": [
"hvm",
"xen",
"uml",
"exe"
]
}
},
"objects": []
}

View File

@ -1,36 +0,0 @@
{
"namespace": "OS::Compute::InstanceData",
"display_name": "Instance Config Data",
"description": "Instances can perform self-configuration based on data made available to the running instance. These properties affect instance configuration.",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Glance::Image"
},
{
"name": "OS::Cinder::Volume",
"properties_target": "image"
}
],
"properties": {
"img_config_drive": {
"title": "Config Drive",
"description": "This property specifies whether or not Nova should use a config drive when booting the image. Mandatory means that Nova will always use a config drive when booting the image. OpenStack can be configured to write metadata to a special configuration drive that will be attached to the instance when it boots. The instance can retrieve any information from the config drive. One use case for the config drive is to pass network configuration information to the instance. See also: http://docs.openstack.org/user-guide/content/config-drive.html",
"type": "string",
"enum": [
"optional",
"mandatory"
]
},
"os_require_quiesce": {
"title": "Require Quiescent File system",
"description": "This property specifies whether or not the filesystem must be quiesced during snapshot processing. For volume backed and image backed snapshots, yes means that snapshotting is aborted when quiescing fails, whereas, no means quiescing will be skipped and snapshot processing will continue after the quiesce failure.",
"type": "string",
"enum": [
"yes",
"no"
]
}
}
}

View File

@ -1,89 +0,0 @@
{
"namespace": "OS::Compute::LibvirtImage",
"display_name": "libvirt Driver Options for Images",
"description": "The libvirt Compute Driver Options for Glance Images. \n\nThese are properties specific to compute drivers. For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Glance::Image"
}
],
"properties": {
"hw_disk_bus": {
"title": "Disk Bus",
"description": "Specifies the type of disk controller to attach disk devices to.",
"type": "string",
"enum": [
"scsi",
"virtio",
"uml",
"xen",
"ide",
"usb"
]
},
"hw_rng_model": {
"title": "Random Number Generator Device",
"description": "Adds a random-number generator device to the image's instances. The cloud administrator can enable and control device behavior by configuring the instance's flavor. By default: The generator device is disabled. /dev/random is used as the default entropy source. To specify a physical HW RNG device, use the following option in the nova.conf file: rng_dev_path=/dev/hwrng",
"type": "string",
"default": "virtio"
},
"hw_machine_type": {
"title": "Machine Type",
"description": "Enables booting an ARM system using the specified machine type. By default, if an ARM image is used and its type is not specified, Compute uses vexpress-a15 (for ARMv7) or virt (for AArch64) machine types. Valid types can be viewed by using the virsh capabilities command (machine types are displayed in the machine tag).",
"type": "string"
},
"hw_scsi_model": {
"title": "SCSI Model",
"description": "Enables the use of VirtIO SCSI (virtio-scsi) to provide block device access for compute instances; by default, instances use VirtIO Block (virtio-blk). VirtIO SCSI is a para-virtualized SCSI controller device that provides improved scalability and performance, and supports advanced SCSI hardware.",
"type": "string",
"default": "virtio-scsi"
},
"hw_video_model": {
"title": "Video Model",
"description": "The video image driver used.",
"type": "string",
"enum": [
"vga",
"cirrus",
"vmvga",
"xen",
"qxl"
]
},
"hw_video_ram": {
"title": "Max Video Ram",
"description": "Maximum RAM (unit: MB) for the video image. Used only if a hw_video:ram_max_mb value has been set in the flavor's extra_specs and that value is higher than the value set in hw_video_ram.",
"type": "integer",
"minimum": 0
},
"os_command_line": {
"title": "Kernel Command Line",
"description": "The kernel command line to be used by the libvirt driver, instead of the default. For linux containers (LXC), the value is used as arguments for initialization. This key is valid only for Amazon kernel, ramdisk, or machine images (aki, ari, or ami).",
"type": "string"
},
"hw_vif_model": {
"title": "Virtual Network Interface",
"description": "Specifies the model of virtual network interface device to use. The valid options depend on the hypervisor configuration. libvirt driver options: KVM and QEMU: e1000, ne2k_pci, pcnet, rtl8139, spapr-vlan, and virtio. Xen: e1000, netfront, ne2k_pci, pcnet, and rtl8139.",
"type": "string",
"enum": [
"e1000",
"e1000e",
"ne2k_pci",
"netfront",
"pcnet",
"rtl8139",
"spapr-vlan",
"virtio"
]
},
"hw_qemu_guest_agent": {
"title": "QEMU Guest Agent",
"description": "This is a background process which helps management applications execute guest OS level commands. For example, freezing and thawing filesystems, entering suspend. However, guest agent (GA) is not bullet proof, and hostile guest OS can send spurious replies.",
"type": "string",
"enum": ["yes", "no"]
}
},
"objects": []
}

View File

@ -1,32 +0,0 @@
{
"namespace": "OS::Compute::Libvirt",
"display_name": "libvirt Driver Options",
"description": "The libvirt compute driver options. \n\nThese are properties that affect the libvirt compute driver and may be specified on flavors and images. For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Glance::Image",
"prefix": "hw_"
},
{
"name": "OS::Nova::Flavor",
"prefix": "hw:"
}
],
"properties": {
"serial_port_count": {
"title": "Serial Port Count",
"description": "Specifies the count of serial ports that should be provided. If hw:serial_port_count is not set in the flavor's extra_specs, then any count is permitted. If hw:serial_port_count is set, then this provides the default serial port count. It is permitted to override the default serial port count, but only with a lower value.",
"type": "integer",
"minimum": 0
},
"boot_menu": {
"title": "Boot Menu",
"description": "If true, enables the BIOS bootmenu. In cases where both the image metadata and Extra Spec are set, the Extra Spec setting is used. This allows for flexibility in setting/overriding the default behavior as needed.",
"type": "string",
"enum": ["true", "false"]
}
},
"objects": []
}

View File

@ -1,109 +0,0 @@
{
"namespace": "OS::Compute::Quota",
"display_name": "Flavor Quota",
"description": "Compute drivers may enable quotas on CPUs available to a VM, disk tuning, bandwidth I/O, and instance VIF traffic control. See: http://docs.openstack.org/admin-guide-cloud/content/customize-flavors.html",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Nova::Flavor"
}
],
"objects": [
{
"name": "CPU Limits",
"description": "You can configure the CPU limits with control parameters.",
"properties": {
"quota:cpu_shares": {
"title": "Quota: CPU Shares",
"description": "Specifies the proportional weighted share for the domain. If this element is omitted, the service defaults to the OS provided defaults. There is no unit for the value; it is a relative measure based on the setting of other VMs. For example, a VM configured with value 2048 gets twice as much CPU time as a VM configured with value 1024.",
"type": "integer"
},
"quota:cpu_period": {
"title": "Quota: CPU Period",
"description": "Specifies the enforcement interval (unit: microseconds) for QEMU and LXC hypervisors. Within a period, each VCPU of the domain is not allowed to consume more than the quota worth of runtime. The value should be in range [1000, 1000000]. A period with value 0 means no value.",
"type": "integer",
"minimum": 1000,
"maximum": 1000000
},
"quota:cpu_quota": {
"title": "Quota: CPU Quota",
"description": "Specifies the maximum allowed bandwidth (unit: microseconds). A domain with a negative-value quota indicates that the domain has infinite bandwidth, which means that it is not bandwidth controlled. The value should be in range [1000, 18446744073709551] or less than 0. A quota with value 0 means no value. You can use this feature to ensure that all vCPUs run at the same speed.",
"type": "integer"
}
}
},
{
"name": "Disk QoS",
"description": "Using disk I/O quotas, you can set maximum disk write to 10 MB per second for a VM user.",
"properties": {
"quota:disk_read_bytes_sec": {
"title": "Quota: Disk read bytes / sec",
"description": "Sets disk I/O quota for disk read bytes / sec.",
"type": "integer"
},
"quota:disk_read_iops_sec": {
"title": "Quota: Disk read IOPS / sec",
"description": "Sets disk I/O quota for disk read IOPS / sec.",
"type": "integer"
},
"quota:disk_write_bytes_sec": {
"title": "Quota: Disk Write Bytes / sec",
"description": "Sets disk I/O quota for disk write bytes / sec.",
"type": "integer"
},
"quota:disk_write_iops_sec": {
"title": "Quota: Disk Write IOPS / sec",
"description": "Sets disk I/O quota for disk write IOPS / sec.",
"type": "integer"
},
"quota:disk_total_bytes_sec": {
"title": "Quota: Disk Total Bytes / sec",
"description": "Sets disk I/O quota for total disk bytes / sec.",
"type": "integer"
},
"quota:disk_total_iops_sec": {
"title": "Quota: Disk Total IOPS / sec",
"description": "Sets disk I/O quota for disk total IOPS / sec.",
"type": "integer"
}
}
},
{
"name": "Virtual Interface QoS",
"description": "Bandwidth QoS tuning for instance virtual interfaces (VIFs) may be specified with these properties. Incoming and outgoing traffic can be shaped independently. If not specified, no quality of service (QoS) is applied on that traffic direction. So, if you want to shape only the network's incoming traffic, use inbound only (and vice versa). The OpenStack Networking service abstracts the physical implementation of the network, allowing plugins to configure and manage physical resources. Virtual Interfaces (VIF) in the logical model are analogous to physical network interface cards (NICs). VIFs are typically owned a managed by an external service; for instance when OpenStack Networking is used for building OpenStack networks, VIFs would be created, owned, and managed in Nova. VIFs are connected to OpenStack Networking networks via ports. A port is analogous to a port on a network switch, and it has an administrative state. When a VIF is attached to a port the OpenStack Networking API creates an attachment object, which specifies the fact that a VIF with a given identifier is plugged into the port.",
"properties": {
"quota:vif_inbound_average": {
"title": "Quota: VIF Inbound Average",
"description": "Network Virtual Interface (VIF) inbound average in kilobytes per second. Specifies average bit rate on the interface being shaped.",
"type": "integer"
},
"quota:vif_inbound_burst": {
"title": "Quota: VIF Inbound Burst",
"description": "Network Virtual Interface (VIF) inbound burst in total kilobytes. Specifies the amount of bytes that can be burst at peak speed.",
"type": "integer"
},
"quota:vif_inbound_peak": {
"title": "Quota: VIF Inbound Peak",
"description": "Network Virtual Interface (VIF) inbound peak in kilobytes per second. Specifies maximum rate at which an interface can receive data.",
"type": "integer"
},
"quota:vif_outbound_average": {
"title": "Quota: VIF Outbound Average",
"description": "Network Virtual Interface (VIF) outbound average in kilobytes per second. Specifies average bit rate on the interface being shaped.",
"type": "integer"
},
"quota:vif_outbound_burst": {
"title": "Quota: VIF Outbound Burst",
"description": "Network Virtual Interface (VIF) outbound burst in total kilobytes. Specifies the amount of bytes that can be burst at peak speed.",
"type": "integer"
},
"quota:vif_outbound_peak": {
"title": "Quota: VIF Outbound Burst",
"description": "Network Virtual Interface (VIF) outbound peak in kilobytes per second. Specifies maximum rate at which an interface can send data.",
"type": "integer"
}
}
}
]
}
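The quota values are plain integers in the units each description states, so the 10 MB per second disk-write cap mentioned above translates directly into an extra spec (a sketch, not a recommendation):

extra_specs = {"quota:disk_write_bytes_sec": 10 * 1024 * 1024}  # 10485760 bytes/sec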

View File

@ -1,29 +0,0 @@
{
"namespace": "OS::Compute::RandomNumberGenerator",
"display_name": "Random Number Generator",
"description": "If a random-number generator device has been added to the instance through its image properties, the device can be enabled and configured.",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Nova::Flavor"
}
],
"properties": {
"hw_rng:allowed": {
"title": "Random Number Generator Allowed",
"description": "",
"type": "boolean"
},
"hw_rng:rate_bytes": {
"title": "Random number generator limits.",
"description": "Allowed amount of bytes that the guest can read from the host's entropy per period.",
"type": "integer"
},
"hw_rng:rate_period": {
"title": "Random number generator read period.",
"description": "Duration of the read period in seconds.",
"type": "integer"
}
}
}

View File

@ -1,24 +0,0 @@
{
"namespace": "OS::Compute::Trust",
"display_name": "Trusted Compute Pools (Intel® TXT)",
"description": "Trusted compute pools with Intel® Trusted Execution Technology (Intel® TXT) support IT compliance by protecting virtualized data centers - private, public, and hybrid clouds against attacks toward hypervisor and BIOS, firmware, and other pre-launch software components. The Nova trust scheduling filter must be enabled and configured with the trust attestation service in order to use this feature.",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Nova::Flavor"
}
],
"properties": {
"trust:trusted_host": {
"title": "Intel® TXT attestation",
"description": "Select to ensure that node has been attested by Intel® Trusted Execution Technology (Intel® TXT). The Nova trust scheduling filter must be enabled and configured with the trust attestation service in order to use this feature.",
"type": "string",
"enum": [
"trusted",
"untrusted",
"unknown"
]
}
}
}

View File

@ -1,54 +0,0 @@
{
"namespace": "OS::Compute::VirtCPUTopology",
"display_name": "Virtual CPU Topology",
"description": "This provides the preferred socket/core/thread counts for the virtual CPU instance exposed to guests. This enables the ability to avoid hitting limitations on vCPU topologies that OS vendors place on their products. See also: http://git.openstack.org/cgit/openstack/nova-specs/tree/specs/juno/virt-driver-vcpu-topology.rst",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Glance::Image",
"prefix": "hw_"
},
{
"name": "OS::Cinder::Volume",
"prefix": "hw_",
"properties_target": "image"
},
{
"name": "OS::Nova::Flavor",
"prefix": "hw:"
}
],
"properties": {
"cpu_sockets": {
"title": "vCPU Sockets",
"description": "Preferred number of sockets to expose to the guest.",
"type": "integer"
},
"cpu_cores": {
"title": "vCPU Cores",
"description": "Preferred number of cores to expose to the guest.",
"type": "integer"
},
"cpu_threads": {
"title": " vCPU Threads",
"description": "Preferred number of threads to expose to the guest.",
"type": "integer"
},
"cpu_maxsockets": {
"title": "Max vCPU Sockets",
"description": "Maximum number of sockets to expose to the guest.",
"type": "integer"
},
"cpu_maxcores": {
"title": "Max vCPU Cores",
"description": "Maximum number of cores to expose to the guest.",
"type": "integer"
},
"cpu_maxthreads": {
"title": "Max vCPU Threads",
"description": "Maximum number of threads to expose to the guest.",
"type": "integer"
}
}
}

View File

@ -1,19 +0,0 @@
{
"namespace": "OS::Compute::VMwareFlavor",
"display_name": "VMware Driver Options for Flavors",
"description": "VMware Driver Options for Flavors may be used to customize and manage Nova Flavors. These are properties specific to VMWare compute drivers and will only have an effect if the VMWare compute driver is enabled in Nova. See: http://docs.openstack.org/admin-guide-cloud/content/customize-flavors.html",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Nova::Flavor"
}
],
"properties": {
"vmware:hw_version": {
"title": "VMware Hardware Version",
"description": "Specifies the hardware version VMware uses to create images. If the hardware version needs to be compatible with a cluster version, for backward compatibility or other circumstances, the vmware:hw_version key specifies a virtual machine hardware version. In the event that a cluster has mixed host version types, the key will enable the VC to place the cluster on the correct host.",
"type": "string"
}
}
}

View File

@ -1,26 +0,0 @@
{
"namespace": "OS::Compute::VMwareQuotaFlavor",
"display_name": "VMware Quota for Flavors",
"description": "The VMware compute driver allows various compute quotas to be specified on flavors. When specified, the VMWare driver will ensure that the quota is enforced. These are properties specific to VMWare compute drivers and will only have an effect if the VMWare compute driver is enabled in Nova. For a list of hypervisors, see: https://wiki.openstack.org/wiki/HypervisorSupportMatrix. For flavor customization, see: http://docs.openstack.org/admin-guide-cloud/content/customize-flavors.html",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Nova::Flavor"
}
],
"properties": {
"quota:cpu_limit": {
"title": "Quota: CPU Limit",
"description": "Specifies the upper limit for CPU allocation in MHz. This parameter ensures that a machine never uses more than the defined amount of CPU time. It can be used to enforce a limit on the machine's CPU performance. The value should be a numerical value in MHz. If zero is supplied then the cpu_limit is unlimited.",
"type": "integer",
"minimum": 0
},
"quota:cpu_reservation": {
"title": "Quota: CPU Reservation Limit",
"description": "Specifies the guaranteed minimum CPU reservation in MHz. This means that if needed, the machine will definitely get allocated the reserved amount of CPU cycles. The value should be a numerical value in MHz.",
"type": "integer",
"minimum": 0
}
}
}

View File

@ -1,60 +0,0 @@
{
"namespace": "OS::Compute::VMware",
"display_name": "VMware Driver Options",
"description": "The VMware compute driver options. \n\nThese are properties specific to VMWare compute drivers and will only have an effect if the VMWare compute driver is enabled in Nova. For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Glance::Image"
}
],
"properties": {
"vmware_adaptertype": {
"title": "Disk Adapter Type",
"description": "The virtual SCSI or IDE controller used by the hypervisor.",
"type": "string",
"enum": [
"lsiLogic",
"lsiLogicsas",
"paraVirtual",
"busLogic",
"ide"
],
"default" : "lsiLogic"
},
"vmware_disktype": {
"title": "Disk Provisioning Type",
"description": "When performing operations such as creating a virtual disk, cloning, or migrating, the disk provisioning type may be specified. Please refer to VMware documentation for more.",
"type": "string",
"enum": [
"streamOptimized",
"sparse",
"preallocated"
],
"default" : "preallocated"
},
"vmware_ostype": {
"title": "OS Type",
"description": "A VMware GuestID which describes the operating system installed in the image. This value is passed to the hypervisor when creating a virtual machine. If not specified, the key defaults to otherGuest. See thinkvirt.com.",
"type": "string",
"default": "otherGuest"
},
"hw_vif_model": {
"title": "Virtual Network Interface",
"description": "Specifies the model of virtual network interface device to use. The valid options depend on the hypervisor. VMware driver supported options: e1000, e1000e, VirtualE1000, VirtualE1000e, VirtualPCNet32, VirtualSriovEthernetCard, and VirtualVmxnet.",
"type": "string",
"enum": [
"e1000",
"e1000e",
"VirtualE1000",
"VirtualE1000e",
"VirtualPCNet32",
"VirtualSriovEthernetCard",
"VirtualVmxnet"
],
"default" : "e1000"
}
},
"objects": []
}

View File

@ -1,33 +0,0 @@
{
"namespace": "OS::Compute::Watchdog",
"display_name": "Watchdog Behavior",
"description": "Compute drivers may enable watchdog behavior over instances. See: http://docs.openstack.org/admin-guide-cloud/content/customize-flavors.html",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Glance::Image"
},
{
"name": "OS::Cinder::Volume",
"properties_target": "image"
},
{
"name": "OS::Nova::Flavor"
}
],
"properties": {
"hw_watchdog_action": {
"title": "Watchdog Action",
"description": "For the libvirt driver, you can enable and set the behavior of a virtual hardware watchdog device for each flavor. Watchdog devices keep an eye on the guest server, and carry out the configured action, if the server hangs. The watchdog uses the i6300esb device (emulating a PCI Intel 6300ESB). If hw_watchdog_action is not specified, the watchdog is disabled. Watchdog behavior set using a specific image's properties will override behavior set using flavors.",
"type": "string",
"enum": [
"disabled",
"reset",
"poweroff",
"pause",
"none"
]
}
}
}

View File

@ -1,29 +0,0 @@
{
"namespace": "OS::Compute::XenAPI",
"display_name": "XenAPI Driver Options",
"description": "The XenAPI compute driver options. \n\nThese are properties specific to compute drivers. For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Glance::Image"
}
],
"properties": {
"os_type": {
"title": "OS Type",
"description": "The operating system installed on the image. The XenAPI driver contains logic that takes different actions depending on the value of the os_type parameter of the image. For example, for os_type=windows images, it creates a FAT32-based swap partition instead of a Linux swap partition, and it limits the injected host name to less than 16 characters.",
"type": "string",
"enum": [
"linux",
"windows"
]
},
"auto_disk_config": {
"title": "Disk Adapter Type",
"description": "If true, the root partition on the disk is automatically resized before the instance boots. This value is only taken into account by the Compute service when using a Xen-based hypervisor with the XenAPI driver. The Compute service will only attempt to resize if there is a single partition on the image, and only if the partition is in ext3 or ext4 format.",
"type": "boolean"
}
},
"objects": []
}

View File

@ -1,42 +0,0 @@
{
"display_name": "Common Image Properties",
"namespace": "OS::Glance::CommonImageProperties",
"description": "When adding an image to Glance, you may specify some common image properties that may prove useful to consumers of your image.",
"protected": true,
"resource_type_associations" : [
],
"properties": {
"kernel_id": {
"title": "Kernel ID",
"type": "string",
"pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
"description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image."
},
"ramdisk_id": {
"title": "Ramdisk ID",
"type": "string",
"pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
"description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image."
},
"instance_uuid": {
"title": "Instance ID",
"type": "string",
"description": "ID of instance used to create this image."
},
"architecture": {
"title": "CPU Architecture",
"description": "The CPU architecture that must be supported by the hypervisor. For example, x86_64, arm, or ppc64. Run uname -m to get the architecture of a machine. We strongly recommend using the architecture data vocabulary defined by the libosinfo project for this purpose.",
"type": "string"
},
"os_distro": {
"title": "OS Distro",
"description": "The common name of the operating system distribution in lowercase (uses the same data vocabulary as the libosinfo project). Specify only a recognized value for this field. Deprecated values are listed to assist you in searching for the recognized value.",
"type": "string"
},
"os_version": {
"title": "OS Version",
"description": "Operating system version as specified by the distributor. (for example, '11.10')",
"type": "string"
}
}
}
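The kernel_id and ramdisk_id patterns above are the standard 8-4-4-4-12 hex UUID layout; a quick client-side check of a candidate value might look like this sketch:

import re

# Same pattern as the "kernel_id"/"ramdisk_id" properties above.
UUID_RE = re.compile(
    r"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}"
    r"-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$")

assert UUID_RE.match("11111111-2222-3333-4444-555555555555")
assert not UUID_RE.match("not-a-uuid")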

View File

@ -1,27 +0,0 @@
{
"display_name": "Common Operating System Properties",
"namespace": "OS::OperatingSystem",
"description": "When adding an image to Glance, you may specify some common image properties that may prove useful to consumers of your image.",
"protected": true,
"resource_type_associations" : [
{
"name": "OS::Glance::Image"
},
{
"name": "OS::Cinder::Volume",
"properties_target": "image"
}
],
"properties": {
"os_distro": {
"title": "OS Distro",
"description": "The common name of the operating system distribution in lowercase (uses the same data vocabulary as the libosinfo project). Specify only a recognized value for this field. Deprecated values are listed to assist you in searching for the recognized value.",
"type": "string"
},
"os_version": {
"title": "OS Version",
"description": "Operating system version as specified by the distributor. (for example, '11.10')",
"type": "string"
}
}
}

View File

@ -1,333 +0,0 @@
{
"namespace": "OS::Software::DBMS",
"display_name": "Database Software",
"description": "A database is an organized collection of data. The data is typically organized to model aspects of reality in a way that supports processes requiring information. Database management systems are computer software applications that interact with the user, other applications, and the database itself to capture and analyze data. (http://en.wikipedia.org/wiki/Database)",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Glance::Image"
},
{
"name": "OS::Cinder::Volume",
"properties_target": "image"
},
{
"name": "OS::Nova::Instance"
},
{
"name": "OS::Trove::Instance"
}
],
"objects": [
{
"name": "MySQL",
"description": "MySQL is an object-relational database management system (ORDBMS). The MySQL development project has made its source code available under the terms of the GNU General Public License, as well as under a variety of proprietary agreements. MySQL was owned and sponsored by a single for-profit firm, the Swedish company MySQL AB, now owned by Oracle Corporation. MySQL is a popular choice of database for use in web applications, and is a central component of the widely used LAMP open source web application software stack (and other 'AMP' stacks). (http://en.wikipedia.org/wiki/MySQL)",
"properties": {
"sw_database_mysql_version": {
"title": "Version",
"description": "The specific version of MySQL.",
"type": "string"
},
"sw_database_mysql_listen_port": {
"title": "Listen Port",
"description": "The configured TCP/IP port which MySQL listens for incoming connections.",
"type": "integer",
"minimum": 1,
"maximum": 65535,
"default": 3606
},
"sw_database_mysql_admin": {
"title": "Admin User",
"description": "The primary user with privileges to perform administrative operations.",
"type": "string",
"default": "root"
}
}
},
{
"name": "PostgreSQL",
"description": "PostgreSQL, often simply 'Postgres', is an object-relational database management system (ORDBMS) with an emphasis on extensibility and standards-compliance. PostgreSQL is cross-platform and runs on many operating systems. (http://en.wikipedia.org/wiki/PostgreSQL)",
"properties": {
"sw_database_postgresql_version": {
"title": "Version",
"description": "The specific version of PostgreSQL.",
"type": "string"
},
"sw_database_postgresql_listen_port": {
"title": "Listen Port",
"description": "Specifies the TCP/IP port or local Unix domain socket file extension on which PostgreSQL is to listen for connections from client applications.",
"type": "integer",
"minimum": 1,
"maximum": 65535,
"default": 5432
},
"sw_database_postgresql_admin": {
"title": "Admin User",
"description": "The primary user with privileges to perform administrative operations.",
"type": "string",
"default": "postgres"
}
}
},
{
"name": "SQL Server",
"description": "Microsoft SQL Server is a relational database management system developed by Microsoft. There are at least a dozen different editions of Microsoft SQL Server aimed at different audiences and for workloads ranging from small single-machine applications to large Internet-facing applications with many concurrent users. Its primary query languages are T-SQL and ANSI SQL. (http://en.wikipedia.org/wiki/Microsoft_SQL_Server)",
"properties": {
"sw_database_sqlserver_version": {
"title": "Version",
"description": "The specific version of Microsoft SQL Server.",
"type": "string"
},
"sw_database_sqlserver_edition": {
"title": "Edition",
"description": "SQL Server is available in multiple editions, with different feature sets and targeting different users.",
"type": "string",
"default": "Express",
"enum": [
"Datacenter",
"Enterprise",
"Standard",
"Web",
"Business Intelligence",
"Workgroup",
"Express",
"Compact (SQL CE)",
"Developer",
"Embedded (SSEE)",
"Express",
"Fast Track",
"LocalDB",
"Parallel Data Warehouse (PDW)",
"Business Intelligence",
"Datawarehouse Appliance Edition"
]
},
"sw_database_sqlserver_listen_port": {
"title": "Listen Port",
"description": "Specifies the TCP/IP port or local Unix domain socket file extension on which SQL Server is to listen for connections from client applications. The default SQL Server port is 1433, and client ports are assigned a random value between 1024 and 5000.",
"type": "integer",
"minimum": 1,
"maximum": 65535,
"default": 1433
},
"sw_database_postsqlserver_admin": {
"title": "Admin User",
"description": "The primary user with privileges to perform administrative operations.",
"type": "string",
"default": "sa"
}
}
},
{
"name": "Oracle",
"description": "Oracle Database (commonly referred to as Oracle RDBMS or simply as Oracle) is an object-relational database management system produced and marketed by Oracle Corporation. (http://en.wikipedia.org/wiki/Oracle_Database)",
"properties": {
"sw_database_oracle_version": {
"title": "Version",
"description": "The specific version of Oracle.",
"type": "string"
},
"sw_database_oracle_edition": {
"title": "Edition",
"description": "Over and above the different versions of the Oracle database management software developed over time, Oracle Corporation subdivides its product into varying editions.",
"type": "string",
"default": "Express",
"enum": [
"Enterprise",
"Standard",
"Standard Edition One",
"Express (XE)",
"Workgroup",
"Lite"
]
},
"sw_database_oracle_listen_port": {
"title": "Listen Port",
"description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Oracle is to listen for connections from client applications.",
"type": "integer",
"minimum": 1,
"maximum": 65535,
"default": 1521
}
}
},
{
"name": "DB2",
"description": "IBM DB2 is a family of database server products developed by IBM. These products all support the relational model, but in recent years some products have been extended to support object-relational features and non-relational structures, in particular XML. (http://en.wikipedia.org/wiki/IBM_DB2)",
"properties": {
"sw_database_db2_version": {
"title": "Version",
"description": "The specific version of DB2.",
"type": "string"
},
"sw_database_db2_port": {
"title": "Listen Port",
"description": "Specifies the TCP/IP port or local Unix domain socket file extension on which DB2 is to listen for connections from client applications.",
"type": "integer",
"minimum": 1,
"maximum": 65535,
"default": 5432
},
"sw_database_db2_admin": {
"title": "Admin User",
"description": "The primary user with privileges to perform administrative operations.",
"type": "string"
}
}
},
{
"name": "MongoDB",
"description": "MongoDB is a cross-platform document-oriented database. Classified as a NoSQL database, MongoDB uses JSON-like documents with dynamic schemas (MongoDB calls the format BSON), making the integration of data in certain types of applications easier and faster. Released under a combination of the GNU Affero General Public License and the Apache License, MongoDB is free and open-source software. (http://en.wikipedia.org/wiki/MongoDB)",
"properties": {
"sw_database_mongodb_version": {
"title": "Version",
"description": "The specific version of MongoDB.",
"type": "string"
},
"sw_database_mongodb_listen_port": {
"title": "Listen Port",
"description": "Specifies the TCP/IP port or local Unix domain socket file extension on which MongoDB is to listen for connections from client applications.",
"type": "integer",
"minimum": 1,
"maximum": 65535,
"default": 27017
},
"sw_database_mongodb_admin": {
"title": "Admin User",
"description": "The primary user with privileges to perform administrative operations.",
"type": "string"
}
}
},
{
"name": "Couchbase Server",
"description": "Couchbase Server, originally known as Membase, is an open source, distributed (shared-nothing architecture) NoSQL document-oriented database that is optimized for interactive applications. These applications must serve many concurrent users by creating, storing, retrieving, aggregating, manipulating and presenting data. In support of these kinds of application needs, Couchbase is designed to provide easy-to-scale key-value or document access with low latency and high sustained throughput. (http://en.wikipedia.org/wiki/Couchbase_Server)",
"properties": {
"sw_database_couchbaseserver_version": {
"title": "Version",
"description": "The specific version of Couchbase Server.",
"type": "string"
},
"sw_database_couchbaseserver_listen_port": {
"title": "Listen Port",
"description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Couchbase is to listen for connections from client applications.",
"type": "integer",
"minimum": 1,
"maximum": 65535,
"default": 11211
},
"sw_database_couchbaseserver_admin": {
"title": "Admin User",
"description": "The primary user with privileges to perform administrative operations.",
"type": "string",
"default": "admin"
}
}
},
{
"name": "Redis",
"description": "Redis is a data structure server (NoSQL). It is open-source, networked, in-memory, and stores keys with optional durability. The development of Redis has been sponsored by Pivotal Software since May 2013; before that, it was sponsored by VMware. The name Redis means REmote DIctionary Server. (http://en.wikipedia.org/wiki/Redis)",
"properties": {
"sw_database_redis_version": {
"title": "Version",
"description": "The specific version of Redis.",
"type": "string"
},
"sw_database_redis_listen_port": {
"title": "Listen Port",
"description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Redis is to listen for connections from client applications.",
"type": "integer",
"minimum": 1,
"maximum": 65535,
"default": 6379
},
"sw_database_redis_admin": {
"title": "Admin User",
"description": "The primary user with privileges to perform administrative operations.",
"type": "string",
"default": "admin"
}
}
},
{
"name": "CouchDB",
"description": "Apache CouchDB, commonly referred to as CouchDB, is an open source NoSQL database. It is a NoSQL database that uses JSON to store data, JavaScript as its query language using MapReduce, and HTTP for an API. One of its distinguishing features is multi-master replication. CouchDB was first released in 2005 and later became an Apache project in 2008. (http://en.wikipedia.org/wiki/CouchDB)",
"properties": {
"sw_database_couchdb_version": {
"title": "Version",
"description": "The specific version of CouchDB.",
"type": "string"
},
"sw_database_couchdb_listen_port": {
"title": "Listen Port",
"description": "Specifies the TCP/IP port or local Unix domain socket file extension on which CouchDB is to listen for connections from client applications.",
"type": "integer",
"minimum": 1,
"maximum": 65535,
"default": 5984
},
"sw_database_couchdb_admin": {
"title": "Admin User",
"description": "The primary user with privileges to perform administrative operations.",
"type": "string"
}
}
},
{
"name": "Apache Cassandra",
"description": "Apache Cassandra is an open source distributed NoSQL database management system designed to handle large amounts of data across many commodity servers, providing high availability with no single point of failure. (http://en.wikipedia.org/wiki/Apache_Cassandra)",
"properties": {
"sw_database_cassandra_version": {
"title": "Version",
"description": "The specific version of Apache Cassandra.",
"type": "string"
},
"sw_database_cassandra_listen_port": {
"title": "Listen Port",
"description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Cassandra is to listen for connections from client applications.",
"type": "integer",
"minimum": 1,
"maximum": 65535,
"default": 9160
},
"sw_database_cassandra_admin": {
"title": "Admin User",
"description": "The primary user with privileges to perform administrative operations.",
"type": "string",
"default": "cassandra"
}
}
},
{
"name": "HBase",
"description": "HBase is an open source, non-relational (NoSQL), distributed database modeled after Google's BigTable and written in Java. It is developed as part of Apache Software Foundation's Apache Hadoop project and runs on top of HDFS (Hadoop Distributed Filesystem), providing BigTable-like capabilities for Hadoop. (http://en.wikipedia.org/wiki/Apache_HBase)",
"properties": {
"sw_database_hbase_version": {
"title": "Version",
"description": "The specific version of HBase.",
"type": "string"
}
}
},
{
"name": "Hazlecast",
"description": "In computing, Hazelcast is an in-memory open source software data grid based on Java. By having multiple nodes form a cluster, data is evenly distributed among the nodes. This allows for horizontal scaling both in terms of available storage space and processing power. Backups are also distributed in a similar fashion to other nodes, based on configuration, thereby protecting against single node failure. (http://en.wikipedia.org/wiki/Hazelcast)",
"properties": {
"sw_database_hazlecast_version": {
"title": "Version",
"description": "The specific version of Hazlecast.",
"type": "string"
},
"sw_database_hazlecast_port": {
"title": "Listen Port",
"description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Hazlecast is to listen for connections between members.",
"type": "integer",
"minimum": 1,
"maximum": 65535,
"default": 5701
}
}
}
]
}

View File

@ -1,76 +0,0 @@
{
"namespace": "OS::Software::Runtimes",
"display_name": "Runtime Environment",
"description": "Software is written in a specific programming language and the language must execute within a runtime environment. The runtime environment provides an abstraction to utilizing a computer's processor, memory (RAM), and other system resources.",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Glance::Image"
},
{
"name": "OS::Cinder::Volume",
"properties_target": "image"
},
{
"name": "OS::Nova::Instance"
}
],
"objects": [
{
"name": "PHP",
"description": "PHP is a server-side scripting language designed for web development but also used as a general-purpose programming language. PHP code can be simply mixed with HTML code, or it can be used in combination with various templating engines and web frameworks. PHP code is usually processed by a PHP interpreter, which is usually implemented as a web server's native module or a Common Gateway Interface (CGI) executable. After the PHP code is interpreted and executed, the web server sends resulting output to its client, usually in form of a part of the generated web page for example, PHP code can generate a web page's HTML code, an image, or some other data. PHP has also evolved to include a command-line interface (CLI) capability and can be used in standalone graphical applications. (http://en.wikipedia.org/wiki/PHP)",
"properties": {
"sw_runtime_php_version": {
"title": "Version",
"description": "The specific version of PHP.",
"type": "string"
}
}
},
{
"name": "Python",
"description": "Python is a widely used general-purpose, high-level programming language. Its design philosophy emphasizes code readability, and its syntax allows programmers to express concepts in fewer lines of code than would be possible in languages such as C++ or Java. The language provides constructs intended to enable clear programs on both a small and large scale. Python supports multiple programming paradigms, including object-oriented, imperative and functional programming or procedural styles. It features a dynamic type system and automatic memory management and has a large and comprehensive standard library. (http://en.wikipedia.org/wiki/Python_(programming_language))",
"properties": {
"sw_runtime_python_version": {
"title": "Version",
"description": "The specific version of python.",
"type": "string"
}
}
},
{
"name": "Java",
"description": "Java is a functional computer programming language that is concurrent, class-based, object-oriented, and specifically designed to have as few implementation dependencies as possible. It is intended to let application developers write once, run anywhere (WORA), meaning that code that runs on one platform does not need to be recompiled to run on another. Java applications are typically compiled to bytecode that can run on any Java virtual machine (JVM) regardless of computer architecture. (http://en.wikipedia.org/wiki/Java_(programming_language))",
"properties": {
"sw_runtime_java_version": {
"title": "Version",
"description": "The specific version of Java.",
"type": "string"
}
}
},
{
"name": "Ruby",
"description": "Ruby is a dynamic, reflective, object-oriented, general-purpose programming language. It was designed and developed in the mid-1990s by Yukihiro Matsumoto in Japan. According to its authors, Ruby was influenced by Perl, Smalltalk, Eiffel, Ada, and Lisp. It supports multiple programming paradigms, including functional, object-oriented, and imperative. It also has a dynamic type system and automatic memory management. (http://en.wikipedia.org/wiki/Python_(programming_language))",
"properties": {
"sw_runtime_ruby_version": {
"title": "Version",
"description": "The specific version of Ruby.",
"type": "string"
}
}
},
{
"name": "Perl",
"description": "Perl is a family of high-level, general-purpose, interpreted, dynamic programming languages. The languages in this family include Perl 5 and Perl 6. Though Perl is not officially an acronym, there are various backronyms in use, the most well-known being Practical Extraction and Reporting Language (http://en.wikipedia.org/wiki/Perl)",
"properties": {
"sw_runtime_perl_version": {
"title": "Version",
"description": "The specific version of Perl.",
"type": "string"
}
}
}
]
}

View File

@ -1,102 +0,0 @@
{
"namespace": "OS::Software::WebServers",
"display_name": "Web Servers",
"description": "A web server is a computer system that processes requests via HTTP, the basic network protocol used to distribute information on the World Wide Web. The most common use of web servers is to host websites, but there are other uses such as gaming, data storage, running enterprise applications, handling email, FTP, or other web uses. (http://en.wikipedia.org/wiki/Web_server)",
"visibility": "public",
"protected": true,
"resource_type_associations": [
{
"name": "OS::Glance::Image"
},
{
"name": "OS::Cinder::Volume",
"properties_target": "image"
},
{
"name": "OS::Nova::Instance"
}
],
"objects": [
{
"name": "Apache HTTP Server",
"description": "The Apache HTTP Server, colloquially called Apache, is a Web server application notable for playing a key role in the initial growth of the World Wide Web. Apache is developed and maintained by an open community of developers under the auspices of the Apache Software Foundation. Most commonly used on a Unix-like system, the software is available for a wide variety of operating systems, including Unix, FreeBSD, Linux, Solaris, Novell NetWare, OS X, Microsoft Windows, OS/2, TPF, OpenVMS and eComStation. Released under the Apache License, Apache is open-source software. (http://en.wikipedia.org/wiki/Apache_HTTP_Server)",
"properties": {
"sw_webserver_apache_version": {
"title": "Version",
"description": "The specific version of Apache.",
"type": "string"
},
"sw_webserver_apache_http_port": {
"title": "HTTP Port",
"description": "The configured TCP/IP port on which the web server listens for incoming HTTP connections.",
"type": "integer",
"minimum": 1,
"maximum": 65535,
"default": 80
},
"sw_webserver_apache_https_port": {
"title": "HTTPS Port",
"description": "The configured TCP/IP port on which the web server listens for incoming HTTPS connections.",
"type": "integer",
"minimum": 1,
"maximum": 65535,
"default": 443
}
}
},
{
"name": "Nginx",
"description": "Nginx (pronounced 'engine-x') is an open source reverse proxy server for HTTP, HTTPS, SMTP, POP3, and IMAP protocols, as well as a load balancer, HTTP cache, and a web server (origin server). The nginx project started with a strong focus on high concurrency, high performance and low memory usage. It is licensed under the 2-clause BSD-like license and it runs on Linux, BSD variants, Mac OS X, Solaris, AIX, HP-UX, as well as on other *nix flavors. It also has a proof of concept port for Microsoft Windows. (http://en.wikipedia.org/wiki/Nginx)",
"properties": {
"sw_webserver_nginx_version": {
"title": "Version",
"description": "The specific version of Nginx.",
"type": "string"
},
"sw_webserver_nginx_http_port": {
"title": "HTTP Port",
"description": "The configured TCP/IP port on which the web server listens for incoming HTTP connections.",
"type": "integer",
"minimum": 1,
"maximum": 65535,
"default": 80
},
"sw_webserver_nginx_https_port": {
"title": "HTTPS Port",
"description": "The configured TCP/IP port on which the web server listens for incoming HTTPS connections.",
"type": "integer",
"minimum": 1,
"maximum": 65535,
"default": 443
}
}
},
{
"name": "IIS",
"description": "Internet Information Services (IIS, formerly Internet Information Server) is an extensible web server created by Microsoft. IIS supports HTTP, HTTPS, FTP, FTPS, SMTP and NNTP. IIS is not turned on by default when Windows is installed. The IIS Manager is accessed through the Microsoft Management Console or Administrative Tools in the Control Panel. (http://en.wikipedia.org/wiki/Internet_Information_Services)",
"properties": {
"sw_webserver_iis_version": {
"title": "Version",
"description": "The specific version of IIS.",
"type": "string"
},
"sw_webserver_iis_http_port": {
"title": "HTTP Port",
"description": "The configured TCP/IP port on which the web server listens for incoming HTTP connections.",
"type": "integer",
"minimum": 1,
"maximum": 65535,
"default": 80
},
"sw_webserver_iis_https_port": {
"title": "HTTPS Port",
"description": "The configured TCP/IP port on which the web server listens for incoming HTTPS connections.",
"type": "integer",
"minimum": 1,
"maximum": 65535,
"default": 443
}
}
}
]
}

View File

@ -1,5 +0,0 @@
[DEFAULT]
output_file = etc/daisy-cache.conf.sample
namespace = daisy.cache
namespace = oslo_log
namespace = oslo_policy
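
Each namespace line above names an oslo.config entry point (see the daisy.cache hook removed from setup.cfg below) whose job is to return the options the generator should document. A minimal sketch of such a hook; the option names here are illustrative, not the actual contents of daisy.opts:

from oslo_config import cfg

# Hypothetical cache options; the real list lives in daisy.opts.
_cache_opts = [
    cfg.StrOpt('image_cache_dir',
               help='Base directory that the image cache uses.'),
    cfg.IntOpt('image_cache_max_size',
               default=10 * 1024 * 1024 * 1024,
               help='Upper bound on the cache size, in bytes.'),
]

def list_cache_opts():
    # oslo-config-generator calls this and expects a list of
    # (group name, option list) pairs; None means [DEFAULT].
    return [(None, _cache_opts)]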

View File

@ -1,28 +0,0 @@
{
"kernel_id": {
"type": "string",
"pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
"description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image."
},
"ramdisk_id": {
"type": "string",
"pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
"description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image."
},
"instance_uuid": {
"type": "string",
"description": "ID of instance used to create this image."
},
"architecture": {
"description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
"type": "string"
},
"os_distro": {
"description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
"type": "string"
},
"os_version": {
"description": "Operating system version as specified by the distributor",
"type": "string"
}
}
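
The kernel_id and ramdisk_id patterns accept only a canonically formatted UUID. A quick check of the pattern with the standard library:

import re
import uuid

UUID_PATTERN = (r'^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
                r'-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$')

print(bool(re.match(UUID_PATTERN, str(uuid.uuid4()))))  # True
print(bool(re.match(UUID_PATTERN, 'not-a-uuid')))       # False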

View File

@ -1,8 +0,0 @@
{
"context_is_admin": "role:admin",
"default": "",
"catalog_index": "role:admin",
"catalog_search": "",
"catalog_plugins": ""
}
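
These rules are evaluated by oslo.policy: context_is_admin and catalog_index require the admin role, while the empty-string rules match any caller. A minimal sketch of the check, assuming oslo.policy is installed and this file is saved as policy.json in the search path:

from oslo_config import cfg
from oslo_policy import policy

enforcer = policy.Enforcer(cfg.CONF, policy_file='policy.json')
creds = {'roles': ['admin'], 'project_id': 'demo'}

# 'catalog_index' maps to role:admin, which the caller has: True.
print(enforcer.enforce('catalog_index', {}, creds))
# 'catalog_search' is the empty rule, which matches everyone: True.
print(enforcer.enforce('catalog_search', {}, creds))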

View File

@ -23,33 +23,15 @@ setup-hooks =
[entry_points]
console_scripts =
daisy-api = daisy.cmd.api:main
daisy-cache-prefetcher = daisy.cmd.cache_prefetcher:main
daisy-cache-pruner = daisy.cmd.cache_pruner:main
daisy-cache-manage = daisy.cmd.cache_manage:main
daisy-cache-cleaner = daisy.cmd.cache_cleaner:main
daisy-search = daisy.cmd.search:main
daisy-index = daisy.cmd.index:main
daisy-manage = daisy.cmd.manage:main
daisy-registry = daisy.cmd.registry:main
daisy-replicator = daisy.cmd.replicator:main
daisy-orchestration = daisy.cmd.orchestration:main
oslo_config.opts =
daisy.api = daisy.opts:list_api_opts
daisy.registry = daisy.opts:list_registry_opts
daisy.cache= daisy.opts:list_cache_opts
daisy.manage = daisy.opts:list_manage_opts
daisy.database.migration_backend =
sqlalchemy = oslo_db.sqlalchemy.migration
daisy.database.metadata_backend =
sqlalchemy = daisy.db.sqlalchemy.metadata
daisy.search.index_backend =
image = daisy.search.plugins.images:ImageIndex
metadef = daisy.search.plugins.metadefs:MetadefIndex
daisy.flows =
import = daisy.async.flows.base_import:get_flow
daisy.flows.import =
convert = daisy.async.flows.convert:get_flow
introspect = daisy.async.flows.introspect:get_flow
[build_sphinx]
all_files = 1
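
Each console_scripts line binds a command name to a module:callable pair, which is why deleting the cache and search commands also removes their daisy.cmd modules. A sketch of the resolution an installed wrapper script performs, using pkg_resources and assuming the package is installed under the distribution name daisy:

import pkg_resources

# What the generated 'daisy-api' wrapper effectively does at run time.
main = pkg_resources.load_entry_point('daisy', 'console_scripts', 'daisy-api')
main()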

View File

@ -32,9 +32,7 @@ commands = {posargs}
commands =
oslo-config-generator --config-file etc/oslo-config-generator/daisy-api.conf
oslo-config-generator --config-file etc/oslo-config-generator/daisy-registry.conf
oslo-config-generator --config-file etc/oslo-config-generator/daisy-cache.conf
oslo-config-generator --config-file etc/oslo-config-generator/daisy-manage.conf
oslo-config-generator --config-file etc/oslo-config-generator/daisy-search.conf
[testenv:docs]
commands = python setup.py build_sphinx

View File

@ -19,7 +19,6 @@ Source4: daisy.logrotate
Source5: daisy-api-dist.conf
Source6: daisy-registry-dist.conf
Source7: daisy-cache-dist.conf
Source9: daisy-orchestration.service
Source10: daisy-orchestration.conf
@ -150,7 +149,6 @@ rm -rf {test-,}requirements.txt tools/{pip,test}-requires
# Programmatically update defaults in example config
api_dist=%{SOURCE5}
registry_dist=%{SOURCE6}
cache_dist=%{SOURCE7}
%build
%{__python2} setup.py build
@ -180,7 +178,6 @@ rm -f %{buildroot}%{_sysconfdir}/daisy*.conf
rm -f %{buildroot}%{_sysconfdir}/daisy*.ini
rm -f %{buildroot}%{_sysconfdir}/logging.cnf.sample
rm -f %{buildroot}%{_sysconfdir}/policy.json
rm -f %{buildroot}%{_sysconfdir}/schema-image.json
rm -f %{buildroot}/usr/share/doc/daisy/README.rst
# Setup directories
@ -197,11 +194,8 @@ install -p -D -m 640 etc/daisy-registry.conf %{buildroot}%{_sysconfdir}/daisy/da
install -p -D -m 644 %{SOURCE6} %{buildroot}%{_datadir}/daisy/daisy-registry-dist.conf
install -p -D -m 644 etc/daisy-registry-paste.ini %{buildroot}%{_datadir}/daisy/daisy-registry-dist-paste.ini
install -p -D -m 644 etc/daisy-registry-paste.ini %{buildroot}%{_sysconfdir}/daisy/daisy-registry-paste.ini
install -p -D -m 640 etc/daisy-cache.conf %{buildroot}%{_sysconfdir}/daisy/daisy-cache.conf
install -p -D -m 644 %{SOURCE7} %{buildroot}%{_datadir}/daisy/daisy-cache-dist.conf
install -p -D -m 640 etc/policy.json %{buildroot}%{_sysconfdir}/daisy/policy.json
install -p -D -m 640 etc/schema-image.json %{buildroot}%{_sysconfdir}/daisy/schema-image.json
# systemd services
install -p -D -m 644 %{SOURCE1} %{buildroot}%{_unitdir}/daisy-api.service
@ -264,18 +258,10 @@ fi
%{_bindir}/daisy-api
%{_bindir}/daisy-manage
%{_bindir}/daisy-registry
%{_bindir}/daisy-cache-cleaner
%{_bindir}/daisy-cache-manage
%{_bindir}/daisy-cache-prefetcher
%{_bindir}/daisy-cache-pruner
%{_bindir}/daisy-replicator
%{_bindir}/daisy-index
%{_bindir}/daisy-search
%{_bindir}/daisy-orchestration
%{_datadir}/daisy/daisy-api-dist.conf
%{_datadir}/daisy/daisy-registry-dist.conf
%{_datadir}/daisy/daisy-cache-dist.conf
%{_datadir}/daisy/daisy-api-dist-paste.ini
%{_datadir}/daisy/daisy-registry-dist-paste.ini
@ -291,9 +277,7 @@ fi
%config(noreplace) %attr(-, root, daisy) %{_sysconfdir}/daisy/daisy-api.conf
%config(noreplace) %attr(-, root, daisy) %{_sysconfdir}/daisy/daisy-registry.conf
%config(noreplace) %attr(-, root, daisy) %{_sysconfdir}/daisy/daisy-orchestration.conf
%config(noreplace) %attr(-, root, daisy) %{_sysconfdir}/daisy/daisy-cache.conf
%config(noreplace) %attr(-, root, daisy) %{_sysconfdir}/daisy/policy.json
%config(noreplace) %attr(-, root, daisy) %{_sysconfdir}/daisy/schema-image.json
%config(noreplace) %attr(-, root, daisy) %{_sysconfdir}/logrotate.d/daisy
%dir %attr(0755, daisy, daisy) %{_sharedstatedir}/daisy
%dir %attr(0750, daisy, daisy) %{_localstatedir}/log/daisy

View File

@ -0,0 +1,319 @@
import uuid
class FakeLogicNetwork(object):
# 1----------------------------------------------------------
def fake_network_parameters(self):
return {
'base_mac': 'fa:16:3e:00:00:00',
'gre_id_range': [2, 4094],
'net_l23_provider': 'ovs',
'public_vip': '172.16.0.3',
'segmentation_type': 'vlan,flat,vxlan,gre',
'vni_range': [2, 4094],
'vlan_range': [2, 4094],
}
def fake_logical_parameters(self, private_network):
return [{
'name': 'internal1',
'physnet_name': private_network.name,
'segmentation_id': 200,
'segmentation_type': 'vlan',
'shared': True,
'subnets': [{'cidr': '192.168.1.0/24',
'dns_nameservers': ['8.8.4.4',
'8.8.8.8'],
'floating_ranges': [['192.168.1.2',
'192.168.1.200']],
'gateway': '192.168.1.1',
'name': 'subnet2'},
{'cidr': '172.16.1.0/24',
'dns_nameservers': ['8.8.4.4',
'8.8.8.8'],
'floating_ranges': [['172.16.1.130',
'172.16.1.150'],
['172.16.1.151',
'172.16.1.254']],
'gateway': '172.16.1.1',
'name': 'subnet10'}],
'type': 'internal'},
{'name': 'flat1',
'physnet_name': 'physnet1',
'segmentation_type': 'flat',
'segmentation_id': -1,
'shared': True,
'subnets': [{'cidr': '192.168.2.0/24',
'dns_nameservers': ['8.8.4.4',
'8.8.8.8'],
'floating_ranges': [['192.168.2.130',
'192.168.2.254']],
'gateway': '192.168.2.1',
'name': 'subnet123'}],
'type': 'external'}
]
def fake_router_parameters(self):
return [{
'description': 'router1',
'external_logic_network': 'flat1',
'name': 'router1',
'subnets': ['subnet2', 'subnet10']}
]
def fake_cluster_parameters(self, private_network=None):
networks = []
if private_network:
networks.append(private_network.id)
return {
'description': 'desc',
'name': str(uuid.uuid1()).split('-')[0],
'networks': networks
}
# 2----------------------------------------------------------
def fake_logical_parameters2(self):
return [{
'name': 'internal1',
'physnet_name': 'phynet2',
'segmentation_id': 200,
'segmentation_type': 'vlan',
'shared': True,
'subnets': [],
'type': 'internal'}]
def fake_subnet_parameters2(self):
return [{'cidr': '192.168.1.0/24',
'dns_nameservers': ['8.8.4.4',
'8.8.8.8'],
'floating_ranges': [['192.168.1.2',
'192.168.1.200']],
'gateway': '192.168.1.1',
'name': 'subnet10'},
{'cidr': '172.16.1.0/24',
'dns_nameservers': ['8.8.4.4',
'8.8.8.8'],
'floating_ranges': [['172.16.1.130',
'172.16.1.152'],
['172.16.1.151',
'172.16.1.254']],
'gateway': '172.16.1.1',
'name': 'subnet10'}]
def fake_router_parameters2(self):
return [{
'description': 'router1',
'external_logic_network': 'flat1',
'name': 'router1',
'subnets': ['subnet2', 'subnet10']},
{
'description': 'test',
'external_logic_network': 'flat1',
'name': 'router1',
'subnets': ['subnet123']}
]
# 3-------------------------------------------------------------
def fake_private_network_parameters(self):
    return {
        'name': 'phynet2',
        'description': 'phynet2',
        'network_type': 'DATAPLANE',
        'type': 'custom',
        'vlan_start': '101',
        'vlan_end': '1001',
        'ml2_type': 'ovs'
    }
def fake_private_network_parameters1(self):
    return {
        'name': 'phynet3',
        'description': 'phynet3',
        'network_type': 'DATAPLANE',
        'type': 'custom',
        'vlan_start': '101',
        'vlan_end': '2000',
        'ml2_type': 'ovs'
    }
def fake_private_network_parameters2(self):
    return {
        'name': 'phynet1',
        'description': 'phynet1',
        'network_type': 'DATAPLANE',
        'type': 'custom',
        'vlan_start': '101',
        'vlan_end': '2000',
        'ml2_type': 'ovs'
    }
class FakeDiscoverHosts(object):
# 1----------------------------------------------------------
daisy_data = [{'description': 'default',
               'name': '4c09b4b2788a',
               'ipmi_addr': '10.43.203.230',
               'ipmi_user': 'albert',
               'ipmi_passwd': 'superuser',
               'interfaces': [{'name': 'enp132s0f0',
                               'mac': '4c:09:b4:b2:78:8a',
                               'ip': '99.99.1.60',
                               'is_deployment': 'True',
                               'pci': '0000:84:00.0',
                               'netmask': '255.255.255.0'}],
               'os_status': 'init',
               'dmi_uuid': '03000200-0400-0500-0006-000700080009'},
              {'description': 'default',
               'name': '4c09b4b2798a',
               'ipmi_addr': '10.43.203.231',
               'ipmi_user': 'albert',
               'ipmi_passwd': 'superuser',
               'interfaces': [{'name': 'enp132s0f0',
                               'mac': '4c:09:b4:b2:79:8a',
                               'ip': '99.99.1.61',
                               'is_deployment': 'True',
                               'pci': '0000:84:00.0',
                               'netmask': '255.255.255.0'}],
               'os_status': 'init',
               'dmi_uuid': '03000200-0400-0500-0006-000700080009'},
              {'description': 'default',
               'name': '4c09b4b2808a',
               'ipmi_addr': '10.43.203.232',
               'ipmi_user': 'albert',
               'ipmi_passwd': 'superuser',
               'interfaces': [{'name': 'enp132s0f0',
                               'mac': '4c:09:b4:b2:80:8a',
                               'ip': '99.99.1.62',
                               'is_deployment': 'True',
                               'pci': '0000:84:00.0',
                               'netmask': '255.255.255.0'}],
               'os_status': 'init',
               'dmi_uuid': '03000200-0400-0500-0006-000700080009'}]
ironic_disk_data = [
    {'uuid': '03000200-0400-0500-0006-000700080009',
     'mac': '4c:09:b4:b2:78:8a',
     'patch': [{'op': 'add',
                'path': '/disks/sda',
                'value': {'disk': 'pci-0000:01:00.0-sas-0x500003956831a6da-lun-0',
                          'extra': ['scsi-3500003956831a6d8',
                                    'wwn-0x500003956831a6d8'],
                          'model': '',
                          'name': 'sda',
                          'removable': '',
                          'size': ' 200127266816 bytes'}}]},
    {'uuid': '03000200-0400-0500-0006-000700080009',
     'mac': '4c:09:b4:b2:79:8a',
     'patch': [{'op': 'add',
                'path': '/disks/sda',
                'value': {'disk': 'pci-0000:01:00.0-sas-0x500003956831a6da-lun-0',
                          'extra': ['scsi-3500003956831a6d8',
                                    'wwn-0x500003956831a6d8'],
                          'model': '',
                          'name': 'sda',
                          'removable': '',
                          'size': ' 200127266816 bytes'}}]},
    {'uuid': '03000200-0400-0500-0006-000700080009',
     'mac': '4c:09:b4:b2:80:8a',
     'patch': [{'op': 'add',
                'path': '/disks/sda',
                'value': {'disk': 'pci-0000:01:00.0-sas-0x500003956831a6da-lun-0',
                          'extra': ['scsi-3500003956831a6d8',
                                    'wwn-0x500003956831a6d8'],
                          'model': '',
                          'name': 'sda',
                          'removable': '',
                          'size': ' 200127266816 bytes'}}]}]
ironic_memory_data = [
    {'uuid': '03000200-0400-0500-0006-000700080009',
     'mac': '4c:09:b4:b2:78:8a',
     'patch': [{'path': '/memory/total',
                'value': ' 1850020 kB',
                'op': 'add'},
               {'path': '/memory/phy_memory_1',
                'value': {'slots': ' 2',
                          'devices_1': {'frequency': '',
                                        'type': ' DIMM SDRAM',
                                        'size': ' 4096 MB'},
                          'maximum_capacity': ' 4 GB',
                          'devices_2': {'frequency': ' 3 ns',
                                        'type': ' DIMM SDRAM',
                                        'size': ' 8192 MB'}},
                'op': 'add'}]},
    {'uuid': '03000200-0400-0500-0006-000700080009',
     'mac': '4c:09:b4:b2:79:8a',
     'patch': [{'path': '/memory/total',
                'value': ' 1850020 kB',
                'op': 'add'},
               {'path': '/memory/phy_memory_1',
                'value': {'slots': ' 3',
                          'devices_1': {'frequency': '',
                                        'type': ' DIMM SDRAM',
                                        'size': ' 4096 MB'},
                          'maximum_capacity': ' 4 GB',
                          'devices_2': {'frequency': ' 3 ns',
                                        'type': ' DIMM SDRAM',
                                        'size': ' 8192 MB'}},
                'op': 'add'}]},
    {'uuid': '03000200-0400-0500-0006-000700080009',
     'mac': '4c:09:b4:b2:80:8a',
     'patch': [{'path': '/memory/total',
                'value': ' 1850020 kB',
                'op': 'add'},
               {'path': '/memory/phy_memory_1',
                'value': {'slots': ' 3',
                          'devices_1': {'frequency': '',
                                        'type': ' DIMM SDRAM',
                                        'size': ' 4096 MB'},
                          'maximum_capacity': ' 4 GB',
                          'devices_2': {'frequency': ' 3 ns',
                                        'type': ' DIMM SDRAM',
                                        'size': ' 8192 MB'}},
                'op': 'add'}]}]
ironic_cpu_data = [
    {'uuid': '03000200-0400-0500-0006-000700080009',
     'mac': '4c:09:b4:b2:78:8a',
     'patch': [{'path': '/cpu/real',
                'value': 1,
                'op': 'add'},
               {'path': '/cpu/total',
                'value': 2,
                'op': 'add'},
               {'path': '/cpu/spec_1',
                'value': {'model': ' Pentium(R) Dual-Core CPU E5700 @ 3.00GHz',
                          'frequency': 3003},
                'op': 'add'},
               {'path': '/cpu/spec_2',
                'value': {'model': ' Pentium(R) Dual-Core CPU E5700 @ 3.00GHz',
                          'frequency': 3003},
                'op': 'add'}]},
    {'uuid': '03000200-0400-0500-0006-000700080009',
     'mac': '4c:09:b4:b2:79:8a',
     'patch': [{'path': '/cpu/real',
                'value': 1,
                'op': 'add'},
               {'path': '/cpu/total',
                'value': 2,
                'op': 'add'},
               {'path': '/cpu/spec_1',
                'value': {'model': ' Pentium(R) Dual-Core CPU E5700 @ 3.00GHz',
                          'frequency': 3003},
                'op': 'add'},
               {'path': '/cpu/spec_2',
                'value': {'model': ' Pentium(R) Dual-Core CPU E5700 @ 3.00GHz',
                          'frequency': 3003},
                'op': 'add'}]},
    {'uuid': '03000200-0400-0500-0006-000700080009',
     'mac': '4c:09:b4:b2:80:8a',
     'patch': [{'path': '/cpu/real',
                'value': 1,
                'op': 'add'},
               {'path': '/cpu/total',
                'value': 2,
                'op': 'add'},
               {'path': '/cpu/spec_1',
                'value': {'model': ' Pentium(R) Dual-Core CPU E5700 @ 3.00GHz',
                          'frequency': 3003},
                'op': 'add'},
               {'path': '/cpu/spec_2',
                'value': {'model': ' Pentium(R) Dual-Core CPU E5700 @ 3.00GHz',
                          'frequency': 3003},
                'op': 'add'}]}]
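
A sketch of how a unit test might consume these fixtures; the assertions simply mirror the data above:

fake = FakeLogicNetwork()

network = fake.fake_network_parameters()
routers = fake.fake_router_parameters()
assert network['segmentation_type'] == 'vlan,flat,vxlan,gre'
assert routers[0]['subnets'] == ['subnet2', 'subnet10']

# Every discovered-host fixture carries one deployment interface.
for host in FakeDiscoverHosts.daisy_data:
    assert host['interfaces'][0]['is_deployment'] == 'True'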

View File

@ -0,0 +1,58 @@
# (c) 2014 Deutsche Telekom AG
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
common_flavor_details = {
"name": "get-flavor-details",
"http-method": "GET",
"url": "flavors/%s",
"resources": [
{"name": "flavor", "expected_result": 404}
]
}
common_flavor_list = {
"name": "list-flavors-with-detail",
"http-method": "GET",
"url": "flavors/detail",
"json-schema": {
"type": "object",
"properties": {
}
}
}
common_admin_flavor_create = {
"name": "flavor-create",
"http-method": "POST",
"admin_client": True,
"url": "flavors",
"default_result_code": 400,
"json-schema": {
"type": "object",
"properties": {
"flavor": {
"type": "object",
"properties": {
"name": {"type": "string",
"exclude_tests": ["gen_str_min_length"]},
"ram": {"type": "integer", "minimum": 1},
"vcpus": {"type": "integer", "minimum": 1},
"disk": {"type": "integer"},
"id": {"type": "integer",
"exclude_tests": ["gen_none", "gen_string"]
},
}
}
}
}
}

View File

@ -0,0 +1,39 @@
# (c) 2014 Deutsche Telekom AG
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from tempest.api_schema.request.compute import flavors
flavors_details = copy.deepcopy(flavors.common_flavor_details)
flavor_list = copy.deepcopy(flavors.common_flavor_list)
flavor_create = copy.deepcopy(flavors.common_admin_flavor_create)
flavor_list["json-schema"]["properties"] = {
"minRam": {
"type": "integer",
"results": {
"gen_none": 400,
"gen_string": 400
}
},
"minDisk": {
"type": "integer",
"results": {
"gen_none": 400,
"gen_string": 400
}
}
}
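
Descriptions like these drive tempest's negative-test generator: for each property it substitutes generated invalid values and compares the response status against the per-generator results, falling back to default_result_code. A rough, simplified sketch of that lookup; the helper below is hypothetical, while the real logic lives in tempest's negative test framework:

def expected_status(schema, prop, generator_name):
    # Return the status a generated-bad-value test should expect,
    # or None when the property opts out of that generator.
    desc = schema['json-schema']['properties'][prop]
    if generator_name in desc.get('exclude_tests', []):
        return None
    return desc.get('results', {}).get(
        generator_name, schema.get('default_result_code', 400))

# 'gen_none' on minRam is declared above to produce a 400.
print(expected_status(flavor_list, 'minRam', 'gen_none'))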

View File

@ -0,0 +1,60 @@
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
node = {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'interfaces': {'type': 'array'},
'host': {'type': 'string'},
'task_state': {'type': ['string', 'null']},
'cpus': {'type': ['integer', 'string']},
'memory_mb': {'type': ['integer', 'string']},
'disk_gb': {'type': ['integer', 'string']},
},
'required': ['id', 'interfaces', 'host', 'task_state', 'cpus', 'memory_mb',
'disk_gb']
}
list_baremetal_nodes = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'nodes': {
'type': 'array',
'items': node
}
},
'required': ['nodes']
}
}
baremetal_node = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'node': node
},
'required': ['node']
}
}
get_baremetal_node = copy.deepcopy(baremetal_node)
get_baremetal_node['response_body']['properties']['node'][
'properties'].update({'instance_uuid': {'type': ['string', 'null']}})
get_baremetal_node['response_body']['properties']['node'][
'required'].append('instance_uuid')
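
Response schemas like these are checked against every API reply by tempest's rest client. The same check can be reproduced directly with the jsonschema library; the sample body below is made up:

import jsonschema

sample = {
    'nodes': [{
        'id': '1be26c0b',
        'interfaces': [],
        'host': 'compute-1',
        'task_state': None,
        'cpus': 8,
        'memory_mb': 16384,
        'disk_gb': 200,
    }]
}

# Passes silently; a missing required key would raise ValidationError.
jsonschema.validate(sample, list_baremetal_nodes['response_body'])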

View File

@ -0,0 +1,34 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
add_remove_list_flavor_access = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'flavor_access': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'flavor_id': {'type': 'string'},
'tenant_id': {'type': 'string'},
},
'required': ['flavor_id', 'tenant_id'],
}
}
},
'required': ['flavor_access']
}
}

View File

@ -0,0 +1,39 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
flavor_extra_specs = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'extra_specs': {
'type': 'object',
'patternProperties': {
                r'^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
}
}
},
'required': ['extra_specs']
}
}
flavor_extra_specs_key = {
'status_code': [200],
'response_body': {
'type': 'object',
'patternProperties': {
            r'^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
}
}
}

View File

@ -0,0 +1,50 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
list_migrations = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'migrations': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': 'integer'},
'status': {'type': 'string'},
'instance_uuid': {'type': 'string'},
'source_node': {'type': 'string'},
'source_compute': {'type': 'string'},
'dest_node': {'type': 'string'},
'dest_compute': {'type': 'string'},
'dest_host': {'type': 'string'},
                    # zfl: in tecs 1.0 old_instance_type_id is None, so its
                    # type is left unchecked; the key itself stays required.
                    # 'old_instance_type_id': {'type': 'integer'},
'new_instance_type_id': {'type': 'integer'},
'created_at': {'type': 'string'},
'updated_at': {'type': ['string', 'null']}
},
'required': [
'id', 'status', 'instance_uuid', 'source_node',
'source_compute', 'dest_node', 'dest_compute',
'dest_host', 'old_instance_type_id',
'new_instance_type_id', 'created_at', 'updated_at'
]
}
}
},
'required': ['migrations']
}
}

View File

@ -0,0 +1,81 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
links = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'href': {
'type': 'string',
'format': 'uri'
},
'rel': {'type': 'string'}
},
'required': ['href', 'rel']
}
}
mac_address = {
'type': 'string',
'pattern': '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
}
access_ip_v4 = {
'type': 'string',
'anyOf': [{'format': 'ipv4'}, {'enum': ['']}]
}
access_ip_v6 = {
'type': 'string',
'anyOf': [{'format': 'ipv6'}, {'enum': ['']}]
}
addresses = {
'type': 'object',
'patternProperties': {
# NOTE: Here is for 'private' or something.
'^[a-zA-Z0-9-_.]+$': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'version': {'type': 'integer'},
'addr': {
'type': 'string',
'anyOf': [
{'format': 'ipv4'},
{'format': 'ipv6'}
]
}
},
'required': ['version', 'addr']
}
}
}
}
response_header = {
'connection': {'type': 'string'},
'content-length': {'type': 'string'},
'content-type': {'type': 'string'},
'status': {'type': 'string'},
'x-compute-request-id': {'type': 'string'},
'vary': {'type': 'string'},
'x-openstack-nova-api-version': {'type': 'string'},
'date': {
'type': 'string',
        'format': 'date-time'
}
}

View File

@ -0,0 +1,61 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
list_services = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'services': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': ['integer', 'string'],
'pattern': '^[a-zA-Z!]*@[0-9]+$'},
'zone': {'type': 'string'},
'host': {'type': 'string'},
'state': {'type': 'string'},
'binary': {'type': 'string'},
'status': {'type': 'string'},
'updated_at': {'type': ['string', 'null']},
'disabled_reason': {'type': ['string', 'null']}
},
'required': ['id', 'zone', 'host', 'state', 'binary',
'status', 'updated_at', 'disabled_reason']
}
}
},
'required': ['services']
}
}
enable_service = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'service': {
'type': 'object',
'properties': {
'status': {'type': 'string'},
'binary': {'type': 'string'},
'host': {'type': 'string'}
},
'required': ['status', 'binary', 'host']
}
},
'required': ['service']
}
}

View File

@ -0,0 +1,57 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
common_agent_info = {
'type': 'object',
'properties': {
'agent_id': {'type': ['integer', 'string']},
'hypervisor': {'type': 'string'},
'os': {'type': 'string'},
'architecture': {'type': 'string'},
'version': {'type': 'string'},
'url': {'type': 'string', 'format': 'uri'},
'md5hash': {'type': 'string'}
},
'required': ['agent_id', 'hypervisor', 'os', 'architecture',
'version', 'url', 'md5hash']
}
list_agents = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'agents': {
'type': 'array',
'items': common_agent_info
}
},
'required': ['agents']
}
}
create_agent = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'agent': common_agent_info
},
'required': ['agent']
}
}
delete_agent = {
'status_code': [200]
}

View File

@ -0,0 +1,88 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
# create-aggregate api doesn't have 'hosts' and 'metadata' attributes.
aggregate_for_create = {
'type': 'object',
'properties': {
'availability_zone': {'type': ['string', 'null']},
'created_at': {'type': 'string'},
'deleted': {'type': 'boolean'},
'deleted_at': {'type': ['string', 'null']},
'id': {'type': 'integer'},
'name': {'type': 'string'},
'updated_at': {'type': ['string', 'null']}
},
'required': ['availability_zone', 'created_at', 'deleted',
'deleted_at', 'id', 'name', 'updated_at'],
}
common_aggregate_info = copy.deepcopy(aggregate_for_create)
common_aggregate_info['properties'].update({
'hosts': {'type': 'array'},
'metadata': {'type': 'object'}
})
common_aggregate_info['required'].extend(['hosts', 'metadata'])
list_aggregates = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'aggregates': {
'type': 'array',
'items': common_aggregate_info
}
},
'required': ['aggregates'],
}
}
get_aggregate = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'aggregate': common_aggregate_info
},
'required': ['aggregate'],
}
}
aggregate_set_metadata = get_aggregate
# The 'updated_at' attribute of 'update_aggregate' can't be null.
update_aggregate = copy.deepcopy(get_aggregate)
update_aggregate['response_body']['properties']['aggregate']['properties'][
'updated_at'] = {
'type': 'string'
}
delete_aggregate = {
'status_code': [200]
}
create_aggregate = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'aggregate': aggregate_for_create
},
'required': ['aggregate'],
}
}
aggregate_add_remove_host = get_aggregate

View File

@ -0,0 +1,74 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
base = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'availabilityZoneInfo': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'zoneName': {'type': 'string'},
'zoneState': {
'type': 'object',
'properties': {
'available': {'type': 'boolean'}
},
'required': ['available']
},
# NOTE: Here is the difference between detail and
# non-detail.
'hosts': {'type': 'null'}
},
'required': ['zoneName', 'zoneState', 'hosts']
}
}
},
'required': ['availabilityZoneInfo']
}
}
detail = {
'type': 'object',
'patternProperties': {
# NOTE: Here is for a hostname
'^[a-zA-Z0-9-_.]+$': {
'type': 'object',
'patternProperties': {
# NOTE: Here is for a service name
'^.*$': {
'type': 'object',
'properties': {
'available': {'type': 'boolean'},
'active': {'type': 'boolean'},
'updated_at': {'type': ['string', 'null']}
},
'required': ['available', 'active', 'updated_at']
}
}
}
}
}
list_availability_zone_list = copy.deepcopy(base)
list_availability_zone_list_detail = copy.deepcopy(base)
list_availability_zone_list_detail['response_body']['properties'][
'availabilityZoneInfo']['items']['properties']['hosts'] = detail

View File

@ -0,0 +1,39 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
_common_schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'certificate': {
'type': 'object',
'properties': {
'data': {'type': 'string'},
'private_key': {'type': 'string'},
},
'required': ['data', 'private_key']
}
},
'required': ['certificate']
}
}
get_certificate = copy.deepcopy(_common_schema)
get_certificate['response_body']['properties']['certificate'][
'properties']['private_key'].update({'type': 'null'})
create_certificate = copy.deepcopy(_common_schema)

View File

@ -0,0 +1,45 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
list_extensions = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'extensions': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'updated': {
'type': 'string',
                            'format': 'date-time'
},
'name': {'type': 'string'},
'links': {'type': 'array'},
'namespace': {
'type': 'string',
'format': 'uri'
},
'alias': {'type': 'string'},
'description': {'type': 'string'}
},
'required': ['updated', 'name', 'links', 'namespace',
'alias', 'description']
}
}
},
'required': ['extensions']
}
}

View File

@ -0,0 +1,41 @@
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
get_fixed_ip = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'fixed_ip': {
'type': 'object',
'properties': {
'address': {
'type': 'string',
'format': 'ip-address'
},
'cidr': {'type': 'string'},
'host': {'type': 'string'},
'hostname': {'type': 'string'}
},
'required': ['address', 'cidr', 'host', 'hostname']
}
},
'required': ['fixed_ip']
}
}
reserve_fixed_ip = {
'status_code': [202],
'response_body': {'type': 'string'}
}

Some files were not shown because too many files have changed in this diff.