Delete glance scrubber code

Change-Id: I8229ee50356a5c63c8922d7c30d01d160be788e8
Signed-off-by: Zhijiang Hu <hu.zhijiang@zte.com.cn>
Zhijiang Hu 2016-09-18 04:28:15 -04:00
parent 58673769c2
commit a489eb4df3
31 changed files with 10 additions and 3257 deletions

@@ -1,289 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glance_store as store_api
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import webob.exc
import daisy.api.common
from daisy.common import exception
from daisy.common import store_utils
from daisy.common import utils
import daisy.db
from daisy import i18n
import daisy.registry.client.v1.api as registry
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
def initiate_deletion(req, location_data, id):
"""
Deletes image data from the given location in the backend store.
:param req: The WSGI/Webob Request object
:param location_data: Location to the image data in a data store
:param id: Opaque image identifier
"""
store_utils.delete_image_location_from_backend(req.context,
id, location_data)
def _kill(req, image_id, from_state):
"""
Marks the image status as `killed`.
:param req: The WSGI/Webob Request object
:param image_id: Opaque image identifier
:param from_state: Permitted current status for transition to 'killed'
"""
# TODO(dosaboy): http://docs.openstack.org/developer/glance/statuses.html
# needs updating to reflect the fact that queued->killed and saving->killed
# are both allowed.
registry.update_image_metadata(req.context, image_id,
{'status': 'killed'},
from_state=from_state)
def safe_kill(req, image_id, from_state):
"""
Mark image killed without raising exceptions if it fails.
Since _kill is meant to be called from exception handlers, it should
not raise exceptions itself; rather, it should just log its error.
:param req: The WSGI/Webob Request object
:param image_id: Opaque image identifier
:param from_state: Permitted current status for transition to 'killed'
"""
try:
_kill(req, image_id, from_state)
except Exception:
LOG.exception(_LE("Unable to kill image %(id)s: ") % {'id': image_id})
def upload_data_to_store(req, image_meta, image_data, store, notifier):
"""
Upload image data to the specified store.
Upload the image data to the store and clean up on error.
"""
image_id = image_meta['id']
db_api = daisy.db.get_api()
image_size = image_meta.get('size')
try:
# By default image_data will be passed as a CooperativeReader object.
# But if 'user_storage_quota' is enabled and 'remaining' is not None,
# then it will be wrapped in a LimitingReader before being passed to
# the 'store_add_to_backend' method.
image_data = utils.CooperativeReader(image_data)
remaining = daisy.api.common.check_quota(
req.context, image_size, db_api, image_id=image_id)
if remaining is not None:
image_data = utils.LimitingReader(image_data, remaining)
(uri,
size,
checksum,
location_metadata) = store_api.store_add_to_backend(
image_meta['id'],
image_data,
image_meta['size'],
store,
context=req.context)
location_data = {'url': uri,
'metadata': location_metadata,
'status': 'active'}
try:
# recheck the quota in case there were simultaneous uploads that
# did not provide the size
daisy.api.common.check_quota(
req.context, size, db_api, image_id=image_id)
except exception.StorageQuotaFull:
with excutils.save_and_reraise_exception():
LOG.info(_LI('Cleaning up %s after exceeding '
'the quota') % image_id)
store_utils.safe_delete_from_backend(
req.context, image_meta['id'], location_data)
def _kill_mismatched(image_meta, attr, actual):
supplied = image_meta.get(attr)
if supplied and supplied != actual:
msg = (_("Supplied %(attr)s (%(supplied)s) and "
"%(attr)s generated from uploaded image "
"(%(actual)s) did not match. Setting image "
"status to 'killed'.") % {'attr': attr,
'supplied': supplied,
'actual': actual})
LOG.error(msg)
safe_kill(req, image_id, 'saving')
initiate_deletion(req, location_data, image_id)
raise webob.exc.HTTPBadRequest(explanation=msg,
content_type="text/plain",
request=req)
# Verify any supplied size/checksum value matches size/checksum
# returned from store when adding image
_kill_mismatched(image_meta, 'size', size)
_kill_mismatched(image_meta, 'checksum', checksum)
# Update the database with the checksum returned
# from the backend store
LOG.debug("Updating image %(image_id)s data. "
"Checksum set to %(checksum)s, size set "
"to %(size)d", {'image_id': image_id,
'checksum': checksum,
'size': size})
update_data = {'checksum': checksum,
'size': size}
try:
try:
state = 'saving'
image_meta = registry.update_image_metadata(req.context,
image_id,
update_data,
from_state=state)
except exception.Duplicate:
image = registry.get_image_metadata(req.context, image_id)
if image['status'] == 'deleted':
raise exception.NotFound()
else:
raise
except exception.NotFound:
msg = _LI("Image %s could not be found after upload. The image may"
" have been deleted during the upload.") % image_id
LOG.info(msg)
# NOTE(jculp): we need to clean up the datastore if an image
# resource is deleted while the image data is being uploaded
#
# We get "location_data" from above call to store.add(), any
# exceptions that occur there handle this same issue internally,
# Since this is store-agnostic, should apply to all stores.
initiate_deletion(req, location_data, image_id)
raise webob.exc.HTTPPreconditionFailed(explanation=msg,
request=req,
content_type='text/plain')
except store_api.StoreAddDisabled:
msg = _("Error in store configuration. Adding images to store "
"is disabled.")
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPGone(explanation=msg, request=req,
content_type='text/plain')
except exception.Duplicate as e:
msg = (_("Attempt to upload duplicate image: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
# NOTE(dosaboy): do not delete the image since it is likely that this
# conflict is a result of another concurrent upload that will be
# successful.
notifier.error('image.upload', msg)
raise webob.exc.HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden upload attempt: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except store_api.StorageFull as e:
msg = (_("Image storage media is full: %s") %
utils.exception_to_str(e))
LOG.error(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except store_api.StorageWriteDenied as e:
msg = (_("Insufficient permissions on image storage media: %s") %
utils.exception_to_str(e))
LOG.error(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPServiceUnavailable(explanation=msg,
request=req,
content_type='text/plain')
except exception.ImageSizeLimitExceeded as e:
msg = (_("Denying attempt to upload image larger than %d bytes.")
% CONF.image_size_cap)
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except exception.StorageQuotaFull as e:
msg = (_("Denying attempt to upload image because it exceeds the "
"quota: %s") % utils.exception_to_str(e))
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req,
content_type='text/plain')
except webob.exc.HTTPError:
# NOTE(bcwaldon): Ideally, we would just call 'raise' here,
# but something in the above function calls is affecting the
# exception context and we must explicitly re-raise the
# caught exception.
msg = _LE("Received HTTP error while uploading image %s") % image_id
notifier.error('image.upload', msg)
with excutils.save_and_reraise_exception():
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
except (ValueError, IOError) as e:
msg = _("Client disconnected before sending all data to backend")
LOG.warn(msg)
safe_kill(req, image_id, 'saving')
raise webob.exc.HTTPBadRequest(explanation=msg,
content_type="text/plain",
request=req)
except Exception as e:
msg = _("Failed to upload image %s") % image_id
LOG.exception(msg)
safe_kill(req, image_id, 'saving')
notifier.error('image.upload', msg)
raise webob.exc.HTTPInternalServerError(explanation=msg,
request=req,
content_type='text/plain')
return image_meta, location_data
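
The double quota check above is the subtle part: the first check_quota call uses the client-declared size, which may be absent, so the upload is additionally capped by a LimitingReader and then rechecked with the size the store actually reports. A minimal standalone sketch of that pattern (QuotaExceeded, quota_api and the store object are hypothetical stand-ins for the daisy APIs, not the real ones):

    class QuotaExceeded(Exception):
        pass

    class LimitingReader(object):
        """Wrap a file-like object; fail once more than `limit` bytes are read."""
        def __init__(self, data, limit):
            self.data, self.limit, self.bytes_read = data, limit, 0

        def read(self, n=65536):
            chunk = self.data.read(n)
            self.bytes_read += len(chunk)
            if self.bytes_read > self.limit:
                raise QuotaExceeded()
            return chunk

    def upload(data, declared_size, quota_api, store):
        remaining = quota_api.remaining()
        if declared_size and declared_size > remaining:
            raise QuotaExceeded()                # first check: declared size
        actual_size = store.add(LimitingReader(data, remaining))
        if actual_size > quota_api.remaining():  # recheck: concurrent uploads may
            store.delete()                       # have consumed quota meanwhile
            raise QuotaExceeded()                # clean up, as the code above does
        return actual_size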

@@ -1,411 +0,0 @@
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper script for starting/stopping/reloading Glance server programs.
Thanks for some of the code, Swifties ;)
"""
from __future__ import print_function
from __future__ import with_statement
import argparse
import fcntl
import os
import resource
import signal
import subprocess
import sys
import tempfile
import time
from oslo_config import cfg
from oslo_utils import units
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from daisy.common import config
from daisy import i18n
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
_ = i18n._
CONF = cfg.CONF
ALL_COMMANDS = ['start', 'status', 'stop', 'shutdown', 'restart',
'reload', 'force-reload']
ALL_SERVERS = ['api', 'registry', 'scrubber']
RELOAD_SERVERS = ['glance-api', 'glance-registry']
GRACEFUL_SHUTDOWN_SERVERS = ['glance-api', 'glance-registry',
'glance-scrubber']
MAX_DESCRIPTORS = 32768
MAX_MEMORY = 2 * units.Gi # 2 GB
USAGE = """%(prog)s [options] <SERVER> <COMMAND> [CONFPATH]
Where <SERVER> is one of:
all, {0}
And command is one of:
{1}
And CONFPATH is the optional configuration file to use.""".format(
', '.join(ALL_SERVERS), ', '.join(ALL_COMMANDS))
exitcode = 0
def gated_by(predicate):
def wrap(f):
def wrapped_f(*args):
if predicate:
return f(*args)
else:
return None
return wrapped_f
return wrap
def pid_files(server, pid_file):
pid_files = []
if pid_file:
if os.path.exists(os.path.abspath(pid_file)):
pid_files = [os.path.abspath(pid_file)]
else:
if os.path.exists('/var/run/glance/%s.pid' % server):
pid_files = ['/var/run/glance/%s.pid' % server]
for pid_file in pid_files:
pid = int(open(pid_file).read().strip())
yield pid_file, pid
def do_start(verb, pid_file, server, args):
if verb != 'Respawn' and pid_file == CONF.pid_file:
for pid_file, pid in pid_files(server, pid_file):
if os.path.exists('/proc/%s' % pid):
print(_("%(serv)s appears to already be running: %(pid)s") %
{'serv': server, 'pid': pid_file})
return
else:
print(_("Removing stale pid file %s") % pid_file)
os.unlink(pid_file)
try:
resource.setrlimit(resource.RLIMIT_NOFILE,
(MAX_DESCRIPTORS, MAX_DESCRIPTORS))
resource.setrlimit(resource.RLIMIT_DATA,
(MAX_MEMORY, MAX_MEMORY))
except ValueError:
print(_('Unable to increase file descriptor limit. '
'Running as non-root?'))
os.environ['PYTHON_EGG_CACHE'] = '/tmp'
def write_pid_file(pid_file, pid):
with open(pid_file, 'w') as fp:
fp.write('%d\n' % pid)
def redirect_to_null(fds):
with open(os.devnull, 'r+b') as nullfile:
for desc in fds: # close fds
try:
os.dup2(nullfile.fileno(), desc)
except OSError:
pass
def redirect_to_syslog(fds, server):
log_cmd = 'logger'
log_cmd_params = '-t "%s[%d]"' % (server, os.getpid())
process = subprocess.Popen([log_cmd, log_cmd_params],
stdin=subprocess.PIPE)
for desc in fds: # pipe to logger command
try:
os.dup2(process.stdin.fileno(), desc)
except OSError:
pass
def redirect_stdio(server, capture_output):
input = [sys.stdin.fileno()]
output = [sys.stdout.fileno(), sys.stderr.fileno()]
redirect_to_null(input)
if capture_output:
redirect_to_syslog(output, server)
else:
redirect_to_null(output)
@gated_by(CONF.capture_output)
def close_stdio_on_exec():
fds = [sys.stdin.fileno(), sys.stdout.fileno(), sys.stderr.fileno()]
for desc in fds: # set close on exec flag
fcntl.fcntl(desc, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
def launch(pid_file, conf_file=None, capture_output=False, await_time=0):
args = [server]
if conf_file:
args += ['--config-file', conf_file]
msg = (_('%(verb)sing %(serv)s with %(conf)s') %
{'verb': verb, 'serv': server, 'conf': conf_file})
else:
msg = (_('%(verb)sing %(serv)s') % {'verb': verb, 'serv': server})
print(msg)
close_stdio_on_exec()
pid = os.fork()
if pid == 0:
os.setsid()
redirect_stdio(server, capture_output)
try:
os.execlp('%s' % server, *args)
except OSError as e:
msg = (_('unable to launch %(serv)s. Got error: %(e)s') %
{'serv': server, 'e': e})
sys.exit(msg)
sys.exit(0)
else:
write_pid_file(pid_file, pid)
await_child(pid, await_time)
return pid
@gated_by(CONF.await_child)
def await_child(pid, await_time):
bail_time = time.time() + await_time
while time.time() < bail_time:
reported_pid, status = os.waitpid(pid, os.WNOHANG)
if reported_pid == pid:
global exitcode
exitcode = os.WEXITSTATUS(status)
break
time.sleep(0.05)
conf_file = None
if args and os.path.exists(args[0]):
conf_file = os.path.abspath(os.path.expanduser(args[0]))
return launch(pid_file, conf_file, CONF.capture_output, CONF.await_child)
def do_check_status(pid_file, server):
if os.path.exists(pid_file):
with open(pid_file, 'r') as pidfile:
pid = pidfile.read().strip()
print(_("%(serv)s (pid %(pid)s) is running...") %
{'serv': server, 'pid': pid})
else:
print(_("%s is stopped") % server)
def get_pid_file(server, pid_file):
pid_file = (os.path.abspath(pid_file) if pid_file else
'/var/run/glance/%s.pid' % server)
dir, file = os.path.split(pid_file)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
if not os.access(dir, os.W_OK):
fallback = os.path.join(tempfile.mkdtemp(), '%s.pid' % server)
msg = (_('Unable to create pid file %(pid)s. Running as non-root?\n'
'Falling back to a temp file, you can stop %(service)s '
'service using:\n'
' %(file)s %(server)s stop --pid-file %(fb)s') %
{'pid': pid_file,
'service': server,
'file': __file__,
'server': server,
'fb': fallback})
print(msg)
pid_file = fallback
return pid_file
def do_reload(pid_file, server):
if server not in RELOAD_SERVERS:
msg = (_('Reload of %(serv)s not supported') % {'serv': server})
sys.exit(msg)
pid = None
if os.path.exists(pid_file):
with open(pid_file, 'r') as pidfile:
pid = int(pidfile.read().strip())
else:
msg = (_('Server %(serv)s is stopped') % {'serv': server})
sys.exit(msg)
sig = signal.SIGHUP
try:
print(_('Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)')
% {'serv': server, 'pid': pid, 'sig': sig})
os.kill(pid, sig)
except OSError:
print(_("Process %d not running") % pid)
def do_stop(server, args, graceful=False):
if graceful and server in GRACEFUL_SHUTDOWN_SERVERS:
sig = signal.SIGHUP
else:
sig = signal.SIGTERM
did_anything = False
pfiles = pid_files(server, CONF.pid_file)
for pid_file, pid in pfiles:
did_anything = True
try:
os.unlink(pid_file)
except OSError:
pass
try:
print(_('Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)')
% {'serv': server, 'pid': pid, 'sig': sig})
os.kill(pid, sig)
except OSError:
print(_("Process %d not running") % pid)
for pid_file, pid in pfiles:
for _junk in range(150): # 15 seconds
if not os.path.exists('/proc/%s' % pid):
break
time.sleep(0.1)
else:
print(_('Waited 15 seconds for pid %(pid)s (%(file)s) to die;'
' giving up') % {'pid': pid, 'file': pid_file})
if not did_anything:
print(_('%s is already stopped') % server)
def add_command_parsers(subparsers):
cmd_parser = argparse.ArgumentParser(add_help=False)
cmd_subparsers = cmd_parser.add_subparsers(dest='command')
for cmd in ALL_COMMANDS:
parser = cmd_subparsers.add_parser(cmd)
parser.add_argument('args', nargs=argparse.REMAINDER)
for server in ALL_SERVERS:
full_name = 'glance-' + server
parser = subparsers.add_parser(server, parents=[cmd_parser])
parser.set_defaults(servers=[full_name])
parser = subparsers.add_parser(full_name, parents=[cmd_parser])
parser.set_defaults(servers=[full_name])
parser = subparsers.add_parser('all', parents=[cmd_parser])
parser.set_defaults(servers=['glance-' + s for s in ALL_SERVERS])
def main():
global exitcode
opts = [
cfg.SubCommandOpt('server',
title='Server types',
help='Available server types',
handler=add_command_parsers),
cfg.StrOpt('pid-file',
metavar='PATH',
help='File to use as pid file. Default: '
'/var/run/glance/$server.pid.'),
cfg.IntOpt('await-child',
metavar='DELAY',
default=0,
help='Period to wait for service death '
'in order to report exit code '
'(default is to not wait at all).'),
cfg.BoolOpt('capture-output',
default=False,
help='Capture stdout/err in syslog '
'instead of discarding it.'),
cfg.BoolOpt('respawn',
default=False,
help='Restart service on unexpected death.'),
]
CONF.register_cli_opts(opts)
config.parse_args(usage=USAGE)
@gated_by(CONF.await_child)
@gated_by(CONF.respawn)
def mutually_exclusive():
sys.stderr.write('--await-child and --respawn are mutually exclusive')
sys.exit(1)
mutually_exclusive()
@gated_by(CONF.respawn)
def anticipate_respawn(children):
while children:
pid, status = os.wait()
if pid in children:
(pid_file, server, args) = children.pop(pid)
running = os.path.exists(pid_file)
one_second_ago = time.time() - 1
bouncing = (running and
os.path.getmtime(pid_file) >= one_second_ago)
if running and not bouncing:
args = (pid_file, server, args)
new_pid = do_start('Respawn', *args)
children[new_pid] = args
else:
rsn = 'bouncing' if bouncing else 'deliberately stopped'
print(_('Suppressed respawn as %(serv)s was %(rsn)s.')
% {'serv': server, 'rsn': rsn})
if CONF.server.command == 'start':
children = {}
for server in CONF.server.servers:
pid_file = get_pid_file(server, CONF.pid_file)
args = (pid_file, server, CONF.server.args)
pid = do_start('Start', *args)
children[pid] = args
anticipate_respawn(children)
if CONF.server.command == 'status':
for server in CONF.server.servers:
pid_file = get_pid_file(server, CONF.pid_file)
do_check_status(pid_file, server)
if CONF.server.command == 'stop':
for server in CONF.server.servers:
do_stop(server, CONF.server.args)
if CONF.server.command == 'shutdown':
for server in CONF.server.servers:
do_stop(server, CONF.server.args, graceful=True)
if CONF.server.command == 'restart':
for server in CONF.server.servers:
do_stop(server, CONF.server.args)
for server in CONF.server.servers:
pid_file = get_pid_file(server, CONF.pid_file)
do_start('Restart', pid_file, server, CONF.server.args)
if CONF.server.command in ('reload', 'force-reload'):
for server in CONF.server.servers:
pid_file = get_pid_file(server, CONF.pid_file)
do_reload(pid_file, server)
sys.exit(exitcode)
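
The gated_by decorator above deserves a note: it captures its predicate once, at decoration time, which is why the script only decorates the nested helpers inside do_start() and main(), after CONF has been parsed. A self-contained sketch of the behaviour:

    def gated_by(predicate):
        def wrap(f):
            def wrapped_f(*args):
                if predicate:
                    return f(*args)
                return None
            return wrapped_f
        return wrap

    VERBOSE = False          # stands in for a CONF flag

    @gated_by(VERBOSE)
    def trace(msg):
        print(msg)

    trace('never printed')   # predicate was falsy when trace was decorated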

@@ -1,73 +0,0 @@
#!/usr/bin/env python
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Glance Scrub Service
"""
import os
import sys
import glance_store
from oslo_config import cfg
from oslo_log import log as logging
from daisy.common import config
from daisy.openstack.common import systemd
from daisy import scrubber
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
CONF = cfg.CONF
logging.register_options(CONF)
def main():
CONF.register_cli_opts(scrubber.scrubber_cmd_cli_opts)
CONF.register_opts(scrubber.scrubber_cmd_opts)
try:
config.parse_args()
logging.setup(CONF, 'glance')
glance_store.register_opts(config.CONF)
glance_store.create_stores(config.CONF)
glance_store.verify_default_store()
app = scrubber.Scrubber(glance_store)
if CONF.daemon:
server = scrubber.Daemon(CONF.wakeup_time)
server.start(app)
systemd.notify_once()
server.wait()
else:
import eventlet
pool = eventlet.greenpool.GreenPool(1000)
app.run(pool)
except RuntimeError as e:
sys.exit("ERROR: %s" % e)
if __name__ == '__main__':
main()
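
In daemon mode the scrubber keeps running and wakes up periodically; otherwise it makes a single pass over the queued images and exits. A simplified sketch of that control flow (not the actual scrubber.Daemon implementation, which is event-loop based):

    import time

    def serve(app, wakeup_time=300, daemon=False):
        while True:
            app.run(pool=None)        # one scrub pass over queued images
            if not daemon:
                break                 # one-shot mode: exit after a single pass
            time.sleep(wakeup_time)   # CONF.wakeup_time in the real service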

@@ -1,164 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_utils import excutils
import six
from daisy.api.v2 import images as v2_api
from daisy.common import exception
from daisy.common.scripts import utils as script_utils
from daisy.common import store_utils
from daisy.common import utils as common_utils
from daisy import i18n
__all__ = [
'run',
]
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
def run(t_id, context, task_repo, image_repo, image_factory):
LOG.info(_LI('Task %(task_id)s beginning import '
'execution.') % {'task_id': t_id})
_execute(t_id, task_repo, image_repo, image_factory)
# NOTE(nikhil): This lock prevents more than N threads from being
# spawned simultaneously, where N is the number of threads in the
# executor pool. The value is set to 10 in the eventlet executor.
@lockutils.synchronized("glance_import")
def _execute(t_id, task_repo, image_repo, image_factory):
task = script_utils.get_task(task_repo, t_id)
if task is None:
# NOTE: This happens if the task is not found in the database. In
# such cases there is no way to update the task status, so it is
# ignored here.
return
try:
task_input = script_utils.unpack_task_input(task)
uri = script_utils.validate_location_uri(task_input.get('import_from'))
image_id = import_image(image_repo, image_factory, task_input, t_id,
uri)
task.succeed({'image_id': image_id})
except Exception as e:
# Note: The message string contains "Error" in it to indicate
# in task.message that it is an error message for the user.
# TODO(nikhil): need to bring back save_and_reraise_exception when
# necessary
err_msg = ("Error: " + six.text_type(type(e)) + ': ' +
common_utils.exception_to_str(e))
log_msg = _LE(err_msg + ("Task ID %s" % task.task_id)) # noqa
LOG.exception(log_msg)
task.fail(_LE(err_msg)) # noqa
finally:
task_repo.save(task)
def import_image(image_repo, image_factory, task_input, task_id, uri):
original_image = create_image(image_repo, image_factory,
task_input.get('image_properties'), task_id)
# NOTE: set image status to saving just before setting data
original_image.status = 'saving'
image_repo.save(original_image)
image_id = original_image.image_id
# NOTE: Retrieving image from the database because the Image object
# returned from create_image method does not have appropriate factories
# wrapped around it.
new_image = image_repo.get(image_id)
set_image_data(new_image, uri, None)
try:
# NOTE: Check that the Image has not been deleted after setting the
# data and before saving the now-active image. If the image status is
# still 'saving', then new_image is saved, since it contains the
# updated location, size, virtual_size and checksum information, and
# its status was already set to 'active' in the set_image_data() call.
image = image_repo.get(image_id)
if image.status == 'saving':
image_repo.save(new_image)
return image_id
else:
msg = _("The Image %(image_id)s object being created by this task "
"%(task_id)s, is no longer in valid status for further "
"processing.") % {"image_id": image_id,
"task_id": task_id}
raise exception.Conflict(msg)
except (exception.Conflict, exception.NotFound):
with excutils.save_and_reraise_exception():
if new_image.locations:
for location in new_image.locations:
store_utils.delete_image_location_from_backend(
new_image.context,
image_id,
location)
def create_image(image_repo, image_factory, image_properties, task_id):
_base_properties = []
for k, v in v2_api.get_base_properties().items():
_base_properties.append(k)
properties = {}
# NOTE: get the base properties
for key in _base_properties:
try:
properties[key] = image_properties.pop(key)
except KeyError:
msg = ("Task ID %(task_id)s: Ignoring property %(k)s for setting "
"base properties while creating "
"Image.") % {'task_id': task_id, 'k': key}
LOG.debug(msg)
# NOTE: get the rest of the properties and pass them as
# extra_properties for Image to be created with them.
properties['extra_properties'] = image_properties
script_utils.set_base_image_properties(properties=properties)
image = image_factory.new_image(**properties)
image_repo.add(image)
return image
def set_image_data(image, uri, task_id):
data_iter = None
try:
LOG.info(_LI("Task %(task_id)s: Got image data uri %(data_uri)s to be "
"imported") % {"data_uri": uri, "task_id": task_id})
data_iter = script_utils.get_image_data_iter(uri)
image.set_data(data_iter)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.warn(_LW("Task %(task_id)s failed with exception %(error)s") %
{"error": common_utils.exception_to_str(e),
"task_id": task_id})
LOG.info(_LI("Task %(task_id)s: Could not import image file"
" %(image_data)s") % {"image_data": uri,
"task_id": task_id})
finally:
if isinstance(data_iter, file):
data_iter.close()
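
Because _execute() is wrapped in @lockutils.synchronized("glance_import"), import tasks within one process are serialized behind a single named lock. A tiny sketch of what that decorator provides (assuming oslo.concurrency is available):

    from oslo_concurrency import lockutils

    @lockutils.synchronized('glance_import')
    def critical_section():
        # Only one thread in this process executes this body at a time;
        # other callers block here until the lock is released.
        pass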

@@ -1,144 +0,0 @@
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import glance_store as store_api
from oslo_config import cfg
from oslo_log import log as logging
import six.moves.urllib.parse as urlparse
from daisy.common import utils
import daisy.db as db_api
from daisy import i18n
from daisy import scrubber
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LW = i18n._LW
store_utils_opts = [
cfg.BoolOpt('use_user_token', default=True,
help=_('Whether to pass through the user token when '
'making requests to the registry.')),
]
CONF = cfg.CONF
CONF.register_opts(store_utils_opts)
RESTRICTED_URI_SCHEMAS = frozenset(['file', 'filesystem', 'swift+config'])
def safe_delete_from_backend(context, image_id, location):
"""
Given a location, delete an image from the store and
update the location status in the db.
This function tries to handle all known exceptions which might be
raised by calls on the store and DB modules in its implementation.
:param context: The request context
:param image_id: The image identifier
:param location: The image location entry
"""
try:
ret = store_api.delete_from_backend(location['url'], context=context)
location['status'] = 'deleted'
if 'id' in location:
db_api.get_api().image_location_delete(context, image_id,
location['id'], 'deleted')
return ret
except store_api.NotFound:
msg = _LW('Failed to delete image %s in store from URI') % image_id
LOG.warn(msg)
except store_api.StoreDeleteNotSupported as e:
LOG.warn(utils.exception_to_str(e))
except store_api.UnsupportedBackend:
exc_type = sys.exc_info()[0].__name__
msg = (_LE('Failed to delete image %(image_id)s from store: %(exc)s') %
dict(image_id=image_id, exc=exc_type))
LOG.error(msg)
def schedule_delayed_delete_from_backend(context, image_id, location):
"""
Given a location, schedule the deletion of an image location and
update the location status in the db.
:param context: The request context
:param image_id: The image identifier
:param location: The image location entry
"""
__, db_queue = scrubber.get_scrub_queues()
if not CONF.use_user_token:
context = None
ret = db_queue.add_location(image_id, location, user_context=context)
if ret:
location['status'] = 'pending_delete'
if 'id' in location:
# NOTE(zhiyan): A newly added image location entry has no 'id'
# field since it has not been saved to the DB yet.
db_api.get_api().image_location_delete(context, image_id,
location['id'],
'pending_delete')
else:
db_api.get_api().image_location_add(context, image_id, location)
return ret
def delete_image_location_from_backend(context, image_id, location):
"""
Given a location, either immediately delete the image location or
schedule its deletion, and update the location status in the db.
:param context: The request context
:param image_id: The image identifier
:param location: The image location entry
"""
deleted = False
if CONF.delayed_delete:
deleted = schedule_delayed_delete_from_backend(context,
image_id, location)
if not deleted:
# NOTE(zhiyan): If the image metadata has not been saved to the DB,
# e.g. because the upload process failed, then we can't use the
# location status mechanism to support image pending-delete.
safe_delete_from_backend(context, image_id, location)
def validate_external_location(uri):
"""
Validate whether the URI of an external location is supported.
Only non-local store types are OK, i.e. S3, Swift,
HTTP. Note the absence of 'file://' for security reasons,
see LP bugs #942118 and #1400966; 'swift+config://' is also
absent for security reasons, see LP bug #1334196.
:param uri: The URI of the external image location.
:return: Whether the given URI of the external image location is OK.
"""
# TODO(zhiyan): This function could be moved to glance_store.
# TODO(gm): Use a whitelist of allowed schemes
scheme = urlparse.urlparse(uri).scheme
return (scheme in store_api.get_known_schemes() and
scheme not in RESTRICTED_URI_SCHEMAS)
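
validate_external_location() accepts only schemes the store layer advertises and that are not in the restricted set. A worked example with a hypothetical known-scheme set (the real one comes from store_api.get_known_schemes()):

    import six.moves.urllib.parse as urlparse

    KNOWN_SCHEMES = frozenset(['http', 'swift', 'file', 'swift+config'])
    RESTRICTED_URI_SCHEMAS = frozenset(['file', 'filesystem', 'swift+config'])

    def validate_external_location(uri):
        scheme = urlparse.urlparse(uri).scheme
        return (scheme in KNOWN_SCHEMES and
                scheme not in RESTRICTED_URI_SCHEMAS)

    assert validate_external_location('http://example.com/image.qcow2')
    assert not validate_external_location('file:///etc/passwd')      # LP #942118
    assert not validate_external_location('swift+config://ref/c/o')  # LP #1334196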

@@ -1,262 +0,0 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glance_store
from oslo_log import log as logging
from daisy.api import authorization
from daisy.api import policy
from daisy.api import property_protections
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import store_utils
import daisy.db
import daisy.domain
import daisy.location
import daisy.notifier
import daisy.quota
try:
import daisy.search
daisy_search = daisy.search
except ImportError:
daisy_search = None
LOG = logging.getLogger(__name__)
class Gateway(object):
def __init__(self, db_api=None, store_api=None, notifier=None,
policy_enforcer=None, es_api=None):
self.db_api = db_api or daisy.db.get_api()
self.store_api = store_api or glance_store
self.store_utils = store_utils
self.notifier = notifier or daisy.notifier.Notifier()
self.policy = policy_enforcer or policy.Enforcer()
if es_api:
self.es_api = es_api
else:
self.es_api = daisy_search.get_api() if daisy_search else None
def get_image_factory(self, context):
image_factory = daisy.domain.ImageFactory()
store_image_factory = daisy.location.ImageFactoryProxy(
image_factory, context, self.store_api, self.store_utils)
quota_image_factory = daisy.quota.ImageFactoryProxy(
store_image_factory, context, self.db_api, self.store_utils)
policy_image_factory = policy.ImageFactoryProxy(
quota_image_factory, context, self.policy)
notifier_image_factory = daisy.notifier.ImageFactoryProxy(
policy_image_factory, context, self.notifier)
if property_utils.is_property_protection_enabled():
property_rules = property_utils.PropertyRules(self.policy)
pif = property_protections.ProtectedImageFactoryProxy(
notifier_image_factory, context, property_rules)
authorized_image_factory = authorization.ImageFactoryProxy(
pif, context)
else:
authorized_image_factory = authorization.ImageFactoryProxy(
notifier_image_factory, context)
return authorized_image_factory
def get_image_member_factory(self, context):
image_factory = daisy.domain.ImageMemberFactory()
quota_image_factory = daisy.quota.ImageMemberFactoryProxy(
image_factory, context, self.db_api, self.store_utils)
policy_member_factory = policy.ImageMemberFactoryProxy(
quota_image_factory, context, self.policy)
authorized_image_factory = authorization.ImageMemberFactoryProxy(
policy_member_factory, context)
return authorized_image_factory
def get_repo(self, context):
image_repo = daisy.db.ImageRepo(context, self.db_api)
store_image_repo = daisy.location.ImageRepoProxy(
image_repo, context, self.store_api, self.store_utils)
quota_image_repo = daisy.quota.ImageRepoProxy(
store_image_repo, context, self.db_api, self.store_utils)
policy_image_repo = policy.ImageRepoProxy(
quota_image_repo, context, self.policy)
notifier_image_repo = daisy.notifier.ImageRepoProxy(
policy_image_repo, context, self.notifier)
if property_utils.is_property_protection_enabled():
property_rules = property_utils.PropertyRules(self.policy)
pir = property_protections.ProtectedImageRepoProxy(
notifier_image_repo, context, property_rules)
authorized_image_repo = authorization.ImageRepoProxy(
pir, context)
else:
authorized_image_repo = authorization.ImageRepoProxy(
notifier_image_repo, context)
return authorized_image_repo
def get_task_factory(self, context):
task_factory = daisy.domain.TaskFactory()
policy_task_factory = policy.TaskFactoryProxy(
task_factory, context, self.policy)
notifier_task_factory = daisy.notifier.TaskFactoryProxy(
policy_task_factory, context, self.notifier)
authorized_task_factory = authorization.TaskFactoryProxy(
notifier_task_factory, context)
return authorized_task_factory
def get_task_repo(self, context):
task_repo = daisy.db.TaskRepo(context, self.db_api)
policy_task_repo = policy.TaskRepoProxy(
task_repo, context, self.policy)
notifier_task_repo = daisy.notifier.TaskRepoProxy(
policy_task_repo, context, self.notifier)
authorized_task_repo = authorization.TaskRepoProxy(
notifier_task_repo, context)
return authorized_task_repo
def get_task_stub_repo(self, context):
task_stub_repo = daisy.db.TaskRepo(context, self.db_api)
policy_task_stub_repo = policy.TaskStubRepoProxy(
task_stub_repo, context, self.policy)
notifier_task_stub_repo = daisy.notifier.TaskStubRepoProxy(
policy_task_stub_repo, context, self.notifier)
authorized_task_stub_repo = authorization.TaskStubRepoProxy(
notifier_task_stub_repo, context)
return authorized_task_stub_repo
def get_task_executor_factory(self, context):
task_repo = self.get_task_repo(context)
image_repo = self.get_repo(context)
image_factory = self.get_image_factory(context)
return daisy.domain.TaskExecutorFactory(task_repo,
image_repo,
image_factory)
def get_metadef_namespace_factory(self, context):
ns_factory = daisy.domain.MetadefNamespaceFactory()
policy_ns_factory = policy.MetadefNamespaceFactoryProxy(
ns_factory, context, self.policy)
notifier_ns_factory = daisy.notifier.MetadefNamespaceFactoryProxy(
policy_ns_factory, context, self.notifier)
authorized_ns_factory = authorization.MetadefNamespaceFactoryProxy(
notifier_ns_factory, context)
return authorized_ns_factory
def get_metadef_namespace_repo(self, context):
ns_repo = daisy.db.MetadefNamespaceRepo(context, self.db_api)
policy_ns_repo = policy.MetadefNamespaceRepoProxy(
ns_repo, context, self.policy)
notifier_ns_repo = daisy.notifier.MetadefNamespaceRepoProxy(
policy_ns_repo, context, self.notifier)
authorized_ns_repo = authorization.MetadefNamespaceRepoProxy(
notifier_ns_repo, context)
return authorized_ns_repo
def get_metadef_object_factory(self, context):
object_factory = daisy.domain.MetadefObjectFactory()
policy_object_factory = policy.MetadefObjectFactoryProxy(
object_factory, context, self.policy)
notifier_object_factory = daisy.notifier.MetadefObjectFactoryProxy(
policy_object_factory, context, self.notifier)
authorized_object_factory = authorization.MetadefObjectFactoryProxy(
notifier_object_factory, context)
return authorized_object_factory
def get_metadef_object_repo(self, context):
object_repo = daisy.db.MetadefObjectRepo(context, self.db_api)
policy_object_repo = policy.MetadefObjectRepoProxy(
object_repo, context, self.policy)
notifier_object_repo = daisy.notifier.MetadefObjectRepoProxy(
policy_object_repo, context, self.notifier)
authorized_object_repo = authorization.MetadefObjectRepoProxy(
notifier_object_repo, context)
return authorized_object_repo
def get_metadef_resource_type_factory(self, context):
resource_type_factory = daisy.domain.MetadefResourceTypeFactory()
policy_resource_type_factory = policy.MetadefResourceTypeFactoryProxy(
resource_type_factory, context, self.policy)
notifier_resource_type_factory = (
daisy.notifier.MetadefResourceTypeFactoryProxy(
policy_resource_type_factory, context, self.notifier)
)
authorized_resource_type_factory = (
authorization.MetadefResourceTypeFactoryProxy(
notifier_resource_type_factory, context)
)
return authorized_resource_type_factory
def get_metadef_resource_type_repo(self, context):
resource_type_repo = daisy.db.MetadefResourceTypeRepo(
context, self.db_api)
policy_object_repo = policy.MetadefResourceTypeRepoProxy(
resource_type_repo, context, self.policy)
notifier_object_repo = daisy.notifier.MetadefResourceTypeRepoProxy(
policy_object_repo, context, self.notifier)
authorized_object_repo = authorization.MetadefResourceTypeRepoProxy(
notifier_object_repo, context)
return authorized_object_repo
def get_metadef_property_factory(self, context):
prop_factory = daisy.domain.MetadefPropertyFactory()
policy_prop_factory = policy.MetadefPropertyFactoryProxy(
prop_factory, context, self.policy)
notifier_prop_factory = daisy.notifier.MetadefPropertyFactoryProxy(
policy_prop_factory, context, self.notifier)
authorized_prop_factory = authorization.MetadefPropertyFactoryProxy(
notifier_prop_factory, context)
return authorized_prop_factory
def get_metadef_property_repo(self, context):
prop_repo = daisy.db.MetadefPropertyRepo(context, self.db_api)
policy_prop_repo = policy.MetadefPropertyRepoProxy(
prop_repo, context, self.policy)
notifier_prop_repo = daisy.notifier.MetadefPropertyRepoProxy(
policy_prop_repo, context, self.notifier)
authorized_prop_repo = authorization.MetadefPropertyRepoProxy(
notifier_prop_repo, context)
return authorized_prop_repo
def get_metadef_tag_factory(self, context):
tag_factory = daisy.domain.MetadefTagFactory()
policy_tag_factory = policy.MetadefTagFactoryProxy(
tag_factory, context, self.policy)
notifier_tag_factory = daisy.notifier.MetadefTagFactoryProxy(
policy_tag_factory, context, self.notifier)
authorized_tag_factory = authorization.MetadefTagFactoryProxy(
notifier_tag_factory, context)
return authorized_tag_factory
def get_metadef_tag_repo(self, context):
tag_repo = daisy.db.MetadefTagRepo(context, self.db_api)
policy_tag_repo = policy.MetadefTagRepoProxy(
tag_repo, context, self.policy)
notifier_tag_repo = daisy.notifier.MetadefTagRepoProxy(
policy_tag_repo, context, self.notifier)
authorized_tag_repo = authorization.MetadefTagRepoProxy(
notifier_tag_repo, context)
return authorized_tag_repo
def get_catalog_search_repo(self, context):
if self.es_api is None:
# TODO(mriedem): Make this a separate exception or change to
# warning/error logging in Liberty once we're past string freeze.
# See bug 1441764.
LOG.debug('The search and index services are not available. '
'Ensure you have the necessary prerequisite '
'dependencies installed like elasticsearch to use these '
'services.')
raise exception.ServiceUnavailable()
search_repo = daisy.search.CatalogSearchRepo(context, self.es_api)
policy_search_repo = policy.CatalogSearchRepoProxy(
search_repo, context, self.policy)
return policy_search_repo
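
Every get_* method in the Gateway builds the same onion of proxies, from the innermost domain object outward through store/quota, policy, notifier and authorization, so each repository call passes through every concern in order. Schematically (class names shortened; these are not the real constructor signatures):

    # innermost -> outermost, mirroring get_repo()
    repo = ImageRepo(context, db_api)     # plain DB-backed repository
    repo = LocationProxy(repo)            # keeps store locations/ACLs in sync
    repo = QuotaProxy(repo)               # enforces storage quotas
    repo = PolicyProxy(repo)              # enforces access policy
    repo = NotifierProxy(repo)            # emits notifications
    repo = AuthProxy(repo)                # ownership/visibility checks
    repo.save(image)                      # each layer acts, then delegates inward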

@@ -1,431 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import glance_store as store
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from daisy.common import exception
from daisy.common import utils
import daisy.domain.proxy
from daisy import i18n
_ = i18n._
_LE = i18n._LE
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ImageRepoProxy(daisy.domain.proxy.Repo):
def __init__(self, image_repo, context, store_api, store_utils):
self.context = context
self.store_api = store_api
proxy_kwargs = {'context': context, 'store_api': store_api,
'store_utils': store_utils}
super(ImageRepoProxy, self).__init__(image_repo,
item_proxy_class=ImageProxy,
item_proxy_kwargs=proxy_kwargs)
def _set_acls(self, image):
public = image.visibility == 'public'
member_ids = []
if image.locations and not public:
member_repo = image.get_member_repo()
member_ids = [m.member_id for m in member_repo.list()]
for location in image.locations:
self.store_api.set_acls(location['url'], public=public,
read_tenants=member_ids,
context=self.context)
def add(self, image):
result = super(ImageRepoProxy, self).add(image)
self._set_acls(image)
return result
def save(self, image, from_state=None):
result = super(ImageRepoProxy, self).save(image, from_state=from_state)
self._set_acls(image)
return result
def _check_location_uri(context, store_api, store_utils, uri):
"""Check if an image location is valid.
:param context: Glance request context
:param store_api: store API module
:param store_utils: store utils module
:param uri: location's uri string
"""
is_ok = True
try:
# NOTE(zhiyan): Some stores return zero when they catch an exception
is_ok = (store_utils.validate_external_location(uri) and
store_api.get_size_from_backend(uri, context=context) > 0)
except (store.UnknownScheme, store.NotFound):
is_ok = False
if not is_ok:
reason = _('Invalid location')
raise exception.BadStoreUri(message=reason)
def _check_image_location(context, store_api, store_utils, location):
_check_location_uri(context, store_api, store_utils, location['url'])
store_api.check_location_metadata(location['metadata'])
def _set_image_size(context, image, locations):
if not image.size:
for location in locations:
size_from_backend = store.get_size_from_backend(
location['url'], context=context)
if size_from_backend:
# NOTE(flwang): This assumes all locations have the same size
image.size = size_from_backend
break
def _count_duplicated_locations(locations, new):
"""
Calculate the number of existing locations that duplicate the new one.
:param locations: The existing image location set
:param new: The new image location
:returns: The count of duplicated locations
"""
ret = 0
for loc in locations:
if loc['url'] == new['url'] and loc['metadata'] == new['metadata']:
ret += 1
return ret
class ImageFactoryProxy(daisy.domain.proxy.ImageFactory):
def __init__(self, factory, context, store_api, store_utils):
self.context = context
self.store_api = store_api
self.store_utils = store_utils
proxy_kwargs = {'context': context, 'store_api': store_api,
'store_utils': store_utils}
super(ImageFactoryProxy, self).__init__(factory,
proxy_class=ImageProxy,
proxy_kwargs=proxy_kwargs)
def new_image(self, **kwargs):
locations = kwargs.get('locations', [])
for loc in locations:
_check_image_location(self.context,
self.store_api,
self.store_utils,
loc)
loc['status'] = 'active'
if _count_duplicated_locations(locations, loc) > 1:
raise exception.DuplicateLocation(location=loc['url'])
return super(ImageFactoryProxy, self).new_image(**kwargs)
class StoreLocations(collections.MutableSequence):
"""
The proxy for store location property. It takes responsibility for:
1. Checking location uri correctness when adding a new location.
2. Removing the image data from the store when a location is removed
from an image.
"""
def __init__(self, image_proxy, value):
self.image_proxy = image_proxy
if isinstance(value, list):
self.value = value
else:
self.value = list(value)
def append(self, location):
# NOTE(flaper87): Insert this
# location at the very end of
# the value list.
self.insert(len(self.value), location)
def extend(self, other):
if isinstance(other, StoreLocations):
locations = other.value
else:
locations = list(other)
for location in locations:
self.append(location)
def insert(self, i, location):
_check_image_location(self.image_proxy.context,
self.image_proxy.store_api,
self.image_proxy.store_utils,
location)
location['status'] = 'active'
if _count_duplicated_locations(self.value, location) > 0:
raise exception.DuplicateLocation(location=location['url'])
self.value.insert(i, location)
_set_image_size(self.image_proxy.context,
self.image_proxy,
[location])
def pop(self, i=-1):
location = self.value.pop(i)
try:
self.image_proxy.store_utils.delete_image_location_from_backend(
self.image_proxy.context,
self.image_proxy.image.image_id,
location)
except Exception:
with excutils.save_and_reraise_exception():
self.value.insert(i, location)
return location
def count(self, location):
return self.value.count(location)
def index(self, location, *args):
return self.value.index(location, *args)
def remove(self, location):
if self.count(location):
self.pop(self.index(location))
else:
self.value.remove(location)
def reverse(self):
self.value.reverse()
# Mutable sequence, so not hashable
__hash__ = None
def __getitem__(self, i):
return self.value.__getitem__(i)
def __setitem__(self, i, location):
_check_image_location(self.image_proxy.context,
self.image_proxy.store_api,
self.image_proxy.store_utils,
location)
location['status'] = 'active'
self.value.__setitem__(i, location)
_set_image_size(self.image_proxy.context,
self.image_proxy,
[location])
def __delitem__(self, i):
location = None
try:
location = self.value.__getitem__(i)
except Exception:
return self.value.__delitem__(i)
self.image_proxy.store_utils.delete_image_location_from_backend(
self.image_proxy.context,
self.image_proxy.image.image_id,
location)
self.value.__delitem__(i)
def __delslice__(self, i, j):
i = max(i, 0)
j = max(j, 0)
locations = []
try:
locations = self.value.__getslice__(i, j)
except Exception:
return self.value.__delslice__(i, j)
for location in locations:
self.image_proxy.store_utils.delete_image_location_from_backend(
self.image_proxy.context,
self.image_proxy.image.image_id,
location)
self.value.__delitem__(i)
def __iadd__(self, other):
self.extend(other)
return self
def __contains__(self, location):
return location in self.value
def __len__(self):
return len(self.value)
def __cast(self, other):
if isinstance(other, StoreLocations):
return other.value
else:
return other
def __cmp__(self, other):
return cmp(self.value, self.__cast(other))
def __iter__(self):
return iter(self.value)
def __copy__(self):
return type(self)(self.image_proxy, self.value)
def __deepcopy__(self, memo):
# NOTE(zhiyan): Only copy location entries, others can be reused.
value = copy.deepcopy(self.value, memo)
self.image_proxy.image.locations = value
return type(self)(self.image_proxy, value)
def _locations_proxy(target, attr):
"""
Make a location property proxy on the image object.
:param target: the image object on which to add the proxy
:param attr: the property proxy we want to hook
"""
def get_attr(self):
value = getattr(getattr(self, target), attr)
return StoreLocations(self, value)
def set_attr(self, value):
if not isinstance(value, (list, StoreLocations)):
reason = _('Invalid locations')
raise exception.BadStoreUri(message=reason)
ori_value = getattr(getattr(self, target), attr)
if ori_value != value:
# NOTE(zhiyan): Setting the locations list is only allowed when
# it was previously an empty list.
if len(ori_value) > 0:
raise exception.Invalid(_('Original locations is not empty: '
'%s') % ori_value)
# NOTE(zhiyan): Check locations are all valid.
for location in value:
_check_image_location(self.context,
self.store_api,
self.store_utils,
location)
location['status'] = 'active'
if _count_duplicated_locations(value, location) > 1:
raise exception.DuplicateLocation(location=location['url'])
_set_image_size(self.context, getattr(self, target), value)
return setattr(getattr(self, target), attr, list(value))
def del_attr(self):
value = getattr(getattr(self, target), attr)
while len(value):
self.store_utils.delete_image_location_from_backend(
self.context,
self.image.image_id,
value[0])
del value[0]
setattr(getattr(self, target), attr, value)
return delattr(getattr(self, target), attr)
return property(get_attr, set_attr, del_attr)
class ImageProxy(daisy.domain.proxy.Image):
locations = _locations_proxy('image', 'locations')
def __init__(self, image, context, store_api, store_utils):
self.image = image
self.context = context
self.store_api = store_api
self.store_utils = store_utils
proxy_kwargs = {
'context': context,
'image': self,
'store_api': store_api,
}
super(ImageProxy, self).__init__(
image, member_repo_proxy_class=ImageMemberRepoProxy,
member_repo_proxy_kwargs=proxy_kwargs)
def delete(self):
self.image.delete()
if self.image.locations:
for location in self.image.locations:
self.store_utils.delete_image_location_from_backend(
self.context,
self.image.image_id,
location)
def set_data(self, data, size=None):
if size is None:
size = 0 # NOTE(markwash): zero -> unknown size
location, size, checksum, loc_meta = self.store_api.add_to_backend(
CONF,
self.image.image_id,
utils.LimitingReader(utils.CooperativeReader(data),
CONF.image_size_cap),
size,
context=self.context)
self.image.locations = [{'url': location, 'metadata': loc_meta,
'status': 'active'}]
self.image.size = size
self.image.checksum = checksum
self.image.status = 'active'
def get_data(self, offset=0, chunk_size=None):
if not self.image.locations:
raise store.NotFound(_("No image data could be found"))
err = None
for loc in self.image.locations:
try:
data, size = self.store_api.get_from_backend(
loc['url'],
offset=offset,
chunk_size=chunk_size,
context=self.context)
return data
except Exception as e:
LOG.warn(_('Get image %(id)s data failed: '
'%(err)s.') % {'id': self.image.image_id,
'err': utils.exception_to_str(e)})
err = e
# tried all locations
LOG.error(_LE('Glance tried all active locations to get data for '
'image %s but all have failed.') % self.image.image_id)
raise err
class ImageMemberRepoProxy(daisy.domain.proxy.Repo):
def __init__(self, repo, image, context, store_api):
self.repo = repo
self.image = image
self.context = context
self.store_api = store_api
super(ImageMemberRepoProxy, self).__init__(repo)
def _set_acls(self):
public = self.image.visibility == 'public'
if self.image.locations and not public:
member_ids = [m.member_id for m in self.repo.list()]
for location in self.image.locations:
self.store_api.set_acls(location['url'], public=public,
read_tenants=member_ids,
context=self.context)
def add(self, member):
super(ImageMemberRepoProxy, self).add(member)
self._set_acls()
def remove(self, member):
super(ImageMemberRepoProxy, self).remove(member)
self._set_acls()
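
The practical effect of StoreLocations is that ordinary list operations on image.locations carry storage side effects. Roughly what calling code sees (hypothetical image object and URL):

    # image.locations behaves like a list, but mutations touch the backend:
    image.locations.append({'url': 'swift://host/container/obj',
                            'metadata': {}})   # URI validated, status set 'active'
    loc = image.locations.pop()                # data at that location is deleted
    del image.locations[0]                     # likewise: backend data removed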

@@ -29,12 +29,10 @@ import daisy.notifier
 import daisy.registry
 import daisy.registry.client
 import daisy.registry.client.v1.api
-import daisy.scrubber
 
 __all__ = [
     'list_api_opts',
     'list_registry_opts',
-    'list_scrubber_opts',
     'list_cache_opts',
     'list_manage_opts'
 ]
@@ -57,8 +55,7 @@ _api_opts = [
         daisy.registry.registry_addr_opts,
         daisy.registry.client.registry_client_ctx_opts,
         daisy.registry.client.registry_client_opts,
-        daisy.registry.client.v1.api.registry_client_ctx_opts,
-        daisy.scrubber.scrubber_opts))),
+        daisy.registry.client.v1.api.registry_client_ctx_opts))),
     ('image_format', daisy.common.config.image_format_opts),
     ('task', daisy.common.config.task_opts),
     ('store_type_location_strategy',
@@ -74,15 +71,6 @@ _registry_opts = [
         daisy.common.wsgi.eventlet_opts))),
     ('paste_deploy', daisy.common.config.paste_deploy_opts)
 ]
-_scrubber_opts = [
-    (None, list(itertools.chain(
-        daisy.common.config.common_opts,
-        daisy.scrubber.scrubber_opts,
-        daisy.scrubber.scrubber_cmd_opts,
-        daisy.scrubber.scrubber_cmd_cli_opts,
-        daisy.registry.client.registry_client_ctx_opts,
-        daisy.registry.registry_addr_opts))),
-]
 _cache_opts = [
     (None, list(itertools.chain(
         daisy.common.config.common_opts,
@@ -123,13 +111,6 @@ def list_registry_opts():
     return [(g, copy.deepcopy(o)) for g, o in _registry_opts]
 
 
-def list_scrubber_opts():
-    """Return a list of oslo_config options available in Glance Scrubber
-    service.
-    """
-    return [(g, copy.deepcopy(o)) for g, o in _scrubber_opts]
-
-
 def list_cache_opts():
     """Return a list of oslo_config options available in Glance Cache
     service.

@@ -1,368 +0,0 @@
# Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import glance_store as store
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
import daisy.api.common
import daisy.common.exception as exception
from daisy.common import utils
import daisy.domain
import daisy.domain.proxy
from daisy import i18n
LOG = logging.getLogger(__name__)
_ = i18n._
_LI = i18n._LI
CONF = cfg.CONF
CONF.import_opt('image_member_quota', 'daisy.common.config')
CONF.import_opt('image_property_quota', 'daisy.common.config')
CONF.import_opt('image_tag_quota', 'daisy.common.config')
CONF.import_opt('image_location_quota', 'daisy.common.config')
def _enforce_image_tag_quota(tags):
if CONF.image_tag_quota < 0:
# If value is negative, allow unlimited number of tags
return
if not tags:
return
if len(tags) > CONF.image_tag_quota:
raise exception.ImageTagLimitExceeded(attempted=len(tags),
maximum=CONF.image_tag_quota)
def _calc_required_size(context, image, locations):
required_size = None
if image.size:
required_size = image.size * len(locations)
else:
for location in locations:
size_from_backend = None
try:
size_from_backend = store.get_size_from_backend(
location['url'], context=context)
except (store.UnknownScheme, store.NotFound):
pass
if size_from_backend:
required_size = size_from_backend * len(locations)
break
return required_size
def _enforce_image_location_quota(image, locations, is_setter=False):
if CONF.image_location_quota < 0:
# If value is negative, allow unlimited number of locations
return
attempted = len(image.locations) + len(locations)
attempted = attempted if not is_setter else len(locations)
maximum = CONF.image_location_quota
if attempted > maximum:
raise exception.ImageLocationLimitExceeded(attempted=attempted,
maximum=maximum)
class ImageRepoProxy(daisy.domain.proxy.Repo):
def __init__(self, image_repo, context, db_api, store_utils):
self.image_repo = image_repo
self.db_api = db_api
proxy_kwargs = {'context': context, 'db_api': db_api,
'store_utils': store_utils}
super(ImageRepoProxy, self).__init__(image_repo,
item_proxy_class=ImageProxy,
item_proxy_kwargs=proxy_kwargs)
def _enforce_image_property_quota(self, attempted):
if CONF.image_property_quota < 0:
# If value is negative, allow unlimited number of properties
return
maximum = CONF.image_property_quota
if attempted > maximum:
kwargs = {'attempted': attempted, 'maximum': maximum}
exc = exception.ImagePropertyLimitExceeded(**kwargs)
LOG.debug(six.text_type(exc))
raise exc
def save(self, image, from_state=None):
if image.added_new_properties():
self._enforce_image_property_quota(len(image.extra_properties))
return super(ImageRepoProxy, self).save(image, from_state=from_state)
def add(self, image):
self._enforce_image_property_quota(len(image.extra_properties))
return super(ImageRepoProxy, self).add(image)
class ImageFactoryProxy(daisy.domain.proxy.ImageFactory):
def __init__(self, factory, context, db_api, store_utils):
proxy_kwargs = {'context': context, 'db_api': db_api,
'store_utils': store_utils}
super(ImageFactoryProxy, self).__init__(factory,
proxy_class=ImageProxy,
proxy_kwargs=proxy_kwargs)
def new_image(self, **kwargs):
tags = kwargs.pop('tags', set([]))
_enforce_image_tag_quota(tags)
return super(ImageFactoryProxy, self).new_image(tags=tags, **kwargs)
class QuotaImageTagsProxy(object):
def __init__(self, orig_set):
if orig_set is None:
orig_set = set([])
self.tags = orig_set
def add(self, item):
self.tags.add(item)
_enforce_image_tag_quota(self.tags)
def __cast__(self, *args, **kwargs):
return self.tags.__cast__(*args, **kwargs)
def __contains__(self, *args, **kwargs):
return self.tags.__contains__(*args, **kwargs)
def __eq__(self, other):
return self.tags == other
def __iter__(self, *args, **kwargs):
return self.tags.__iter__(*args, **kwargs)
def __len__(self, *args, **kwargs):
return self.tags.__len__(*args, **kwargs)
def __getattr__(self, name):
return getattr(self.tags, name)
class ImageMemberFactoryProxy(daisy.domain.proxy.ImageMembershipFactory):
def __init__(self, member_factory, context, db_api, store_utils):
self.db_api = db_api
self.context = context
proxy_kwargs = {'context': context, 'db_api': db_api,
'store_utils': store_utils}
super(ImageMemberFactoryProxy, self).__init__(
member_factory,
image_proxy_class=ImageProxy,
image_proxy_kwargs=proxy_kwargs)
def _enforce_image_member_quota(self, image):
if CONF.image_member_quota < 0:
# If value is negative, allow unlimited number of members
return
current_member_count = self.db_api.image_member_count(self.context,
image.image_id)
attempted = current_member_count + 1
maximum = CONF.image_member_quota
if attempted > maximum:
raise exception.ImageMemberLimitExceeded(attempted=attempted,
maximum=maximum)
def new_image_member(self, image, member_id):
self._enforce_image_member_quota(image)
return super(ImageMemberFactoryProxy, self).new_image_member(image,
member_id)
class QuotaImageLocationsProxy(object):
def __init__(self, image, context, db_api):
self.image = image
self.context = context
self.db_api = db_api
self.locations = image.locations
def __cast__(self, *args, **kwargs):
return self.locations.__cast__(*args, **kwargs)
def __contains__(self, *args, **kwargs):
return self.locations.__contains__(*args, **kwargs)
def __delitem__(self, *args, **kwargs):
return self.locations.__delitem__(*args, **kwargs)
def __delslice__(self, *args, **kwargs):
return self.locations.__delslice__(*args, **kwargs)
def __eq__(self, other):
return self.locations == other
def __getitem__(self, *args, **kwargs):
return self.locations.__getitem__(*args, **kwargs)
def __iadd__(self, other):
if not hasattr(other, '__iter__'):
raise TypeError()
self._check_user_storage_quota(other)
return self.locations.__iadd__(other)
def __iter__(self, *args, **kwargs):
return self.locations.__iter__(*args, **kwargs)
def __len__(self, *args, **kwargs):
return self.locations.__len__(*args, **kwargs)
def __setitem__(self, key, value):
return self.locations.__setitem__(key, value)
def count(self, *args, **kwargs):
return self.locations.count(*args, **kwargs)
def index(self, *args, **kwargs):
return self.locations.index(*args, **kwargs)
def pop(self, *args, **kwargs):
return self.locations.pop(*args, **kwargs)
def remove(self, *args, **kwargs):
return self.locations.remove(*args, **kwargs)
def reverse(self, *args, **kwargs):
return self.locations.reverse(*args, **kwargs)
def _check_user_storage_quota(self, locations):
required_size = _calc_required_size(self.context,
self.image,
locations)
daisy.api.common.check_quota(self.context,
required_size,
self.db_api)
_enforce_image_location_quota(self.image, locations)
def __copy__(self):
return type(self)(self.image, self.context, self.db_api)
def __deepcopy__(self, memo):
# NOTE(zhiyan): Only copy location entries, others can be reused.
self.image.locations = copy.deepcopy(self.locations, memo)
return type(self)(self.image, self.context, self.db_api)
def append(self, object):
self._check_user_storage_quota([object])
return self.locations.append(object)
def insert(self, index, object):
self._check_user_storage_quota([object])
return self.locations.insert(index, object)
def extend(self, iter):
self._check_user_storage_quota(iter)
return self.locations.extend(iter)
class ImageProxy(daisy.domain.proxy.Image):
def __init__(self, image, context, db_api, store_utils):
self.image = image
self.context = context
self.db_api = db_api
self.store_utils = store_utils
super(ImageProxy, self).__init__(image)
self.orig_props = set(image.extra_properties.keys())
def set_data(self, data, size=None):
remaining = daisy.api.common.check_quota(
self.context, size, self.db_api, image_id=self.image.image_id)
if remaining is not None:
# NOTE(jbresnah) we are trying to enforce a quota, put a limit
# reader on the data
data = utils.LimitingReader(data, remaining)
try:
self.image.set_data(data, size=size)
except exception.ImageSizeLimitExceeded:
raise exception.StorageQuotaFull(image_size=size,
remaining=remaining)
# NOTE(jbresnah) If two uploads happen at the same time and neither
# properly sets the size attribute[1] then there is a race condition
# that will allow for the quota to be broken[2]. Thus we must recheck
# the quota after the upload and thus after we know the size.
#
# Also, when an upload doesn't set the size properly then the call to
# check_quota above returns None and so utils.LimitingReader is not
# used above. Hence the store (e.g. filesystem store) may have to
# download the entire file before knowing the actual file size. Here
# also we need to check for the quota again after the image has been
# downloaded to the store.
#
# [1] For e.g. when using chunked transfers the 'Content-Length'
# header is not set.
# [2] For e.g.:
# - Upload 1 does not exceed quota but upload 2 exceeds quota.
# Both uploads are to different locations
# - Upload 2 completes before upload 1 and writes image.size.
# - Immediately, upload 1 completes and (over)writes image.size
# with the smaller size.
# - Now, to glance, image has not exceeded quota but, in
# reality, the quota has been exceeded.
try:
daisy.api.common.check_quota(
self.context, self.image.size, self.db_api,
image_id=self.image.image_id)
except exception.StorageQuotaFull:
with excutils.save_and_reraise_exception():
LOG.info(_LI('Cleaning up %s after exceeding the quota.')
% self.image.image_id)
self.store_utils.safe_delete_from_backend(
self.context, self.image.image_id, self.image.locations[0])
@property
def tags(self):
return QuotaImageTagsProxy(self.image.tags)
@tags.setter
def tags(self, value):
_enforce_image_tag_quota(value)
self.image.tags = value
@property
def locations(self):
return QuotaImageLocationsProxy(self.image,
self.context,
self.db_api)
@locations.setter
def locations(self, value):
_enforce_image_location_quota(self.image, value, is_setter=True)
if not isinstance(value, (list, QuotaImageLocationsProxy)):
raise exception.Invalid(_('Invalid locations: %s') % value)
required_size = _calc_required_size(self.context,
self.image,
value)
daisy.api.common.check_quota(
self.context, required_size, self.db_api,
image_id=self.image.image_id)
self.image.locations = value
def added_new_properties(self):
current_props = set(self.image.extra_properties.keys())
return bool(current_props.difference(self.orig_props))
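The long NOTE in set_data above describes a check-upload-recheck pattern:
check the quota before the upload, wrap the data in a limiting reader when the
size is known, then recheck after the upload once the true size is known and
delete the stored data if that recheck fails. A stripped-down sketch of the
flow; check_quota, limit_reader and safe_delete are stand-ins for the daisy
helpers, not the originals:

class StorageQuotaFull(Exception):
    """Stand-in for daisy's StorageQuotaFull exception."""

def upload_with_quota(image, data, size, check_quota, limit_reader, safe_delete):
    # Pre-upload check; 'remaining' is None when the size is not yet known
    # (e.g. a chunked transfer without a Content-Length header).
    remaining = check_quota(size)
    if remaining is not None:
        data = limit_reader(data, remaining)  # enforce the quota while reading
    image.set_data(data, size=size)
    try:
        # Recheck once the real size is known, closing the race where two
        # concurrent uploads each pass the pre-upload check.
        check_quota(image.size)
    except StorageQuotaFull:
        safe_delete(image)  # roll back the stored data before re-raising
        raise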


@@ -1,650 +0,0 @@
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import calendar
import os
import time
import eventlet
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
import six
from daisy.common import crypt
from daisy.common import exception
from daisy.common import utils
from daisy import context
import daisy.db as db_api
from daisy import i18n
import daisy.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_ = i18n._
_LI = i18n._LI
_LW = i18n._LW
_LE = i18n._LE
scrubber_opts = [
cfg.StrOpt('scrubber_datadir',
default='/var/lib/daisy/scrubber',
help=_('Directory that the scrubber will use to track '
'information about what to delete. '
'Make sure this is set in daisy-api.conf and '
'daisy-scrubber.conf.')),
cfg.IntOpt('scrub_time', default=0,
help=_('The amount of time in seconds to delay before '
'performing a delete.')),
cfg.BoolOpt('cleanup_scrubber', default=False,
help=_('A boolean that determines if the scrubber should '
'clean up the files it uses for tracking data. Only '
'one server in your deployment should be designated '
'the cleanup host.')),
cfg.BoolOpt('delayed_delete', default=False,
help=_('Turn on/off delayed delete.')),
cfg.IntOpt('cleanup_scrubber_time', default=86400,
help=_('Items must have a modified time that is older than '
'this value in order to be candidates for cleanup.'))
]
scrubber_cmd_opts = [
cfg.IntOpt('wakeup_time', default=300,
help=_('Loop time between checking for new '
'items to schedule for delete.'))
]
scrubber_cmd_cli_opts = [
cfg.BoolOpt('daemon',
short='D',
default=False,
help=_('Run as a long-running process. When not '
'specified (the default), run the scrub operation '
'once and then exit. When specified, do not exit; '
'run the scrub operation on the wakeup_time '
'interval specified in the config.'))
]
CONF = cfg.CONF
CONF.register_opts(scrubber_opts)
CONF.import_opt('metadata_encryption_key', 'daisy.common.config')
class ScrubQueue(object):
"""Image scrub queue base class.
The queue contains image's location which need to delete from backend.
"""
def __init__(self):
self.scrub_time = CONF.scrub_time
self.metadata_encryption_key = CONF.metadata_encryption_key
registry.configure_registry_client()
registry.configure_registry_admin_creds()
self.registry = registry.get_registry_client(context.RequestContext())
@abc.abstractmethod
def add_location(self, image_id, location, user_context=None):
"""Adding image location to scrub queue.
:param image_id: The opaque image identifier
:param location: The opaque image location
:param user_context: The user's request context
:retval A boolean value to indicate success or not
"""
pass
@abc.abstractmethod
def get_all_locations(self):
"""Returns a list of image id and location tuple from scrub queue.
:retval a list of image id and location tuple from scrub queue
"""
pass
@abc.abstractmethod
def pop_all_locations(self):
"""Pop out a list of image id and location tuple from scrub queue.
:retval a list of image id and location tuple from scrub queue
"""
pass
@abc.abstractmethod
def has_image(self, image_id):
"""Returns whether the queue contains an image or not.
:param image_id: The opaque image identifier
:retval a boolean value to inform including or not
"""
pass
class ScrubFileQueue(ScrubQueue):
"""File-based image scrub queue class."""
def __init__(self):
super(ScrubFileQueue, self).__init__()
self.scrubber_datadir = CONF.scrubber_datadir
utils.safe_mkdirs(self.scrubber_datadir)
def _read_queue_file(self, file_path):
"""Reading queue file to loading deleted location and timestamp out.
:param file_path: Queue file full path
:retval a list of image location id, uri and timestamp tuple
"""
loc_ids = []
uris = []
delete_times = []
try:
with open(file_path, 'r') as f:
while True:
loc_id = f.readline().strip()
if loc_id:
lid = six.text_type(loc_id)
loc_ids.append(int(lid) if lid.isdigit() else lid)
uris.append(unicode(f.readline().strip()))
delete_times.append(int(f.readline().strip()))
else:
break
return loc_ids, uris, delete_times
except Exception:
LOG.error(_LE("%s file can not be read.") % file_path)
def _update_queue_file(self, file_path, remove_record_idxs):
"""Updating queue file to remove such queue records.
:param file_path: Queue file full path
:param remove_record_idxs: A list of record index those want to remove
"""
try:
with open(file_path, 'r') as f:
lines = f.readlines()
# NOTE(zhiyan) we need bottom up removing to
# keep record index be valid.
remove_record_idxs.sort(reverse=True)
for record_idx in remove_record_idxs:
# Each record has three lines:
# location id, uri and delete time.
line_no = (record_idx + 1) * 3 - 1
del lines[line_no:line_no + 3]
with open(file_path, 'w') as f:
f.write(''.join(lines))
os.chmod(file_path, 0o600)
except Exception:
LOG.error(_LE("%s file can not be wrote.") % file_path)
def add_location(self, image_id, location, user_context=None):
"""Adding image location to scrub queue.
:param image_id: The opaque image identifier
:param location: The opaque image location
:param user_context: The user's request context
:retval A boolean value to indicate success or not
"""
if user_context is not None:
registry_client = registry.get_registry_client(user_context)
else:
registry_client = self.registry
with lockutils.lock("scrubber-%s" % image_id,
lock_file_prefix='daisy-', external=True):
# NOTE(zhiyan): make sure the scrubber does not clean up
# 'pending_delete' images concurrently before this code
# takes the lock and reaches here.
try:
image = registry_client.get_image(image_id)
if image['status'] == 'deleted':
return True
except exception.NotFound as e:
LOG.warn(_LW("Failed to find image to delete: %s"),
utils.exception_to_str(e))
return False
loc_id = location.get('id', '-')
if self.metadata_encryption_key:
uri = crypt.urlsafe_encrypt(self.metadata_encryption_key,
location['url'], 64)
else:
uri = location['url']
delete_time = time.time() + self.scrub_time
file_path = os.path.join(self.scrubber_datadir, str(image_id))
if os.path.exists(file_path):
# Append the uri of location to the queue file
with open(file_path, 'a') as f:
f.write('\n')
f.write('\n'.join([str(loc_id),
uri,
str(int(delete_time))]))
else:
# NOTE(zhiyan): Protect the file before we write any data.
open(file_path, 'w').close()
os.chmod(file_path, 0o600)
with open(file_path, 'w') as f:
f.write('\n'.join([str(loc_id),
uri,
str(int(delete_time))]))
os.utime(file_path, (delete_time, delete_time))
return True
def _walk_all_locations(self, remove=False):
"""Returns a list of image id and location tuple from scrub queue.
:param remove: Whether remove location from queue or not after walk
:retval a list of image id, location id and uri tuple from scrub queue
"""
if not os.path.exists(self.scrubber_datadir):
LOG.warn(_LW("%s directory does not exist.") %
self.scrubber_datadir)
return []
ret = []
for root, dirs, files in os.walk(self.scrubber_datadir):
for image_id in files:
if not utils.is_uuid_like(image_id):
continue
with lockutils.lock("scrubber-%s" % image_id,
lock_file_prefix='daisy-', external=True):
file_path = os.path.join(self.scrubber_datadir, image_id)
records = self._read_queue_file(file_path)
loc_ids, uris, delete_times = records
remove_record_idxs = []
skipped = False
for (record_idx, delete_time) in enumerate(delete_times):
if delete_time > time.time():
skipped = True
continue
else:
ret.append((image_id,
loc_ids[record_idx],
uris[record_idx]))
remove_record_idxs.append(record_idx)
if remove:
if skipped:
# NOTE(zhiyan): remove location records from
# the queue file.
self._update_queue_file(file_path,
remove_record_idxs)
else:
utils.safe_remove(file_path)
return ret
def get_all_locations(self):
"""Returns a list of image id and location tuple from scrub queue.
:retval a list of image id and location tuple from scrub queue
"""
return self._walk_all_locations()
def pop_all_locations(self):
"""Pop out a list of image id and location tuple from scrub queue.
:retval a list of image id and location tuple from scrub queue
"""
return self._walk_all_locations(remove=True)
def has_image(self, image_id):
"""Returns whether the queue contains an image or not.
:param image_id: The opaque image identifier
:retval a boolean value to inform including or not
"""
return os.path.exists(os.path.join(self.scrubber_datadir,
str(image_id)))
class ScrubDBQueue(ScrubQueue):
"""Database-based image scrub queue class."""
def __init__(self):
super(ScrubDBQueue, self).__init__()
admin_tenant_name = CONF.admin_tenant_name
admin_token = self.registry.auth_token
self.admin_context = context.RequestContext(user=CONF.admin_user,
tenant=admin_tenant_name,
auth_token=admin_token)
def add_location(self, image_id, location, user_context=None):
"""Adding image location to scrub queue.
:param image_id: The opaque image identifier
:param location: The opaque image location
:param user_context: The user's request context
:retval A boolean value to indicate success or not
"""
loc_id = location.get('id')
if loc_id:
db_api.get_api().image_location_delete(self.admin_context,
image_id, loc_id,
'pending_delete')
return True
else:
return False
def _get_images_page(self, marker):
filters = {'deleted': True,
'is_public': 'none',
'status': 'pending_delete'}
if marker:
return self.registry.get_images_detailed(filters=filters,
marker=marker)
else:
return self.registry.get_images_detailed(filters=filters)
def _get_all_images(self):
"""Generator to fetch all appropriate images, paging as needed."""
marker = None
while True:
images = self._get_images_page(marker)
if len(images) == 0:
break
marker = images[-1]['id']
for image in images:
yield image
def _walk_all_locations(self, remove=False):
"""Returns a list of image id and location tuple from scrub queue.
:param remove: Whether remove location from queue or not after walk
:retval a list of image id, location id and uri tuple from scrub queue
"""
ret = []
for image in self._get_all_images():
deleted_at = image.get('deleted_at')
if not deleted_at:
continue
# NOTE: Strip off microseconds, which may occur after the last '.' or ','
# Example: 2012-07-07T19:14:34.974216
date_str = deleted_at.rsplit('.', 1)[0].rsplit(',', 1)[0]
delete_time = calendar.timegm(time.strptime(date_str,
"%Y-%m-%dT%H:%M:%S"))
if delete_time + self.scrub_time > time.time():
continue
for loc in image['location_data']:
if loc['status'] != 'pending_delete':
continue
if self.metadata_encryption_key:
uri = crypt.urlsafe_encrypt(self.metadata_encryption_key,
loc['url'], 64)
else:
uri = loc['url']
ret.append((image['id'], loc['id'], uri))
if remove:
db_api.get_api().image_location_delete(self.admin_context,
image['id'],
loc['id'],
'deleted')
self.registry.update_image(image['id'],
{'status': 'deleted'})
return ret
def get_all_locations(self):
"""Returns a list of image id and location tuple from scrub queue.
:retval a list of image id and location tuple from scrub queue
"""
return self._walk_all_locations()
def pop_all_locations(self):
"""Pop out a list of image id and location tuple from scrub queue.
:retval a list of image id and location tuple from scrub queue
"""
return self._walk_all_locations(remove=True)
def has_image(self, image_id):
"""Returns whether the queue contains an image or not.
:param image_id: The opaque image identifier
:retval a boolean value to inform including or not
"""
try:
image = self.registry.get_image(image_id)
return image['status'] == 'pending_delete'
except exception.NotFound:
return False
_file_queue = None
_db_queue = None
def get_scrub_queues():
global _file_queue, _db_queue
if not _file_queue:
_file_queue = ScrubFileQueue()
if not _db_queue:
_db_queue = ScrubDBQueue()
return (_file_queue, _db_queue)
class Daemon(object):
def __init__(self, wakeup_time=300, threads=1000):
LOG.info(_LI("Starting Daemon: wakeup_time=%(wakeup_time)s "
"threads=%(threads)s"),
{'wakeup_time': wakeup_time, 'threads': threads})
self.wakeup_time = wakeup_time
self.event = eventlet.event.Event()
self.pool = eventlet.greenpool.GreenPool(threads)
def start(self, application):
self._run(application)
def wait(self):
try:
self.event.wait()
except KeyboardInterrupt:
msg = _LI("Daemon Shutdown on KeyboardInterrupt")
LOG.info(msg)
def _run(self, application):
LOG.debug("Running application")
self.pool.spawn_n(application.run, self.pool, self.event)
eventlet.spawn_after(self.wakeup_time, self._run, application)
LOG.debug("Next run scheduled in %s seconds" % self.wakeup_time)
class Scrubber(object):
def __init__(self, store_api):
LOG.info(_LI("Initializing scrubber with configuration: %s") %
six.text_type({'scrubber_datadir': CONF.scrubber_datadir,
'cleanup': CONF.cleanup_scrubber,
'cleanup_time': CONF.cleanup_scrubber_time,
'registry_host': CONF.registry_host,
'registry_port': CONF.registry_port}))
utils.safe_mkdirs(CONF.scrubber_datadir)
self.store_api = store_api
registry.configure_registry_client()
registry.configure_registry_admin_creds()
self.registry = registry.get_registry_client(context.RequestContext())
# Here we create a request context with credentials to support
# delayed delete when using multi-tenant backend storage
admin_tenant = CONF.admin_tenant_name
auth_token = self.registry.auth_token
self.admin_context = context.RequestContext(user=CONF.admin_user,
tenant=admin_tenant,
auth_token=auth_token)
(self.file_queue, self.db_queue) = get_scrub_queues()
def _get_delete_jobs(self, queue, pop):
try:
if pop:
records = queue.pop_all_locations()
else:
records = queue.get_all_locations()
except Exception as err:
LOG.error(_LE("Can not %(op)s scrub jobs from queue: %(err)s") %
{'op': 'pop' if pop else 'get',
'err': utils.exception_to_str(err)})
return {}
delete_jobs = {}
for image_id, loc_id, loc_uri in records:
if image_id not in delete_jobs:
delete_jobs[image_id] = []
delete_jobs[image_id].append((image_id, loc_id, loc_uri))
return delete_jobs
def _merge_delete_jobs(self, file_jobs, db_jobs):
ret = {}
for image_id, file_job_items in file_jobs.iteritems():
ret[image_id] = file_job_items
db_job_items = db_jobs.get(image_id, [])
for db_item in db_job_items:
if db_item not in file_job_items:
ret[image_id].append(db_item)
for image_id, db_job_items in db_jobs.iteritems():
if image_id not in ret:
ret[image_id] = db_job_items
return ret
def run(self, pool, event=None):
file_jobs = self._get_delete_jobs(self.file_queue, True)
db_jobs = self._get_delete_jobs(self.db_queue, False)
delete_jobs = self._merge_delete_jobs(file_jobs, db_jobs)
if delete_jobs:
for image_id, jobs in six.iteritems(delete_jobs):
self._scrub_image(pool, image_id, jobs)
if CONF.cleanup_scrubber:
self._cleanup(pool)
def _scrub_image(self, pool, image_id, delete_jobs):
if len(delete_jobs) == 0:
return
LOG.info(_LI("Scrubbing image %(id)s from %(count)d locations.") %
{'id': image_id, 'count': len(delete_jobs)})
# NOTE(bourke): The starmap must be iterated to do work
list(pool.starmap(self._delete_image_location_from_backend,
delete_jobs))
image = self.registry.get_image(image_id)
if (image['status'] == 'pending_delete' and
not self.file_queue.has_image(image_id)):
self.registry.update_image(image_id, {'status': 'deleted'})
def _delete_image_location_from_backend(self, image_id, loc_id, uri):
if CONF.metadata_encryption_key:
uri = crypt.urlsafe_decrypt(CONF.metadata_encryption_key, uri)
try:
LOG.debug("Deleting URI from image %s." % image_id)
self.store_api.delete_from_backend(uri, self.admin_context)
if loc_id != '-':
db_api.get_api().image_location_delete(self.admin_context,
image_id,
int(loc_id),
'deleted')
LOG.info(_LI("Image %s has been deleted.") % image_id)
except Exception:
LOG.warn(_LW("Unable to delete URI from image %s.") % image_id)
def _read_cleanup_file(self, file_path):
"""Reading cleanup to get latest cleanup timestamp.
:param file_path: Cleanup status file full path
:retval latest cleanup timestamp
"""
try:
if not os.path.exists(file_path):
msg = _("%s file is not exists.") % six.text_type(file_path)
raise Exception(msg)
atime = int(os.path.getatime(file_path))
mtime = int(os.path.getmtime(file_path))
if atime != mtime:
msg = _("%s file contains conflicting cleanup "
"timestamp.") % six.text_type(file_path)
raise Exception(msg)
return atime
except Exception as e:
LOG.error(utils.exception_to_str(e))
return None
def _update_cleanup_file(self, file_path, cleanup_time):
"""Update latest cleanup timestamp to cleanup file.
:param file_path: Cleanup status file full path
:param cleanup_time: The latest cleanup timestamp
"""
try:
open(file_path, 'w').close()
os.chmod(file_path, 0o600)
os.utime(file_path, (cleanup_time, cleanup_time))
except Exception:
LOG.error(_LE("%s file can not be created.") %
six.text_type(file_path))
def _cleanup(self, pool):
now = time.time()
cleanup_file = os.path.join(CONF.scrubber_datadir, ".cleanup")
if not os.path.exists(cleanup_file):
self._update_cleanup_file(cleanup_file, now)
return
last_cleanup_time = self._read_cleanup_file(cleanup_file)
cleanup_time = last_cleanup_time + CONF.cleanup_scrubber_time
if cleanup_time > now:
return
LOG.info(_LI("Getting images deleted before %s") %
CONF.cleanup_scrubber_time)
self._update_cleanup_file(cleanup_file, now)
delete_jobs = self._get_delete_jobs(self.db_queue, False)
if not delete_jobs:
return
for image_id, jobs in six.iteritems(delete_jobs):
with lockutils.lock("scrubber-%s" % image_id,
lock_file_prefix='daisy-', external=True):
if not self.file_queue.has_image(image_id):
# NOTE(zhiyan): the scrubber should not clean up this image,
# since a queue file may have been created for this
# 'pending_delete' image concurrently before this code took
# the lock and reached here. The check is only worthwhile if
# the daisy-api and daisy-scrubber services are deployed on
# the same host.
self._scrub_image(pool, image_id, jobs)
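ScrubFileQueue above keeps one queue file per image id under scrubber_datadir,
each record spanning exactly three lines: location id, (optionally encrypted)
URI, and an integer delete timestamp. A small sketch of reading that format
back, independent of the daisy helpers:

def read_queue_records(path):
    """Parse a scrubber queue file into (loc_id, uri, delete_time) tuples."""
    with open(path) as f:
        lines = [line.strip() for line in f if line.strip()]
    records = []
    for i in range(0, len(lines) - 2, 3):
        # Three lines per record: location id, uri, delete timestamp.
        records.append((lines[i], lines[i + 1], int(lines[i + 2])))
    return records

# Records whose delete timestamp has passed are eligible for scrubbing, e.g.:
# due = [r for r in read_queue_records(path) if r[2] <= time.time()]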

code/daisy/doc/source/conf.py Executable file → Normal file

@@ -115,30 +115,6 @@ modindex_common_prefix = ['glance.']
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
man_pages = [
('man/glanceapi', 'glance-api', u'Glance API Server',
[u'OpenStack'], 1),
('man/glancecachecleaner', 'glance-cache-cleaner', u'Glance Cache Cleaner',
[u'OpenStack'], 1),
('man/glancecachemanage', 'glance-cache-manage', u'Glance Cache Manager',
[u'OpenStack'], 1),
('man/glancecacheprefetcher', 'glance-cache-prefetcher',
u'Glance Cache Pre-fetcher', [u'OpenStack'], 1),
('man/glancecachepruner', 'glance-cache-pruner', u'Glance Cache Pruner',
[u'OpenStack'], 1),
('man/glancecontrol', 'glance-control', u'Glance Daemon Control Helper ',
[u'OpenStack'], 1),
('man/glancemanage', 'glance-manage', u'Glance Management Utility',
[u'OpenStack'], 1),
('man/glanceregistry', 'glance-registry', u'Glance Registry Server',
[u'OpenStack'], 1),
('man/glancereplicator', 'glance-replicator', u'Glance Replicator',
[u'OpenStack'], 1),
('man/glancescrubber', 'glance-scrubber', u'Glance Scrubber Service',
[u'OpenStack'], 1)
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
@@ -248,3 +224,4 @@ latex_documents = [
# If false, no module index is generated.
# latex_use_modindex = True


@@ -1,58 +0,0 @@
==============
daisy-control
==============
--------------------------------------
daisy daemon start/stop/reload helper
--------------------------------------
:Author: daisy@lists.launchpad.net
:Date: 2014-01-16
:Copyright: OpenStack LLC
:Version: 2014.1
:Manual section: 1
:Manual group: cloud computing
SYNOPSIS
========
daisy-control [options] <SERVER> <COMMAND> [CONFPATH]
Where <SERVER> is one of:
all, api, daisy-api, registry, daisy-registry, scrubber, daisy-scrubber
And command is one of:
start, status, stop, shutdown, restart, reload, force-reload
And CONFPATH is the optional configuration file to use.
OPTIONS
========
**General Options**
.. include:: general_options.rst
**--pid-file=PATH**
File to use as pid file. Default:
/var/run/daisy/$server.pid
**--await-child DELAY**
Period to wait for service death in order to report
exit code (default is to not wait at all)
**--capture-output**
Capture stdout/err in syslog instead of discarding
**--nocapture-output**
The inverse of --capture-output
**--norespawn**
The inverse of --respawn
**--respawn**
Restart service on unexpected death
.. include:: footer.rst


@@ -1,63 +0,0 @@
===============
daisy-scrubber
===============
--------------------
daisy scrub service
--------------------
:Author: daisy@lists.launchpad.net
:Date: 2014-01-16
:Copyright: OpenStack LLC
:Version: 2014.1
:Manual section: 1
:Manual group: cloud computing
SYNOPSIS
========
daisy-scrubber [options]
DESCRIPTION
===========
daisy-scrubber is a utility that cleans up images that have been deleted. The
mechanics of this differ depending on the backend store and pending_deletion
options chosen.
Multiple daisy-scrubbers can be run in a single deployment, but only one of
them may be designated as the 'cleanup_scrubber' in the daisy-scrubber.conf
file. The 'cleanup_scrubber' coordinates other daisy-scrubbers by maintaining
the master queue of images that need to be removed.
The daisy-scrubber.conf file also specifies important configuration items such
as the time between runs ('wakeup_time' in seconds) and the length of time
images can be pending before their deletion ('cleanup_scrubber_time' in
seconds), as well as registry connectivity options.
daisy-scrubber can run as a periodic job or long-running daemon.
OPTIONS
=======
**General options**
.. include:: general_options.rst
**-D, --daemon**
Run as a long-running process. When not specified (the
default), run the scrub operation once and then exit.
When specified, do not exit; run the scrub operation on
the wakeup_time interval specified in the config.
**--nodaemon**
The inverse of --daemon. Runs the scrub operation once and
then exits. This is the default.
FILES
======
**/etc/daisy/daisy-scrubber.conf**
Default configuration file for the daisy Scrubber
.. include:: footer.rst
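The periodic-job-versus-daemon behaviour described above corresponds to the
Daemon class deleted earlier in this commit: daemon mode is essentially a
self-rescheduling loop. A stripped-down illustration using plain threading in
place of eventlet:

import threading

def run_periodically(scrub_once, wakeup_time=300):
    """Run one scrub pass, then reschedule after wakeup_time seconds."""
    scrub_once()
    timer = threading.Timer(wakeup_time, run_periodically,
                            args=(scrub_once, wakeup_time))
    timer.daemon = True
    timer.start()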


@@ -278,10 +278,6 @@ delayed_delete = False
# Delayed delete time in seconds
scrub_time = 43200
# Directory that the scrubber will use to remind itself of what to delete
# Make sure this is also set in glance-scrubber.conf
scrubber_datadir = /var/lib/glance/scrubber
# =============== Quota Options ==================================
# The maximum number of image members allowed per image
@@ -429,14 +425,6 @@ image_cache_dir = /var/lib/glance/image-cache/
# Deprecated group/name - [DEFAULT]/disable_process_locking
#disable_process_locking = false
# Directory to use for lock files. For security, the specified
# directory should only be writable by the user running the processes
# that need locking. It could be read from environment variable
# OSLO_LOCK_PATH. This setting needs to be the same for both
# glance-scrubber and glance-api service. Default to a temp directory.
# Deprecated group/name - [DEFAULT]/lock_path (string value)
#lock_path = /tmp
[keystone_authtoken]
identity_uri = http://127.0.0.1:35357
admin_tenant_name = %SERVICE_TENANT_NAME%
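The lock_path note above matters because the API and scrubber take the same
named external file locks (the deleted scrubber code locks on
"scrubber-<image_id>" with a 'daisy-' prefix). A minimal sketch of that
coordination with oslo.concurrency; the lock_path value is only an example:

from oslo_concurrency import lockutils

# Both services must agree on lock_path for external file locks to work.
lockutils.set_defaults(lock_path='/var/lock/daisy')

image_id = 'IMAGE_ID'  # placeholder
with lockutils.lock('scrubber-%s' % image_id,
                    lock_file_prefix='daisy-', external=True):
    pass  # critical section shared by daisy-api and daisy-scrubber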


@@ -1,132 +0,0 @@
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
#verbose = False
# Show debugging output in logs (sets DEBUG log level output)
#debug = False
# Log to this file. Make sure you do not set the same log file for both the API
# and registry servers!
#
# If `log_file` is omitted and `use_syslog` is false, then log messages are
# sent to stdout as a fallback.
log_file = /var/log/daisy/scrubber.log
# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
#use_syslog = False
# Should we run our own loop or rely on cron/scheduler to run us
daemon = False
# Loop time between checking for new items to schedule for delete
wakeup_time = 300
# Directory that the scrubber will use to remind itself of what to delete
# Make sure this is also set in glance-api.conf
scrubber_datadir = /var/lib/daisy/scrubber
# Only one server in your deployment should be designated the cleanup host
cleanup_scrubber = False
# pending_delete items older than this time are candidates for cleanup
cleanup_scrubber_time = 86400
# Address to find the registry server for cleanups
registry_host = 0.0.0.0
# Port the registry server is listening on
registry_port = 9191
# Auth settings if using Keystone
# auth_url = http://127.0.0.1:5000/v2.0/
# admin_tenant_name = %SERVICE_TENANT_NAME%
# admin_user = %SERVICE_USER%
# admin_password = %SERVICE_PASSWORD%
# API to use for accessing data. Default value points to sqlalchemy
# package, it is also possible to use: glance.db.registry.api
#data_api = glance.db.sqlalchemy.api
# ================= Security Options ==========================
# AES key for encrypting store 'location' metadata, including
# -- if used -- Swift or S3 credentials
# Should be set to a random string of length 16, 24 or 32 bytes
#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
# =============== Policy Options ==============================
# The JSON file that defines policies.
#policy_file = policy.json
# Default rule. Enforced when a requested rule is not found.
#policy_default_rule = default
# Directories where policy configuration files are stored.
# They can be relative to any directory in the search path
# defined by the config_dir option, or absolute paths.
# The file defined by policy_file must exist for these
# directories to be searched.
#policy_dirs = policy.d
# ================= Database Options ==========================
[database]
# The SQLAlchemy connection string used to connect to the
# database (string value)
#connection=sqlite:////glance/openstack/common/db/$sqlite_db
# The SQLAlchemy connection string used to connect to the
# slave database (string value)
#slave_connection=
# timeout before idle sql connections are reaped (integer
# value)
#idle_timeout=3600
# Minimum number of SQL connections to keep open in a pool
# (integer value)
#min_pool_size=1
# Maximum number of SQL connections to keep open in a pool
# (integer value)
#max_pool_size=<None>
# maximum db connection retries during startup. (setting -1
# implies an infinite retry count) (integer value)
#max_retries=10
# interval between retries of opening a sql connection
# (integer value)
#retry_interval=10
# If set, use this value for max_overflow with sqlalchemy
# (integer value)
#max_overflow=<None>
# Verbosity of SQL debugging information. 0=None,
# 100=Everything (integer value)
#connection_debug=0
# Add python stack traces to SQL as comment strings (boolean
# value)
#connection_trace=false
# If set, use this value for pool_timeout with sqlalchemy
# (integer value)
#pool_timeout=<None>
[oslo_concurrency]
# Enables or disables inter-process locks. (boolean value)
# Deprecated group/name - [DEFAULT]/disable_process_locking
#disable_process_locking = false
# Directory to use for lock files. For security, the specified
# directory should only be writable by the user running the processes
# that need locking. It could be read from environment variable
# OSLO_LOCK_PATH. This setting needs to be the same for both
# glance-scrubber and glance-api service. Default to a temp directory.
# Deprecated group/name - [DEFAULT]/lock_path (string value)
#lock_path = /tmp
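For reference, a service consumes a sample file like the one above through
oslo.config. The option names in this sketch come from the file itself; the
registration code is illustrative rather than the removed daisy code:

from oslo_config import cfg

CONF = cfg.CONF
CONF.register_opts([
    cfg.BoolOpt('daemon', default=False),
    cfg.IntOpt('wakeup_time', default=300),
    cfg.StrOpt('scrubber_datadir', default='/var/lib/daisy/scrubber'),
    cfg.BoolOpt('cleanup_scrubber', default=False),
    cfg.IntOpt('cleanup_scrubber_time', default=86400),
])

# Assuming the sample file has been installed at this path:
CONF(['--config-file', '/etc/daisy/daisy-scrubber.conf'])
print(CONF.wakeup_time)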


@@ -1,8 +0,0 @@
[DEFAULT]
output_file = etc/daisy-scrubber.conf.sample
namespace = daisy.scrubber
namespace = oslo_concurrency
namespace = oslo_db
namespace = oslo_db.concurrency
namespace = oslo_log
namespace = oslo_policy


@@ -27,22 +27,18 @@ console_scripts =
daisy-cache-pruner = daisy.cmd.cache_pruner:main
daisy-cache-manage = daisy.cmd.cache_manage:main
daisy-cache-cleaner = daisy.cmd.cache_cleaner:main
daisy-control = daisy.cmd.control:main
daisy-search = daisy.cmd.search:main
daisy-index = daisy.cmd.index:main
daisy-manage = daisy.cmd.manage:main
daisy-registry = daisy.cmd.registry:main
daisy-replicator = daisy.cmd.replicator:main
daisy-scrubber = daisy.cmd.scrubber:main
daisy-orchestration = daisy.cmd.orchestration:main
daisy.common.image_location_strategy.modules =
location_order_strategy = daisy.common.location_strategy.location_order
store_type_strategy = daisy.common.location_strategy.store_type
oslo_config.opts =
daisy.api = daisy.opts:list_api_opts
daisy.registry = daisy.opts:list_registry_opts
daisy.scrubber = daisy.opts:list_scrubber_opts
daisy.cache= daisy.opts:list_cache_opts
daisy.manage = daisy.opts:list_manage_opts
daisy.database.migration_backend =
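The removed daisy.scrubber entry point is what let oslo-config-generator find
the scrubber options: each namespace in a generator config resolves to an
entry point in the oslo_config.opts group, whose callable returns (group,
options) pairs. Roughly, assuming the setuptools pkg_resources API:

import pkg_resources

def load_opts_for_namespace(namespace):
    """Resolve a generator namespace to its list_opts callable and call it."""
    for ep in pkg_resources.iter_entry_points('oslo_config.opts', namespace):
        list_opts = ep.load()  # e.g. daisy.opts:list_scrubber_opts
        return list_opts()     # -> [(group, [opts, ...]), ...]
    raise LookupError('no oslo_config.opts entry point named %s' % namespace)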


@@ -32,7 +32,6 @@ commands = {posargs}
commands =
oslo-config-generator --config-file etc/oslo-config-generator/daisy-api.conf
oslo-config-generator --config-file etc/oslo-config-generator/daisy-registry.conf
oslo-config-generator --config-file etc/oslo-config-generator/daisy-scrubber.conf
oslo-config-generator --config-file etc/oslo-config-generator/daisy-cache.conf
oslo-config-generator --config-file etc/oslo-config-generator/daisy-manage.conf
oslo-config-generator --config-file etc/oslo-config-generator/daisy-search.conf


@@ -1,15 +0,0 @@
Metadata-Version: 1.1
Name: ironic-discoverd
Version: 1.0.2
Summary: Hardware introspection for OpenStack Ironic
Home-page: https://pypi.python.org/pypi/ironic-discoverd
Author: Dmitry Tantsur
Author-email: dtantsur@redhat.com
License: APL 2.0
Description: UNKNOWN
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX


@@ -1,15 +0,0 @@
Metadata-Version: 1.1
Name: ironic-discoverd
Version: 1.0.2
Summary: Hardware introspection for OpenStack Ironic
Home-page: https://pypi.python.org/pypi/ironic-discoverd
Author: Dmitry Tantsur
Author-email: dtantsur@redhat.com
License: APL 2.0
Description: UNKNOWN
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX


@@ -1,52 +0,0 @@
.gitignore
.gitreview
CONTRIBUTING.rst
LICENSE
MANIFEST.in
README.rst
example.conf
ironic-discoverd.8
requirements.txt
setup.py
test-requirements.txt
tox.ini
functest/run.py
functest/env/cpuinfo.txt
functest/env/dmidecode
functest/env/fdisk
functest/env/get_kernel_parameter
functest/env/ip
functest/env/ipmitool
functest/env/lscpu
functest/env/modprobe
functest/env/poweroff
functest/env/sleep
functest/env/troubleshoot
ironic_discoverd/__init__.py
ironic_discoverd/client.py
ironic_discoverd/conf.py
ironic_discoverd/firewall.py
ironic_discoverd/introspect.py
ironic_discoverd/main.py
ironic_discoverd/node_cache.py
ironic_discoverd/process.py
ironic_discoverd/utils.py
ironic_discoverd.egg-info/PKG-INFO
ironic_discoverd.egg-info/SOURCES.txt
ironic_discoverd.egg-info/dependency_links.txt
ironic_discoverd.egg-info/entry_points.txt
ironic_discoverd.egg-info/pbr.json
ironic_discoverd.egg-info/requires.txt
ironic_discoverd.egg-info/top_level.txt
ironic_discoverd/plugins/__init__.py
ironic_discoverd/plugins/base.py
ironic_discoverd/plugins/example.py
ironic_discoverd/plugins/standard.py
ironic_discoverd/test/__init__.py
ironic_discoverd/test/base.py
ironic_discoverd/test/test_client.py
ironic_discoverd/test/test_introspect.py
ironic_discoverd/test/test_main.py
ironic_discoverd/test/test_node_cache.py
ironic_discoverd/test/test_process.py
ironic_discoverd/test/test_utils.py


@@ -1,9 +0,0 @@
[ironic_discoverd.hooks]
validate_interfaces = ironic_discoverd.plugins.standard:ValidateInterfacesHook
ramdisk_error = ironic_discoverd.plugins.standard:RamdiskErrorHook
scheduler = ironic_discoverd.plugins.standard:SchedulerHook
example = ironic_discoverd.plugins.example:ExampleProcessingHook
[console_scripts]
ironic-discoverd = ironic_discoverd.main:main


@@ -1 +0,0 @@
{"is_release": false, "git_version": "280ade3"}


@@ -1,8 +0,0 @@
eventlet
Flask
keystonemiddleware
python-ironicclient
python-keystoneclient
requests
six
stevedore


@@ -1 +0,0 @@
ironic_discoverd


@@ -1,5 +1,8 @@
[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0
[metadata]
name = daisy-discoveryd
summary = Daisy discovery agent
description-file =
README.rst
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = http://www.openstack.org/


@@ -15,13 +15,11 @@ Source0: http://launchpad.net/%{service}/%{release_name}/%{version}/+do
Source1: daisy-api.service
Source2: daisy-registry.service
Source3: daisy-scrubber.service
Source4: daisy.logrotate
Source5: daisy-api-dist.conf
Source6: daisy-registry-dist.conf
Source7: daisy-cache-dist.conf
Source8: daisy-scrubber-dist.conf
Source9: daisy-orchestration.service
Source10: daisy-orchestration.conf
@@ -153,7 +151,6 @@ rm -rf {test-,}requirements.txt tools/{pip,test}-requires
api_dist=%{SOURCE5}
registry_dist=%{SOURCE6}
cache_dist=%{SOURCE7}
scrubber_dist=%{SOURCE8}
%build
%{__python2} setup.py build
@@ -202,8 +199,6 @@ install -p -D -m 644 etc/daisy-registry-paste.ini %{buildroot}%{_datadir}/daisy/
install -p -D -m 644 etc/daisy-registry-paste.ini %{buildroot}%{_sysconfdir}/daisy/daisy-registry-paste.ini
install -p -D -m 640 etc/daisy-cache.conf %{buildroot}%{_sysconfdir}/daisy/daisy-cache.conf
install -p -D -m 644 %{SOURCE7} %{buildroot}%{_datadir}/daisy/daisy-cache-dist.conf
install -p -D -m 640 etc/daisy-scrubber.conf %{buildroot}%{_sysconfdir}/daisy/daisy-scrubber.conf
install -p -D -m 644 %{SOURCE8} %{buildroot}%{_datadir}/daisy/daisy-scrubber-dist.conf
install -p -D -m 640 etc/policy.json %{buildroot}%{_sysconfdir}/daisy/policy.json
install -p -D -m 640 etc/schema-image.json %{buildroot}%{_sysconfdir}/daisy/schema-image.json
@@ -211,7 +206,6 @@ install -p -D -m 640 etc/schema-image.json %{buildroot}%{_sysconfdir}/daisy/sche
# systemd services
install -p -D -m 644 %{SOURCE1} %{buildroot}%{_unitdir}/daisy-api.service
install -p -D -m 644 %{SOURCE2} %{buildroot}%{_unitdir}/daisy-registry.service
install -p -D -m 644 %{SOURCE3} %{buildroot}%{_unitdir}/daisy-scrubber.service
install -p -D -m 644 %{SOURCE9} %{buildroot}%{_unitdir}/daisy-orchestration.service
# Logrotate config
@@ -243,20 +237,17 @@ exit 0
# Initial installation
%systemd_post daisy-api.service
%systemd_post daisy-registry.service
%systemd_post daisy-scrubber.service
%systemd_post daisy-orchestration.service
%preun
%systemd_preun daisy-api.service
%systemd_preun daisy-registry.service
%systemd_preun daisy-scrubber.service
%systemd_preun daisy-orchestration.service
%postun
%systemd_postun_with_restart daisy-api.service
%systemd_postun_with_restart daisy-registry.service
%systemd_postun_with_restart daisy-scrubber.service
%systemd_postun_with_restart daisy-orchestration.service
if [ $1 -eq 0 ] ; then
@@ -271,14 +262,12 @@ fi
/etc/daisy/daisy-registry-paste.ini
%doc README.rst
%{_bindir}/daisy-api
%{_bindir}/daisy-control
%{_bindir}/daisy-manage
%{_bindir}/daisy-registry
%{_bindir}/daisy-cache-cleaner
%{_bindir}/daisy-cache-manage
%{_bindir}/daisy-cache-prefetcher
%{_bindir}/daisy-cache-pruner
%{_bindir}/daisy-scrubber
%{_bindir}/daisy-replicator
%{_bindir}/daisy-index
%{_bindir}/daisy-search
@@ -287,13 +276,11 @@ fi
%{_datadir}/daisy/daisy-api-dist.conf
%{_datadir}/daisy/daisy-registry-dist.conf
%{_datadir}/daisy/daisy-cache-dist.conf
%{_datadir}/daisy/daisy-scrubber-dist.conf
%{_datadir}/daisy/daisy-api-dist-paste.ini
%{_datadir}/daisy/daisy-registry-dist-paste.ini
%{_unitdir}/daisy-api.service
%{_unitdir}/daisy-registry.service
%{_unitdir}/daisy-scrubber.service
%{_unitdir}/daisy-orchestration.service
@@ -305,7 +292,6 @@ fi
%config(noreplace) %attr(-, root, daisy) %{_sysconfdir}/daisy/daisy-registry.conf
%config(noreplace) %attr(-, root, daisy) %{_sysconfdir}/daisy/daisy-orchestration.conf
%config(noreplace) %attr(-, root, daisy) %{_sysconfdir}/daisy/daisy-cache.conf
%config(noreplace) %attr(-, root, daisy) %{_sysconfdir}/daisy/daisy-scrubber.conf
%config(noreplace) %attr(-, root, daisy) %{_sysconfdir}/daisy/policy.json
%config(noreplace) %attr(-, root, daisy) %{_sysconfdir}/daisy/schema-image.json
%config(noreplace) %attr(-, root, daisy) %{_sysconfdir}/logrotate.d/daisy


@@ -4,7 +4,6 @@ verbose = True
use_stderr = False
log_file = /var/log/daisy/api.log
filesystem_store_datadir = /var/lib/daisy/images/
scrubber_datadir = /var/lib/daisy/scrubber
image_cache_dir = /var/lib/daisy/image-cache/
[database]


@@ -1,6 +0,0 @@
[DEFAULT]
debug = False
verbose = True
log_file = /var/log/daisy/scrubber.log
scrubber_datadir = /var/lib/daisy/scrubber


@@ -1,15 +0,0 @@
[Unit]
Description=OpenStack Image Service deferred image deletion service
After=syslog.target network.target
[Service]
Type=notify
NotifyAccess=all
Restart=always
User=daisy
ExecStart=/usr/bin/daisy-scrubber
PrivateTmp=true
[Install]
WantedBy=multi-user.target