Adopt glance.store library in Glance

This commit removes the old `store` package from glance and adopts the
usage of the new glance.store library. The library was designed to
preserve backwards compatibility as much as possible. In fact, most of
the changes in this patch are related to function args ordering and not
function renames or workflow changes.

Some changes that are worth mentioning:

1. Glance store doesn't rely on a global config object. All config
options must be explicitly registered.

2. All store operations now accept an optional context. This is a
fallout from the context not being required in the `Store` constructor
anymore.

3. Store drivers are behind a private package called `_drivers` and
they're not supposed to be accessed directly. Instead, functions like
`get_store_from_scheme` should be used.

4. Stores are disabled by default

5. All the store specific options are under the `glance_store` group.

DocImpact:
The old store related configuration options have been moved under the
`glance_store` section. However, the old options will go through a
deprecation path. That is, they'll still be read from the `DEFAULT`
section to give deployers enough time to update their config files.

In k-2, the deprecated options will be completely obsolete.

Closes-bug: #1291848
Implements-blueprint: create-store-package

Change-Id: Iaacc70993ad5da292b93de42bbecda73d53b19fd
This commit is contained in:
Flavio Percoco 2014-06-17 16:04:15 +02:00
parent 5a74548f46
commit f6e7992a68
79 changed files with 347 additions and 10381 deletions

View File

@ -15,10 +15,12 @@
import webob.exc
import glance_store as store
from glance.common import exception
import glance.openstack.common.log as logging
import glance.registry.client.v1.api as registry
import glance.store as store
LOG = logging.getLogger(__name__)
@ -74,10 +76,11 @@ class BaseController(object):
write_tenants.append(member['member_id'])
else:
read_tenants.append(member['member_id'])
store.set_acls(req.context, location_uri, public=public,
store.set_acls(location_uri, public=public,
read_tenants=read_tenants,
write_tenants=write_tenants)
except exception.UnknownScheme:
write_tenants=write_tenants,
context=req.context)
except store.UnknownScheme:
msg = _("Store for image_id not found: %s") % image_id
raise webob.exc.HTTPBadRequest(explanation=msg,
request=req,

View File

@ -20,6 +20,8 @@
import copy
import eventlet
import glance_store as store
import glance_store.location
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
from webob.exc import HTTPBadRequest
@ -47,12 +49,6 @@ from glance.openstack.common import gettextutils
import glance.openstack.common.log as logging
from glance.openstack.common import strutils
import glance.registry.client.v1.api as registry
from glance.store import get_from_backend
from glance.store import get_known_schemes
from glance.store import get_size_from_backend
from glance.store import get_store_from_location
from glance.store import get_store_from_scheme
from glance.store import validate_location
LOG = logging.getLogger(__name__)
_LI = gettextutils._LI
@ -425,7 +421,7 @@ class Controller(controller.BaseController):
"""
if source:
pieces = urlparse.urlparse(source)
schemes = [scheme for scheme in get_known_schemes()
schemes = [scheme for scheme in store.get_known_schemes()
if scheme != 'file']
for scheme in schemes:
if pieces.scheme == scheme:
@ -451,8 +447,14 @@ class Controller(controller.BaseController):
@staticmethod
def _get_from_store(context, where, dest=None):
try:
image_data, image_size = get_from_backend(
context, where, dest=dest)
loc = glance_store.location.get_location_from_uri(where)
src_store = store.get_store_from_uri(where)
if dest is not None:
src_store.READ_CHUNKSIZE = dest.WRITE_CHUNKSIZE
image_data, image_size = src_store.get(loc, context=context)
except exception.NotFound as e:
raise HTTPNotFound(explanation=e.msg)
image_size = int(image_size) if image_size else None
@ -492,7 +494,6 @@ class Controller(controller.BaseController):
image_meta['location'])
image_iterator = utils.cooperative_iter(image_iterator)
image_meta['size'] = size or image_meta['size']
image_meta = redact_loc(image_meta)
return {
'image_iterator': image_iterator,
@ -513,9 +514,9 @@ class Controller(controller.BaseController):
:raises HTTPBadRequest if image metadata is not valid
"""
location = self._external_source(image_meta, req)
store = image_meta.get('store')
if store and store not in get_known_schemes():
msg = "Required store %s is invalid" % store
scheme = image_meta.get('store')
if scheme and scheme not in store.get_known_schemes():
msg = "Required store %s is invalid" % scheme
LOG.debug(msg)
raise HTTPBadRequest(explanation=msg,
content_type='text/plain')
@ -525,8 +526,8 @@ class Controller(controller.BaseController):
if location:
try:
store = get_store_from_location(location)
except exception.BadStoreUri:
backend = store.get_store_from_location(location)
except store.BadStoreUri:
msg = "Invalid location %s" % location
LOG.debug(msg)
raise HTTPBadRequest(explanation=msg,
@ -534,7 +535,7 @@ class Controller(controller.BaseController):
content_type="text/plain")
# check the store exists before we hit the registry, but we
# don't actually care what it is at this point
self.get_store_or_400(req, store)
self.get_store_or_400(req, backend)
# retrieve the image size from remote store (if not provided)
image_meta['size'] = self._get_size(req.context, image_meta,
@ -585,7 +586,9 @@ class Controller(controller.BaseController):
:retval The location where the image was stored
"""
scheme = req.headers.get('x-image-meta-store', CONF.default_store)
scheme = req.headers.get('x-image-meta-store',
CONF.glance_store.default_store)
store = self.get_store_or_400(req, scheme)
copy_from = self._copy_from(req)
@ -691,8 +694,8 @@ class Controller(controller.BaseController):
# retrieve the image size from remote store (if not provided)
try:
return (image_meta.get('size', 0) or
get_size_from_backend(context, location))
except (exception.NotFound, exception.BadStoreUri) as e:
store.get_size_from_backend(location, context=context))
except (exception.NotFound, store.BadStoreUri) as e:
LOG.debug(e)
raise HTTPBadRequest(explanation=e.msg, content_type="text/plain")
@ -718,16 +721,17 @@ class Controller(controller.BaseController):
else:
if location:
try:
validate_location(req.context, location)
except exception.BadStoreUri as bse:
store.validate_location(location, context=req.context)
except store.BadStoreUri as bse:
raise HTTPBadRequest(explanation=bse.msg,
request=req)
self._validate_image_for_activation(req, image_id, image_meta)
image_size_meta = image_meta.get('size')
if image_size_meta:
image_size_store = get_size_from_backend(req.context,
location)
image_size_store = store.get_size_from_backend(
location,
context=req.context)
# NOTE(zhiyan): A returned size of zero usually means
# the driver encountered an error. In this case the
# size provided by the client will be used as-is.
@ -902,7 +906,7 @@ class Controller(controller.BaseController):
try:
self.update_store_acls(req, id, orig_or_updated_loc,
public=is_public)
except exception.BadStoreUri:
except store.BadStoreUri:
msg = "Invalid location %s" % location
LOG.debug(msg)
raise HTTPBadRequest(explanation=msg,
@ -1049,6 +1053,7 @@ class Controller(controller.BaseController):
with excutils.save_and_reraise_exception():
registry.update_image_metadata(req.context, id,
{'status': ori_status})
registry.delete_image_metadata(req.context, id)
except exception.NotFound as e:
msg = (_("Failed to find image to delete: %s") %
@ -1089,7 +1094,7 @@ class Controller(controller.BaseController):
:raises HTTPNotFound if store does not exist
"""
try:
return get_store_from_scheme(request.context, scheme)
return store.get_store_from_scheme(scheme)
except exception.UnknownScheme:
msg = "Store for scheme %s not found" % scheme
LOG.debug(msg)

View File

@ -16,6 +16,8 @@
from oslo.config import cfg
import webob.exc
import glance_store as store_api
from glance.common import exception
from glance.common import store_utils
from glance.common import utils
@ -24,7 +26,6 @@ from glance.openstack.common import excutils
from glance.openstack.common import gettextutils
import glance.openstack.common.log as logging
import glance.registry.client.v1.api as registry
import glance.store as store_api
CONF = cfg.CONF
@ -192,7 +193,7 @@ def upload_data_to_store(req, image_meta, image_data, store, notifier):
request=req,
content_type="text/plain")
except exception.StorageFull as e:
except store_api.StorageFull as e:
msg = _("Image storage media is full: %s") % utils.exception_to_str(e)
LOG.error(msg)
safe_kill(req, image_id, 'saving')
@ -201,7 +202,7 @@ def upload_data_to_store(req, image_meta, image_data, store, notifier):
request=req,
content_type='text/plain')
except exception.StorageWriteDenied as e:
except store_api.StorageWriteDenied as e:
msg = (_("Insufficient permissions on image storage media: %s") %
utils.exception_to_str(e))
LOG.error(msg)

View File

@ -15,6 +15,8 @@
import webob.exc
import glance_store
import glance.api.policy
from glance.common import exception
from glance.common import utils
@ -25,7 +27,7 @@ import glance.notifier
from glance.openstack.common import excutils
from glance.openstack.common import gettextutils
import glance.openstack.common.log as logging
import glance.store
LOG = logging.getLogger(__name__)
_LE = gettextutils._LE
@ -37,7 +39,7 @@ class ImageDataController(object):
gateway=None):
if gateway is None:
db_api = db_api or glance.db.get_api()
store_api = store_api or glance.store
store_api = store_api or glance_store
policy = policy_enforcer or glance.api.policy.Enforcer()
notifier = notifier or glance.notifier.Notifier()
gateway = glance.gateway.Gateway(db_api, store_api,
@ -110,7 +112,7 @@ class ImageDataController(object):
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.StorageFull as e:
except glance_store.StorageFull as e:
msg = _("Image storage media "
"is full: %s") % utils.exception_to_str(e)
LOG.error(msg)
@ -134,7 +136,7 @@ class ImageDataController(object):
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req)
except exception.StorageWriteDenied as e:
except glance_store.StorageWriteDenied as e:
msg = _("Insufficient permissions on image "
"storage media: %s") % utils.exception_to_str(e)
LOG.error(msg)

View File

@ -17,6 +17,8 @@ import copy
import six
import webob
import glance_store
from glance.api import policy
from glance.common import exception
from glance.common import utils
@ -27,7 +29,6 @@ import glance.notifier
from glance.openstack.common import jsonutils
from glance.openstack.common import timeutils
import glance.schema
import glance.store
class ImageMembersController(object):
@ -36,7 +37,7 @@ class ImageMembersController(object):
self.db_api = db_api or glance.db.get_api()
self.policy = policy_enforcer or policy.Enforcer()
self.notifier = notifier or glance.notifier.Notifier()
self.store_api = store_api or glance.store
self.store_api = store_api or glance_store
self.gateway = glance.gateway.Gateway(self.db_api, self.store_api,
self.notifier, self.policy)

View File

@ -15,6 +15,8 @@
import webob.exc
import glance_store
from glance.api import policy
from glance.common import exception
from glance.common import utils
@ -22,7 +24,6 @@ from glance.common import wsgi
import glance.db
import glance.gateway
import glance.notifier
import glance.store
class Controller(object):
@ -31,7 +32,7 @@ class Controller(object):
self.db_api = db_api or glance.db.get_api()
self.policy = policy_enforcer or policy.Enforcer()
self.notifier = notifier or glance.notifier.Notifier()
self.store_api = store_api or glance.store
self.store_api = store_api or glance_store
self.gateway = glance.gateway.Gateway(self.db_api, self.store_api,
self.notifier, self.policy)

View File

@ -15,6 +15,7 @@
import re
import glance_store
from oslo.config import cfg
import six
import six.moves.urllib.parse as urlparse
@ -33,7 +34,6 @@ from glance.openstack.common import jsonutils as json
import glance.openstack.common.log as logging
from glance.openstack.common import timeutils
import glance.schema
import glance.store
LOG = logging.getLogger(__name__)
_LI = gettextutils._LI
@ -51,7 +51,7 @@ class ImagesController(object):
self.db_api = db_api or glance.db.get_api()
self.policy = policy_enforcer or policy.Enforcer()
self.notifier = notifier or glance.notifier.Notifier()
self.store_api = store_api or glance.store
self.store_api = store_api or glance_store
self.gateway = glance.gateway.Gateway(self.db_api, self.store_api,
self.notifier, self.policy)

View File

@ -37,7 +37,6 @@ import glance.notifier
from glance.openstack.common import jsonutils as json
import glance.openstack.common.log as logging
import glance.schema
import glance.store
LOG = logging.getLogger(__name__)
_LE = i18n._LE

View File

@ -33,7 +33,6 @@ import glance.notifier
from glance.openstack.common import jsonutils as json
import glance.openstack.common.log as logging
import glance.schema
import glance.store
LOG = logging.getLogger(__name__)
_LE = i18n._LE

View File

@ -33,7 +33,6 @@ import glance.notifier
from glance.openstack.common import jsonutils as json
import glance.openstack.common.log as logging
import glance.schema
import glance.store
LOG = logging.getLogger(__name__)
_LE = i18n._LE

View File

@ -17,6 +17,7 @@
import copy
import webob.exc
import glance_store
from oslo.config import cfg
import six
import six.moves.urllib.parse as urlparse
@ -33,7 +34,6 @@ import glance.openstack.common.jsonutils as json
import glance.openstack.common.log as logging
from glance.openstack.common import timeutils
import glance.schema
import glance.store
LOG = logging.getLogger(__name__)
_LI = gettextutils._LI
@ -50,7 +50,7 @@ class TasksController(object):
self.db_api = db_api or glance.db.get_api()
self.policy = policy_enforcer or policy.Enforcer()
self.notifier = notifier or glance.notifier.Notifier()
self.store_api = store_api or glance.store
self.store_api = store_api or glance_store
self.gateway = glance.gateway.Gateway(self.db_api, self.store_api,
self.notifier, self.policy)

View File

@ -39,6 +39,7 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
import glance_store
from oslo.config import cfg
import osprofiler.notifier
import osprofiler.web
@ -48,7 +49,6 @@ from glance.common import exception
from glance.common import wsgi
from glance import notifier
from glance.openstack.common import log
import glance.store
CONF = cfg.CONF
CONF.import_group("profiler", "glance.common.wsgi")
@ -65,8 +65,9 @@ def main():
wsgi.set_eventlet_hub()
log.setup('glance')
glance.store.create_stores()
glance.store.verify_default_store()
glance_store.register_opts(config.CONF)
glance_store.create_stores(config.CONF)
glance_store.verify_default_store()
if cfg.CONF.profiler.enabled:
_notifier = osprofiler.notifier.create("Messaging",

View File

@ -33,10 +33,11 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
import glance_store
from glance.common import config
from glance.image_cache import prefetcher
from glance.openstack.common import log
import glance.store
def main():
@ -44,8 +45,9 @@ def main():
config.parse_cache_args()
log.setup('glance')
glance.store.create_stores()
glance.store.verify_default_store()
glance_store.register_opts(config.CONF)
glance_store.create_stores(config.CONF)
glance_store.verify_default_store()
app = prefetcher.Prefetcher()
app.run()

View File

@ -30,12 +30,13 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
import glance_store
from oslo.config import cfg
from glance.common import config
from glance.openstack.common import log
from glance import scrubber
import glance.store
CONF = cfg.CONF
@ -57,10 +58,11 @@ def main():
config.parse_args()
log.setup('glance')
glance.store.create_stores()
glance.store.verify_default_store()
glance_store.register_opts(config.CONF)
glance_store.create_stores(config.CONF)
glance_store.verify_default_store()
app = scrubber.Scrubber(glance.store)
app = scrubber.Scrubber(glance_store)
if CONF.daemon:
server = scrubber.Daemon(CONF.wakeup_time)

View File

@ -72,10 +72,6 @@ class NotFound(GlanceException):
message = _("An object with the specified identifier was not found.")
class UnknownScheme(GlanceException):
message = _("Unknown scheme '%(scheme)s' found in URI")
class BadStoreUri(GlanceException):
message = _("The Store URI was malformed.")
@ -89,19 +85,11 @@ class Conflict(GlanceException):
"operated on.")
class StorageFull(GlanceException):
message = _("There is not enough disk space on the image storage media.")
class StorageQuotaFull(GlanceException):
message = _("The size of the data %(image_size)s will exceed the limit. "
"%(remaining)s bytes remaining.")
class StorageWriteDenied(GlanceException):
message = _("Permission to write image storage media denied.")
class AuthBadRequest(GlanceException):
message = _("Connect error/bad request to Auth service at URL %(url)s.")
@ -241,37 +229,11 @@ class BadRegistryConnectionConfiguration(GlanceException):
"Reason: %(reason)s")
class BadStoreConfiguration(GlanceException):
message = _("Store %(store_name)s could not be configured correctly. "
"Reason: %(reason)s")
class BadDriverConfiguration(GlanceException):
message = _("Driver %(driver_name)s could not be configured correctly. "
"Reason: %(reason)s")
class StoreDeleteNotSupported(GlanceException):
message = _("Deleting images from this store is not supported.")
class StoreGetNotSupported(GlanceException):
message = _("Getting images from this store is not supported.")
class StoreAddNotSupported(GlanceException):
message = _("Adding images to this store is not supported.")
class StoreAddDisabled(GlanceException):
message = _("Configuration for store failed. Adding images to this "
"store is disabled.")
class StoreNotConfigured(GlanceException):
message = _("Store is not configured.")
class MaxRedirectsExceeded(GlanceException):
message = _("Maximum redirects (%(redirects)s) was exceeded.")

View File

@ -66,7 +66,7 @@ def _load_strategies():
_available_strategies = _load_strategies()
# TODO(kadachi): Not used but don't remove this until glance.store
# TODO(kadachi): Not used but don't remove this until glance_store
# development/migration stage.
def verify_location_strategy(conf=None, strategies=_available_strategies):
"""Validate user configured 'location_strategy' option value."""

View File

@ -14,15 +14,14 @@
import sys
import glance_store as store_api
from oslo.config import cfg
from glance.common import exception
from glance.common import utils
import glance.db as db_api
from glance.openstack.common import gettextutils
import glance.openstack.common.log as logging
from glance import scrubber
import glance.store as store_api
_LE = gettextutils._LE
_LW = gettextutils._LW
@ -53,16 +52,16 @@ def safe_delete_from_backend(context, image_id, location):
"""
try:
ret = store_api.delete_from_backend(context, location['url'])
ret = store_api.delete_from_backend(location['url'], context=context)
location['status'] = 'deleted'
if 'id' in location:
db_api.get_api().image_location_delete(context, image_id,
location['id'], 'deleted')
return ret
except exception.NotFound:
except store_api.NotFound:
msg = _LW('Failed to delete image %s in store from URI') % image_id
LOG.warn(msg)
except exception.StoreDeleteNotSupported as e:
except store_api.StoreDeleteNotSupported as e:
LOG.warn(utils.exception_to_str(e))
except store_api.UnsupportedBackend:
exc_type = sys.exc_info()[0].__name__

View File

@ -28,6 +28,8 @@ Fixes bug #1081043
"""
import types # noqa
#NOTE(flaper87): This is bad but there ain't better way to do it.
from glance_store._drivers import swift # noqa
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
import sqlalchemy
@ -37,7 +39,6 @@ from glance.common import exception
from glance.common import utils
from glance.openstack.common import gettextutils
import glance.openstack.common.log as logging
import glance.store.swift # noqa
LOG = logging.getLogger(__name__)
_LE = gettextutils._LE

View File

@ -38,12 +38,12 @@ _delayed_delete_imported = False
def _import_delayed_delete():
# glance.store (indirectly) imports glance.domain therefore we can't put
# glance_store (indirectly) imports glance.domain therefore we can't put
# the CONF.import_opt outside - we have to do it in a convoluted/indirect
# way!
global _delayed_delete_imported
if not _delayed_delete_imported:
CONF.import_opt('delayed_delete', 'glance.store')
CONF.import_opt('delayed_delete', 'glance_store')
_delayed_delete_imported = True

View File

@ -24,14 +24,14 @@ import glance.domain
import glance.location
import glance.notifier
import glance.quota
import glance.store
import glance_store
class Gateway(object):
def __init__(self, db_api=None, store_api=None, notifier=None,
policy_enforcer=None):
self.db_api = db_api or glance.db.get_api()
self.store_api = store_api or glance.store
self.store_api = store_api or glance_store
self.store_utils = store_utils
self.notifier = notifier or glance.notifier.Notifier()
self.policy = policy_enforcer or policy.Enforcer()

View File

@ -19,13 +19,14 @@ Prefetches images into the Image Cache
import eventlet
import glance_store
from glance.common import exception
from glance import context
from glance.image_cache import base
from glance.openstack.common import gettextutils
import glance.openstack.common.log as logging
import glance.registry.client.v1.api as registry
import glance.store
LOG = logging.getLogger(__name__)
_LI = gettextutils._LI
@ -54,8 +55,9 @@ class Prefetcher(base.CacheApp):
return False
location = image_meta['location']
image_data, image_size = glance.store.get_from_backend(ctx, location)
LOG.debug("Caching image '%s'" % image_id)
image_data, image_size = glance_store.get_from_backend(location,
context=ctx)
LOG.debug("Caching image '%s'", image_id)
cache_tee_iter = self.cache.cache_tee_iter(image_id, image_data,
image_meta['checksum'])
# Image is tee'd into cache and checksum verified

View File

@ -16,6 +16,7 @@
import collections
import copy
import glance_store as store
from oslo.config import cfg
from glance.common import exception
@ -24,7 +25,7 @@ import glance.domain.proxy
from glance.openstack.common import excutils
from glance.openstack.common import gettextutils
import glance.openstack.common.log as logging
from glance import store
_LE = gettextutils._LE
@ -50,8 +51,9 @@ class ImageRepoProxy(glance.domain.proxy.Repo):
member_repo = image.get_member_repo()
member_ids = [m.member_id for m in member_repo.list()]
for location in image.locations:
self.store_api.set_acls(self.context, location['url'], public,
read_tenants=member_ids)
self.store_api.set_acls(location['url'], public=public,
read_tenants=member_ids,
context=self.context)
def add(self, image):
result = super(ImageRepoProxy, self).add(image)
@ -73,10 +75,10 @@ def _check_location_uri(context, store_api, uri):
"""
is_ok = True
try:
size = store_api.get_size_from_backend(context, uri)
size = store_api.get_size_from_backend(uri, context=context)
# NOTE(zhiyan): Some stores return zero when it catch exception
is_ok = size > 0
except (exception.UnknownScheme, exception.NotFound):
except (store.UnknownScheme, store.NotFound):
is_ok = False
if not is_ok:
reason = _('Invalid location')
@ -92,7 +94,8 @@ def _set_image_size(context, image, locations):
if not image.size:
for location in locations:
size_from_backend = store.get_size_from_backend(
context, location['url'])
location['url'], context=context)
if size_from_backend:
# NOTE(flwang): This assumes all locations have the same size
image.size = size_from_backend
@ -353,11 +356,12 @@ class ImageProxy(glance.domain.proxy.Image):
if size is None:
size = 0 # NOTE(markwash): zero -> unknown size
location, size, checksum, loc_meta = self.store_api.add_to_backend(
self.context, CONF.default_store,
CONF,
self.image.image_id,
utils.LimitingReader(utils.CooperativeReader(data),
CONF.image_size_cap),
size)
size,
context=self.context)
self.image.locations = [{'url': location, 'metadata': loc_meta,
'status': 'active'}]
self.image.size = size
@ -366,12 +370,13 @@ class ImageProxy(glance.domain.proxy.Image):
def get_data(self):
if not self.image.locations:
raise exception.NotFound(_("No image data could be found"))
raise store.NotFound(_("No image data could be found"))
err = None
for loc in self.image.locations:
try:
data, size = self.store_api.get_from_backend(self.context,
loc['url'])
data, size = self.store_api.get_from_backend(
loc['url'],
context=self.context)
return data
except Exception as e:
@ -398,8 +403,9 @@ class ImageMemberRepoProxy(glance.domain.proxy.Repo):
if self.image.locations and not public:
member_ids = [m.member_id for m in self.repo.list()]
for location in self.image.locations:
self.store_api.set_acls(self.context, location['url'],
public, read_tenants=member_ids)
self.store_api.set_acls(location['url'], public=public,
read_tenants=member_ids,
context=self.context)
def add(self, member):
super(ImageMemberRepoProxy, self).add(member)

View File

@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import glance_store
from oslo.config import cfg
from oslo import messaging
import webob
@ -195,12 +196,12 @@ class ImageProxy(glance.domain.proxy.Image):
self.notifier.info('image.prepare', payload)
try:
self.image.set_data(data, size)
except exception.StorageFull as e:
except glance_store.StorageFull as e:
msg = (_("Image storage media is full: %s") %
utils.exception_to_str(e))
self.notifier.error('image.upload', msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
except exception.StorageWriteDenied as e:
except glance_store.StorageWriteDenied as e:
msg = (_("Insufficient permissions on image storage media: %s")
% utils.exception_to_str(e))
self.notifier.error('image.upload', msg)

View File

@ -16,6 +16,7 @@ import copy
import six
import glance_store as store
from oslo.config import cfg
import glance.api.common
@ -57,9 +58,9 @@ def _calc_required_size(context, image, locations):
for location in locations:
size_from_backend = None
try:
size_from_backend = glance.store.get_size_from_backend(
context, location['url'])
except (exception.UnknownScheme, exception.NotFound):
size_from_backend = store.get_size_from_backend(
location['url'], context=context)
except (store.UnknownScheme, store.NotFound):
pass
if size_from_backend:
required_size = size_from_backend * len(locations)

View File

@ -53,6 +53,8 @@ scrubber_opts = [
'clean up the files it uses for taking data. Only '
'one server in your deployment should be designated '
'the cleanup host.')),
cfg.BoolOpt('delayed_delete', default=False,
help=_('Turn on/off delayed delete.')),
cfg.IntOpt('cleanup_scrubber_time', default=86400,
help=_('Items must have a modified time that is older than '
'this value in order to be candidates for cleanup.'))

View File

@ -1,384 +0,0 @@
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import six
from glance.common import exception
from glance.common import utils
import glance.context
import glance.domain.proxy
from glance.openstack.common import importutils
import glance.openstack.common.log as logging
from glance.store import location
LOG = logging.getLogger(__name__)

# Options controlling which store driver classes are loaded at startup and
# which scheme is used by default when storing new image data.
store_opts = [
    cfg.ListOpt('known_stores',
                default=[
                    'glance.store.filesystem.Store',
                    'glance.store.http.Store'
                ],
                help=_('List of which store classes and store class locations '
                       'are currently known to glance at startup.')),
    cfg.StrOpt('default_store', default='file',
               help=_("Default scheme to use to store image data. The "
                      "scheme must be registered by one of the stores "
                      "defined by the 'known_stores' config option.")),
    cfg.BoolOpt('delayed_delete', default=False,
                help=_('Turn on/off delayed delete.')),
]

# Short store names successfully registered by create_stores(); populated
# through _register_stores() (e.g. 'filesystem', plus the 'file' alias).
REGISTERED_STORES = set()

CONF = cfg.CONF
CONF.register_opts(store_opts)

# Every driver bundled with glance; create_stores() attempts all of these
# in addition to whatever the deployer lists in 'known_stores'.
_ALL_STORES = [
    'glance.store.filesystem.Store',
    'glance.store.http.Store',
    'glance.store.rbd.Store',
    'glance.store.s3.Store',
    'glance.store.swift.Store',
    'glance.store.sheepdog.Store',
    'glance.store.cinder.Store',
    'glance.store.gridfs.Store',
    'glance.store.vmware_datastore.Store'
]
class BackendException(Exception):
    """Base error raised for store-backend loading/registration failures."""
    pass
class UnsupportedBackend(BackendException):
    """Raised when an operation hits a store backend that is not supported."""
    pass
class Indexable(object):
    """Present an iterator or file-like object as an indexable structure.

    This is required when the return value from Store.get() is passed to
    Store.add() while adding a Copy-From image to a store whose client
    library relies on eventlet GreenSockets: the data to be written is
    indexed over rather than iterated.
    """

    def __init__(self, wrapped, size):
        """Wrap *wrapped* and record the amount of data available.

        :param wrapped: the wrapped iterator or file-like object.
        :param size: the size of data available; when falsy, the wrapped
                     object's ``len`` attribute is used if present,
                     otherwise 0.
        """
        self.wrapped = wrapped
        if size:
            self.size = int(size)
        else:
            # Fall back to the wrapped object's own length, if it has one.
            self.size = getattr(wrapped, 'len', 0)
        self.cursor = 0
        self.chunk = None

    def __iter__(self):
        """Delegate iteration to the wrapped instance."""
        for piece in self.wrapped:
            self.chunk = piece
            yield piece

    def __getitem__(self, i):
        """Index into the next chunk, or back into the previous chunk when
        the last data returned was not fully consumed.

        :param i: a slice-to-the-end (or a bare start offset)
        """
        offset = i.start if isinstance(i, slice) else i
        if offset < self.cursor:
            # Still inside the chunk fetched last time: return its tail.
            return self.chunk[offset - self.cursor:]
        self.chunk = self.another()
        if self.chunk:
            self.cursor += len(self.chunk)
        return self.chunk

    def another(self):
        """Return the next element; implemented by subclasses."""
        raise NotImplementedError

    def getvalue(self):
        """Return the entire underlying value (used in testing)."""
        return self.wrapped.getvalue()

    def __len__(self):
        """Return the recorded data size."""
        return self.size
def _register_stores(store_classes):
    """Add the store name of each driver class to REGISTERED_STORES.

    The name is taken from the third component of the driver's module
    path (e.g. ``glance.store.filesystem`` -> ``filesystem``).
    """
    REGISTERED_STORES.update(
        cls.__module__.split('.')[2] for cls in store_classes)
    # NOTE (spredzy): The actual class name is filesystem but in order
    # to maintain backward compatibility we need to keep the 'file' store
    # as a known store
    if 'filesystem' in REGISTERED_STORES:
        REGISTERED_STORES.add('file')
def _get_store_class(store_entry):
    """Import and return the store class named by *store_entry*.

    :param store_entry: dotted path of the store class to import
    :raises BackendException: when the class cannot be found
    """
    try:
        LOG.debug("Attempting to import store %s", store_entry)
        return importutils.import_class(store_entry)
    except exception.NotFound:
        raise BackendException('Unable to load store. '
                               'Could not find a class named %s.'
                               % store_entry)
def create_stores():
    """
    Registers all store modules and all schemes
    from the given config. Duplicates are not re-registered.

    :returns: the number of store classes registered by this call
    :raises BackendException: if a store instance exposes no schemes
    """
    store_count = 0
    store_classes = set()
    # Attempt every bundled driver in addition to the explicitly
    # configured ones; duplicates are collapsed by the set().
    for store_entry in set(CONF.known_stores + _ALL_STORES):
        store_entry = store_entry.strip()
        if not store_entry:
            continue
        store_cls = _get_store_class(store_entry)
        try:
            store_instance = store_cls()
        except exception.BadStoreConfiguration as e:
            # Only warn when the deployer explicitly asked for this store;
            # implicitly-attempted drivers are skipped quietly.
            if store_entry in CONF.known_stores:
                LOG.warn(_("%s Skipping store driver.") %
                         utils.exception_to_str(e))
            continue
        finally:
            # NOTE(flaper87): To be removed in Juno
            if store_entry not in CONF.known_stores:
                LOG.deprecated(_("%s not found in `known_store`. "
                                 "Stores need to be explicitly enabled in "
                                 "the configuration file.") % store_entry)
        schemes = store_instance.get_schemes()
        if not schemes:
            raise BackendException('Unable to register store %s. '
                                   'No schemes associated with it.'
                                   % store_cls)
        else:
            if store_cls not in store_classes:
                LOG.debug("Registering store %(cls)s with schemes "
                          "%(schemes)s", {'cls': store_cls,
                                          'schemes': schemes})
                store_classes.add(store_cls)
                scheme_map = {}
                # Map each scheme to its store and location classes so
                # URIs can later be resolved to the right driver.
                for scheme in schemes:
                    loc_cls = store_instance.get_store_location_class()
                    scheme_map[scheme] = {
                        'store_class': store_cls,
                        'location_class': loc_cls,
                    }
                location.register_scheme_map(scheme_map)
                store_count += 1
            else:
                LOG.debug("Store %s already registered", store_cls)
    _register_stores(store_classes)
    return store_count
def verify_default_store():
    """Ensure the configured ``default_store`` maps to a usable scheme.

    :raises RuntimeError: when no registered store handles the scheme
    """
    scheme = cfg.CONF.default_store
    context = glance.context.RequestContext()
    try:
        get_store_from_scheme(context, scheme, configure=False)
    except exception.UnknownScheme:
        raise RuntimeError(_("Store for scheme %s not found") % scheme)
def get_known_schemes():
    """Return the schemes currently present in the scheme registry."""
    scheme_map = location.SCHEME_TO_CLS_MAP
    return scheme_map.keys()
def get_known_stores():
    """Return the names of all registered stores as a list."""
    return [name for name in REGISTERED_STORES]
def get_store_from_scheme(context, scheme, loc=None, configure=True):
    """Return the store object appropriate for handling *scheme*.

    :param context: request context passed to the store constructor
    :param scheme: URI scheme (e.g. ``file``, ``swift``)
    :param loc: optional location the store is bound to
    :param configure: whether the store should configure itself
    :raises exception.UnknownScheme: when no store handles the scheme
    """
    try:
        scheme_info = location.SCHEME_TO_CLS_MAP[scheme]
    except KeyError:
        raise exception.UnknownScheme(scheme=scheme)
    return scheme_info['store_class'](context, loc, configure)
def get_store_from_uri(context, uri, loc=None):
    """Return the store object that would handle operations on *uri*.

    :param uri: URI to analyze
    """
    # Everything before the "://" separator is the scheme; the first '/'
    # of a well-formed store URI is the one right after the colon.
    separator_index = uri.find('/')
    scheme = uri[0:separator_index - 1]
    return get_store_from_scheme(context, scheme, loc)
def get_from_backend(context, uri, **kwargs):
    """Yield chunks of data from the backend specified by *uri*.

    :raises exception.StoreGetNotSupported: when the store cannot read
    """
    image_location = location.get_location_from_uri(uri)
    source = get_store_from_uri(context, uri, image_location)

    # When copying between stores, size reads for the destination writer.
    destination = kwargs.get('dest')
    if destination is not None:
        source.READ_CHUNKSIZE = destination.WRITE_CHUNKSIZE

    try:
        return source.get(image_location)
    except NotImplementedError:
        raise exception.StoreGetNotSupported
def get_size_from_backend(context, uri):
    """Retrieve the image size from the backend specified by *uri*."""
    image_location = location.get_location_from_uri(uri)
    backend = get_store_from_uri(context, uri, image_location)
    return backend.get_size(image_location)
def validate_location(context, uri):
    """Ask the store that handles *uri* to validate the location."""
    image_location = location.get_location_from_uri(uri)
    backend = get_store_from_uri(context, uri, image_location)
    backend.validate_location(uri)
def delete_from_backend(context, uri, **kwargs):
    """Remove the data behind *uri* from its backend.

    :raises exception.StoreDeleteNotSupported: when the store cannot delete
    """
    image_location = location.get_location_from_uri(uri)
    backend = get_store_from_uri(context, uri, image_location)
    try:
        return backend.delete(image_location)
    except NotImplementedError:
        raise exception.StoreDeleteNotSupported
def get_store_from_location(uri):
    """
    Given a location (assumed to be a URL), attempt to determine
    the store from the location.  We use here a simple guess that
    the scheme of the parsed URL is the store...

    :param uri: Location to check for the store
    """
    parsed = location.get_location_from_uri(uri)
    return parsed.store_name
def check_location_metadata(val, key=''):
    """Recursively validate location metadata values.

    Only dicts, lists and unicode strings are acceptable anywhere in the
    structure; *key* tracks the path used in the error message.

    :raises BackendException: on any other value type
    """
    if isinstance(val, dict):
        for sub_key in val:
            check_location_metadata(val[sub_key], key=sub_key)
    elif isinstance(val, list):
        for ndx, item in enumerate(val):
            check_location_metadata(item, key='%s[%d]' % (key, ndx))
    elif not isinstance(val, six.text_type):
        raise BackendException(_("The image metadata key %(key)s has an "
                                 "invalid type of %(val)s. Only dict, list, "
                                 "and unicode are supported.") %
                               {'key': key,
                                'val': type(val)})
def store_add_to_backend(image_id, data, size, store):
    """Wrap a call to a store's add() method and validate its output.

    :param image_id: The image add to which data is added
    :param data: The data to be stored
    :param size: The length of the data in bytes
    :param store: The store to which the data is being added
    :return: The url location of the file,
             the size amount of data,
             the checksum of the data
             the storage systems metadata dictionary for the location
    :raises BackendException: when the driver returns malformed metadata
    """
    (location, size, checksum, metadata) = store.add(image_id, data, size)
    if metadata is None:
        return (location, size, checksum, metadata)

    # The driver returned location metadata: it must be a dict whose
    # leaves are all unicode strings.
    if not isinstance(metadata, dict):
        msg = (_("The storage driver %(store)s returned invalid metadata "
                 "%(metadata)s. This must be a dictionary type") %
               {'store': six.text_type(store),
                'metadata': six.text_type(metadata)})
        LOG.error(msg)
        raise BackendException(msg)
    try:
        check_location_metadata(metadata)
    except BackendException as e:
        e_msg = (_("A bad metadata structure was returned from the "
                   "%(store)s storage driver: %(metadata)s. %(error)s.") %
                 {'store': six.text_type(store),
                  'metadata': six.text_type(metadata),
                  'error': utils.exception_to_str(e)})
        LOG.error(e_msg)
        raise BackendException(e_msg)
    return (location, size, checksum, metadata)
def add_to_backend(context, scheme, image_id, data, size):
    """Store *data* for *image_id* in the backend handling *scheme*.

    :raises exception.StoreAddNotSupported: when the store cannot write
    """
    backend = get_store_from_scheme(context, scheme)
    try:
        return store_add_to_backend(image_id, data, size, backend)
    except NotImplementedError:
        raise exception.StoreAddNotSupported
def set_acls(context, location_uri, public=False, read_tenants=None,
             write_tenants=None):
    """Apply read/write ACLs to the image behind *location_uri*.

    :param public: whether the image should be world-readable
    :param read_tenants: tenants granted read access (defaults to none)
    :param write_tenants: tenants granted write access (defaults to none)
    """
    read_tenants = read_tenants if read_tenants is not None else []
    write_tenants = write_tenants if write_tenants is not None else []

    loc = location.get_location_from_uri(location_uri)
    scheme = get_store_from_location(location_uri)
    backend = get_store_from_scheme(context, scheme, loc)
    try:
        backend.set_acls(loc, public=public, read_tenants=read_tenants,
                         write_tenants=write_tenants)
    except NotImplementedError:
        # Not every driver supports ACLs; silently skip those that don't.
        LOG.debug("Skipping store.set_acls... not implemented.")

View File

@ -1,176 +0,0 @@
# Copyright 2011 OpenStack Foundation
# Copyright 2012 RedHat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for all storage backends"""
from glance.common import exception
from glance.common import utils
from glance.openstack.common import importutils
import glance.openstack.common.log as logging
from glance.openstack.common import units
LOG = logging.getLogger(__name__)
class Store(object):
    """Abstract base class for all storage backend drivers.

    Subclasses implement scheme-specific behavior by overriding
    `get_schemes`, `get`, `get_size`, `add`, `delete` and, where
    applicable, `configure`, `configure_add`, `validate_location`
    and `set_acls`.
    """

    READ_CHUNKSIZE = 16 * units.Mi  # 16M
    WRITE_CHUNKSIZE = READ_CHUNKSIZE

    @staticmethod
    def _unconfigured(*args, **kwargs):
        # Stand-in bound over every store operation when the store is
        # instantiated with configure=False.
        raise exception.StoreNotConfigured

    def __init__(self, context=None, location=None, configure=True):
        """
        Initialize the Store

        :param context: request context the store may use for auth
        :param location: optional location this store is bound to
        :param configure: when False, skip configuration entirely and
            replace all operations with stubs raising StoreNotConfigured
        """
        self.store_location_class = None
        self.context = context

        if not configure:
            self.add = Store._unconfigured
            self.get = Store._unconfigured
            self.get_size = Store._unconfigured
            self.add_disabled = Store._unconfigured
            self.delete = Store._unconfigured
            self.set_acls = Store._unconfigured
            return

        self.configure()

        try:
            self.configure_add()
        except exception.BadStoreConfiguration as e:
            # A store that cannot accept writes stays readable; only the
            # add() operation is disabled.
            self.add = self.add_disabled
            msg = (_(u"Failed to configure store correctly: %s "
                     "Disabling add method.") % utils.exception_to_str(e))
            LOG.warn(msg)

    def configure(self):
        """
        Configure the Store to use the stored configuration options
        Any store that needs special configuration should implement
        this method.
        """
        pass

    def get_schemes(self):
        """
        Returns a tuple of schemes which this store can handle.
        """
        raise NotImplementedError

    def get_store_location_class(self):
        """
        Returns the store location class that is used by this store.

        The class is resolved lazily as ``<driver module>.StoreLocation``
        and cached on the instance.
        """
        if not self.store_location_class:
            class_name = "%s.StoreLocation" % (self.__module__)
            LOG.debug("Late loading location class %s", class_name)
            self.store_location_class = importutils.import_class(class_name)
        return self.store_location_class

    def configure_add(self):
        """
        This is like `configure` except that it's specifically for
        configuring the store to accept objects.

        If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`.
        """
        pass

    def validate_location(self, location):
        """
        Takes a location and validates it for the presence
        of any account references
        """
        pass

    def get(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        """
        raise NotImplementedError

    def get_size(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns the size

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        """
        raise NotImplementedError

    def add_disabled(self, *args, **kwargs):
        """
        Add method that raises an exception because the Store was
        not able to be configured properly and therefore the add()
        method would error out.
        """
        raise exception.StoreAddDisabled

    def add(self, image_id, image_file, image_size):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval tuple of URL in backing store, bytes written, checksum
                and a dictionary with storage system specific information
        :raises `glance.common.exception.Duplicate` if the image already
                existed
        """
        raise NotImplementedError

    def delete(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file to delete

        :location `glance.store.location.Location` object, supplied
                  from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        """
        raise NotImplementedError

    def set_acls(self, location, public=False, read_tenants=None,
                 write_tenants=None):
        """
        Sets the read and write access control list for an image in the
        backend store.

        :location `glance.store.location.Location` object, supplied
                  from glance.store.location.get_location_from_uri()
        :public A boolean indicating whether the image should be public.
        :read_tenants A list of tenant strings which should be granted
                      read access for an image.
        :write_tenants A list of tenant strings which should be granted
                       write access for an image.
        """
        raise NotImplementedError

View File

@ -1,179 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for Cinder"""
from cinderclient import exceptions as cinder_exception
from cinderclient import service_catalog
from cinderclient.v2 import client as cinderclient
from oslo.config import cfg
from glance.common import exception
from glance.common import utils
import glance.openstack.common.log as logging
from glance.openstack.common import units
import glance.store.base
import glance.store.location
LOG = logging.getLogger(__name__)
# Options controlling how glance locates and talks to the Cinder volume
# service; registered on the global CONF object below.
cinder_opts = [
    cfg.StrOpt('cinder_catalog_info',
               default='volume:cinder:publicURL',
               help='Info to match when looking for cinder in the service '
                    'catalog. Format is: separated values of the form: '
                    '<service_type>:<service_name>:<endpoint_type>.'),
    cfg.StrOpt('cinder_endpoint_template',
               help='Override service catalog lookup with template for cinder '
                    'endpoint e.g. http://localhost:8776/v1/%(project_id)s.'),
    cfg.StrOpt('os_region_name',
               help='Region name of this node.'),
    cfg.StrOpt('cinder_ca_certificates_file',
               help='Location of CA certicates file to use for cinder client '
                    'requests.'),
    cfg.IntOpt('cinder_http_retries',
               default=3,
               help='Number of cinderclient retries on failed http calls.'),
    cfg.BoolOpt('cinder_api_insecure',
                default=False,
                help='Allow to perform insecure SSL requests to cinder.'),
]

CONF = cfg.CONF
CONF.register_opts(cinder_opts)
def get_cinderclient(context):
    """Build an authenticated cinder client for *context*.

    The endpoint comes from ``cinder_endpoint_template`` when that option
    is set; otherwise it is looked up in the request context's service
    catalog using ``cinder_catalog_info`` (optionally filtered by region).

    :param context: request context carrying user, tenant, auth token and
        service catalog
    :returns: a configured ``cinderclient.v2`` Client instance
    """
    if CONF.cinder_endpoint_template:
        url = CONF.cinder_endpoint_template % context.to_dict()
    else:
        info = CONF.cinder_catalog_info
        service_type, service_name, endpoint_type = info.split(':')

        # extract the region if set in configuration
        if CONF.os_region_name:
            attr = 'region'
            filter_value = CONF.os_region_name
        else:
            attr = None
            filter_value = None

        # FIXME: the cinderclient ServiceCatalog object is mis-named.
        #        It actually contains the entire access blob.
        # Only needed parts of the service catalog are passed in, see
        # nova/context.py.
        compat_catalog = {
            'access': {'serviceCatalog': context.service_catalog or []}}
        sc = service_catalog.ServiceCatalog(compat_catalog)

        url = sc.url_for(attr=attr,
                         filter_value=filter_value,
                         service_type=service_type,
                         service_name=service_name,
                         endpoint_type=endpoint_type)

    LOG.debug('Cinderclient connection created using URL: %s' % url)

    c = cinderclient.Client(context.user,
                            context.auth_tok,
                            project_id=context.tenant,
                            auth_url=url,
                            insecure=CONF.cinder_api_insecure,
                            retries=CONF.cinder_http_retries,
                            cacert=CONF.cinder_ca_certificates_file)

    # noauth extracts user_id:project_id from auth_token
    c.client.auth_token = context.auth_tok or '%s:%s' % (context.user,
                                                         context.tenant)
    c.client.management_url = url
    return c
class StoreLocation(glance.store.location.StoreLocation):
    """Location describing a ``cinder://<volume_id>`` URI."""

    def process_specs(self):
        """Populate scheme and volume id from the specs dict."""
        self.scheme = self.specs.get('scheme', 'cinder')
        self.volume_id = self.specs.get('volume_id')

    def get_uri(self):
        """Render this location back into its URI form."""
        return "cinder://%s" % self.volume_id

    def parse_uri(self, uri):
        """Parse *uri*, validating its scheme and the embedded volume id."""
        prefix = 'cinder://'
        if not uri.startswith(prefix):
            reason = _("URI must start with 'cinder://'")
            LOG.info(reason)
            raise exception.BadStoreUri(message=reason)
        self.scheme = 'cinder'
        self.volume_id = uri[len(prefix):]
        if not utils.is_uuid_like(self.volume_id):
            reason = _("URI contains invalid volume ID")
            LOG.info(reason)
            raise exception.BadStoreUri(message=reason)
class Store(glance.store.base.Store):
    """Cinder backend store adapter."""

    EXAMPLE_URL = "cinder://volume-id"

    def get_schemes(self):
        """Return the URI schemes this store handles."""
        return ('cinder',)

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options
        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        if self.context is None:
            reason = _("Cinder storage requires a context.")
            raise exception.BadStoreConfiguration(store_name="cinder",
                                                  reason=reason)
        if self.context.service_catalog is None:
            reason = _("Cinder storage requires a service catalog.")
            raise exception.BadStoreConfiguration(store_name="cinder",
                                                  reason=reason)

    def get_size(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file and returns the image size

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        :rtype int
        """
        loc = location.store_location
        try:
            volume = get_cinderclient(self.context).volumes.get(loc.volume_id)
            # GB unit convert to byte
            return volume.size * units.Gi
        except cinder_exception.NotFound:
            # NOTE: the volume id lives on the parsed location, not on the
            # store itself -- referencing self.volume_id here raised
            # AttributeError instead of the intended NotFound.
            reason = _("Failed to get image size due to "
                       "volume can not be found: %s") % loc.volume_id
            LOG.error(reason)
            raise exception.NotFound(reason)
        except Exception as e:
            LOG.exception(_("Failed to get image size due to "
                            "internal error: %s") % utils.exception_to_str(e))
            return 0

View File

@ -1,468 +0,0 @@
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A simple filesystem-backed store
"""
import errno
import hashlib
import os
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
from glance.common import exception
from glance.common import utils
from glance.openstack.common import excutils
from glance.openstack.common import jsonutils
import glance.openstack.common.log as logging
from glance.openstack.common import processutils
from glance.openstack.common import units
import glance.store
import glance.store.base
import glance.store.location
LOG = logging.getLogger(__name__)
# Options for the filesystem store; registered on the global CONF below.
# Exactly one of datadir/datadirs must be set (enforced in configure_add).
filesystem_opts = [
    cfg.StrOpt('filesystem_store_datadir',
               help=_('Directory to which the Filesystem backend '
                      'store writes images.')),
    cfg.MultiStrOpt('filesystem_store_datadirs',
                    help=_("List of directories and its priorities to which "
                           "the Filesystem backend store writes images.")),
    cfg.StrOpt('filesystem_store_metadata_file',
               help=_("The path to a file which contains the "
                      "metadata to be returned with any location "
                      "associated with this store. The file must "
                      "contain a valid JSON dict."))]

CONF = cfg.CONF
CONF.register_opts(filesystem_opts)
class StoreLocation(glance.store.location.StoreLocation):
    """Location describing a Filesystem URI (``file://<path>``)."""

    def process_specs(self):
        """Populate scheme and path from the specs dict."""
        self.scheme = self.specs.get('scheme', 'file')
        self.path = self.specs.get('path')

    def get_uri(self):
        """Render this location back into its URI form."""
        return "file://%s" % self.path

    def parse_uri(self, uri):
        """
        Parse URLs. This method fixes an issue where credentials specified
        in the URL are interpreted differently in Python 2.6.1+ than prior
        versions of Python.
        """
        pieces = urlparse.urlparse(uri)
        assert pieces.scheme in ('file', 'filesystem')
        self.scheme = pieces.scheme
        # Join netloc and path so "file://a/b" and "file:///a/b" both work.
        path = (pieces.netloc + pieces.path).strip()
        if path == '':
            reason = _("No path specified in URI")
            LOG.info(reason)
            raise exception.BadStoreUri(message=reason)
        self.path = path
class ChunkedFile(object):
    """
    We send this back to the Glance API server as
    something that can iterate over a large file
    """

    def __init__(self, filepath):
        """Open *filepath* for binary reading."""
        self.filepath = filepath
        self.fp = open(self.filepath, 'rb')

    def __iter__(self):
        """Yield successive chunks of the file, closing it when exhausted."""
        try:
            while self.fp:
                data = self.fp.read(Store.READ_CHUNKSIZE)
                if not data:
                    break
                yield data
        finally:
            self.close()

    def close(self):
        """Close the internal file pointer"""
        if self.fp:
            self.fp.close()
            self.fp = None
class Store(glance.store.base.Store):
    """Filesystem-backed store.

    Images are written as plain files under one or more configured data
    directories; with multiple directories, placement is chosen by
    priority and available free space.
    """

    READ_CHUNKSIZE = 64 * units.Ki
    WRITE_CHUNKSIZE = READ_CHUNKSIZE

    def get_schemes(self):
        """Return the URI schemes this store handles."""
        return ('file', 'filesystem')

    def _check_write_permission(self, datadir):
        """
        Checks if directory created to write image files has
        write permission.

        :datadir is a directory path in which glance wites image files.
        :raise BadStoreConfiguration exception if datadir is read-only.
        """
        if not os.access(datadir, os.W_OK):
            msg = (_("Permission to write in %s denied") % datadir)
            LOG.exception(msg)
            raise exception.BadStoreConfiguration(
                store_name="filesystem", reason=msg)

    def _create_image_directories(self, directory_paths):
        """
        Create directories to write image files if
        it does not exist.

        :directory_paths is a list of directories belonging to glance store.
        :raise BadStoreConfiguration exception if creating a directory fails.
        """
        for datadir in directory_paths:
            if os.path.exists(datadir):
                self._check_write_permission(datadir)
            else:
                msg = _("Directory to write image files does not exist "
                        "(%s). Creating.") % datadir
                LOG.info(msg)
                try:
                    os.makedirs(datadir)
                    self._check_write_permission(datadir)
                except (IOError, OSError):
                    if os.path.exists(datadir):
                        # NOTE(markwash): If the path now exists, some other
                        # process must have beat us in the race condition.
                        # But it doesn't hurt, so we can safely ignore
                        # the error.
                        self._check_write_permission(datadir)
                        continue
                    reason = _("Unable to create datadir: %s") % datadir
                    LOG.error(reason)
                    raise exception.BadStoreConfiguration(
                        store_name="filesystem", reason=reason)

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options
        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        # Exactly one of the two datadir options must be supplied.
        if not (CONF.filesystem_store_datadir
                or CONF.filesystem_store_datadirs):
            reason = (_("Specify at least 'filesystem_store_datadir' or "
                        "'filesystem_store_datadirs' option"))
            LOG.error(reason)
            raise exception.BadStoreConfiguration(store_name="filesystem",
                                                  reason=reason)

        if CONF.filesystem_store_datadir and CONF.filesystem_store_datadirs:
            reason = (_("Specify either 'filesystem_store_datadir' or "
                        "'filesystem_store_datadirs' option"))
            LOG.error(reason)
            raise exception.BadStoreConfiguration(store_name="filesystem",
                                                  reason=reason)

        self.multiple_datadirs = False
        directory_paths = set()
        if CONF.filesystem_store_datadir:
            self.datadir = CONF.filesystem_store_datadir
            directory_paths.add(self.datadir)
        else:
            self.multiple_datadirs = True
            # Map of priority (int) -> list of datadir paths, plus the
            # priorities sorted highest first for placement lookups.
            self.priority_data_map = {}
            for datadir in CONF.filesystem_store_datadirs:
                (datadir_path,
                 priority) = self._get_datadir_path_and_priority(datadir)
                self._check_directory_paths(datadir_path, directory_paths)
                directory_paths.add(datadir_path)
                self.priority_data_map.setdefault(int(priority),
                                                  []).append(datadir_path)
            self.priority_list = sorted(self.priority_data_map,
                                        reverse=True)
        self._create_image_directories(directory_paths)

    def _check_directory_paths(self, datadir_path, directory_paths):
        """
        Checks if directory_path is already present in directory_paths.

        :datadir_path is directory path.
        :datadir_paths is set of all directory paths.
        :raise BadStoreConfiguration exception if same directory path is
               already present in directory_paths.
        """
        if datadir_path in directory_paths:
            msg = (_("Directory %(datadir_path)s specified "
                     "multiple times in filesystem_store_datadirs "
                     "option of filesystem configuration") %
                   {'datadir_path': datadir_path})
            LOG.exception(msg)
            raise exception.BadStoreConfiguration(
                store_name="filesystem", reason=msg)

    def _get_datadir_path_and_priority(self, datadir):
        """
        Gets directory paths and its priority from
        filesystem_store_datadirs option in glance-api.conf.

        :datadir is directory path with its priority.
        :returns datadir_path as directory path
                 priority as priority associated with datadir_path
        :raise BadStoreConfiguration exception if priority is invalid or
               empty directory path is specified.
        """
        priority = 0
        # NOTE(review): subscripting the result of map() relies on
        # Python 2 map() returning a list -- confirm before porting.
        parts = map(lambda x: x.strip(), datadir.rsplit(":", 1))
        datadir_path = parts[0]
        if len(parts) == 2 and parts[1]:
            priority = parts[1]
            if not priority.isdigit():
                msg = (_("Invalid priority value %(priority)s in "
                         "filesystem configuration") % {'priority': priority})
                LOG.exception(msg)
                raise exception.BadStoreConfiguration(
                    store_name="filesystem", reason=msg)

        if not datadir_path:
            msg = _("Invalid directory specified in filesystem configuration")
            LOG.exception(msg)
            raise exception.BadStoreConfiguration(
                store_name="filesystem", reason=msg)

        return datadir_path, priority

    @staticmethod
    def _resolve_location(location):
        # Translate a Location into (filepath, filesize), verifying the
        # file actually exists on disk.
        filepath = location.store_location.path

        if not os.path.exists(filepath):
            raise exception.NotFound(_("Image file %s not found") % filepath)

        filesize = os.path.getsize(filepath)
        return filepath, filesize

    def _get_metadata(self):
        # Load the operator-supplied location metadata JSON, returning an
        # empty dict on any failure so uploads are never blocked by it.
        if CONF.filesystem_store_metadata_file is None:
            return {}

        try:
            with open(CONF.filesystem_store_metadata_file, 'r') as fptr:
                metadata = jsonutils.load(fptr)
            glance.store.check_location_metadata(metadata)
            return metadata
        except glance.store.BackendException as bee:
            LOG.error(_('The JSON in the metadata file %(file)s could not be '
                        'used: %(error)s  An empty dictionary will be '
                        'returned to the client.') %
                      {'file': CONF.filesystem_store_metadata_file,
                       'error': utils.exception_to_str(bee)})
            return {}
        except IOError as ioe:
            LOG.error(_('The path for the metadata file %(file)s could not be '
                        'opened: %(error)s  An empty dictionary will be '
                        'returned to the client.') %
                      {'file': CONF.filesystem_store_metadata_file,
                       'error': utils.exception_to_str(ioe)})
            return {}
        except Exception as ex:
            LOG.exception(_('An error occurred processing the storage systems '
                            'meta data file: %s.  An empty dictionary will be '
                            'returned to the client.') %
                          utils.exception_to_str(ex))
            return {}

    def get(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        """
        filepath, filesize = self._resolve_location(location)
        msg = "Found image at %s. Returning in ChunkedFile." % filepath
        LOG.debug(msg)
        return (ChunkedFile(filepath), filesize)

    def get_size(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file and returns the image size

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        :rtype int
        """
        filepath, filesize = self._resolve_location(location)
        msg = "Found image at %s." % filepath
        LOG.debug(msg)
        return filesize

    def delete(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file to delete

        :location `glance.store.location.Location` object, supplied
                  from glance.store.location.get_location_from_uri()

        :raises NotFound if image does not exist
        :raises Forbidden if cannot delete because of permissions
        """
        loc = location.store_location
        fn = loc.path
        if os.path.exists(fn):
            try:
                LOG.debug("Deleting image at %(fn)s", {'fn': fn})
                os.unlink(fn)
            except OSError:
                raise exception.Forbidden(_("You cannot delete file %s") % fn)
        else:
            raise exception.NotFound(_("Image file %s does not exist") % fn)

    def _get_capacity_info(self, mount_point):
        """Calculates total available space for given mount point.

        :mount_point is path of glance data directory
        """

        # Calculate total available space by shelling out to df(1) and
        # reading the "available" column of its POSIX-format output.
        df = processutils.execute("df", "-k", "-P",
                                  mount_point)[0].strip("'\n'")
        total_available_space = int(df.split('\n')[1].split()[3]) * units.Ki

        return max(0, total_available_space)

    def _find_best_datadir(self, image_size):
        """Finds the best datadir by priority and free space.

        Traverse directories returning the first one that has sufficient
        free space, in priority order. If two suitable directories have
        the same priority, choose the one with the most free space
        available.

        :image_size size of image being uploaded.
        :returns best_datadir as directory path of the best priority datadir.
        :raises exception.StorageFull if there is no datadir in
                self.priority_data_map that can accommodate the image.
        """
        if not self.multiple_datadirs:
            return self.datadir

        best_datadir = None
        max_free_space = 0
        for priority in self.priority_list:
            for datadir in self.priority_data_map.get(priority):
                free_space = self._get_capacity_info(datadir)
                if free_space >= image_size and free_space > max_free_space:
                    max_free_space = free_space
                    best_datadir = datadir

            # If datadir is found which can accommodate image and has maximum
            # free space for the given priority then break the loop,
            # else continue to lookup further.
            if best_datadir:
                break
        else:
            # for/else: no break means no directory could fit the image.
            msg = (_("There is no enough disk space left on the image "
                     "storage media. requested=%s") % image_size)
            LOG.exception(msg)
            raise exception.StorageFull(message=msg)

        return best_datadir

    def add(self, image_id, image_file, image_size):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval tuple of URL in backing store, bytes written, checksum
                and a dictionary with storage system specific information
        :raises `glance.common.exception.Duplicate` if the image already
                existed

        :note By default, the backend writes the image data to a file
              `/<DATADIR>/<ID>`, where <DATADIR> is the value of
              the filesystem_store_datadir configuration option and <ID>
              is the supplied image ID.
        """

        datadir = self._find_best_datadir(image_size)
        filepath = os.path.join(datadir, str(image_id))

        if os.path.exists(filepath):
            raise exception.Duplicate(_("Image file %s already exists!")
                                      % filepath)

        checksum = hashlib.md5()
        bytes_written = 0
        try:
            with open(filepath, 'wb') as f:
                for buf in utils.chunkreadable(image_file,
                                               self.WRITE_CHUNKSIZE):
                    bytes_written += len(buf)
                    checksum.update(buf)
                    f.write(buf)
        except IOError as e:
            # Clean up the partial file unless the failure was a permission
            # error (in which case the file likely was never created).
            if e.errno != errno.EACCES:
                self._delete_partial(filepath, image_id)
            exceptions = {errno.EFBIG: exception.StorageFull(),
                          errno.ENOSPC: exception.StorageFull(),
                          errno.EACCES: exception.StorageWriteDenied()}
            raise exceptions.get(e.errno, e)
        except Exception:
            with excutils.save_and_reraise_exception():
                self._delete_partial(filepath, image_id)

        checksum_hex = checksum.hexdigest()
        metadata = self._get_metadata()

        LOG.debug("Wrote %(bytes_written)d bytes to %(filepath)s with "
                  "checksum %(checksum_hex)s",
                  {'bytes_written': bytes_written,
                   'filepath': filepath,
                   'checksum_hex': checksum_hex})
        return ('file://%s' % filepath, bytes_written, checksum_hex, metadata)

    @staticmethod
    def _delete_partial(filepath, id):
        # Best-effort removal of a partially-written image file; failures
        # are logged but never raised.
        try:
            os.unlink(filepath)
        except Exception as e:
            msg = _('Unable to remove partial image data for image %(id)s: '
                    '%(error)s')
            LOG.error(msg % {'id': id,
                             'error': utils.exception_to_str(e)})
View File

@ -1,215 +0,0 @@
# Copyright 2013 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for GridFS"""
from __future__ import absolute_import
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
from glance.common import exception
from glance.openstack.common import excutils
import glance.openstack.common.log as logging
import glance.store.base
import glance.store.location
try:
import gridfs
import gridfs.errors
import pymongo
import pymongo.uri_parser as uri_parser
except ImportError:
pymongo = None
LOG = logging.getLogger(__name__)

# Configuration knobs for the GridFS backend; registered on the global
# CONF object so they can be set in glance's configuration files.
gridfs_opts = [
    cfg.StrOpt('mongodb_store_uri',
               help="Hostname or IP address of the instance to connect to, "
                    "or a mongodb URI, or a list of hostnames / mongodb URIs. "
                    "If host is an IPv6 literal it must be enclosed "
                    "in '[' and ']' characters following the RFC2732 "
                    "URL syntax (e.g. '[::1]' for localhost)."),
    cfg.StrOpt('mongodb_store_db', help='Database to use.'),
]

CONF = cfg.CONF
CONF.register_opts(gridfs_opts)
class StoreLocation(glance.store.location.StoreLocation):
    """Location of an image stored in GridFS.

    URIs take the form ``gridfs://<IMAGE_ID>``.  Connection information
    has been consciously omitted for security reasons, since this
    location will be stored in glance's database and can be queried
    from outside.

    Note(flaper87): Make connection info available if user wants so
    by adding a new configuration parameter `mongdb_store_insecure`.
    """

    def get_uri(self):
        # Only the image identifier is encoded into the URI.
        image_id = self.specs.get("image_id")
        return "gridfs://{0}".format(image_id)

    def parse_uri(self, uri):
        """
        This method should fix any issue with the passed URI. Right now,
        it just sets image_id value in the specs dict.

        :param uri: Current set URI
        """
        pieces = urlparse.urlparse(uri)
        assert pieces.scheme in ('gridfs',)
        self.specs["image_id"] = pieces.netloc
class Store(glance.store.base.Store):
    """GridFS adapter"""

    # Shown to operators in error messages about malformed locations.
    EXAMPLE_URL = "gridfs://<IMAGE_ID>"

    def get_schemes(self):
        # URI schemes this store handles when registered.
        return ('gridfs',)

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options
        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        if pymongo is None:
            # The optional module-level imports of pymongo/gridfs failed.
            msg = _("Missing dependencies: pymongo")
            raise exception.BadStoreConfiguration(store_name="gridfs",
                                                  reason=msg)
        self.mongodb_uri = self._option_get('mongodb_store_uri')

        # NOTE(review): _option_get raises when the option is unset, so the
        # fall-back to the database embedded in the URI looks unreachable
        # for 'mongodb_store_db' — confirm intended behavior.
        parsed = uri_parser.parse_uri(self.mongodb_uri)
        self.mongodb_db = self._option_get('mongodb_store_db') or \
            parsed.get("database")

        self.mongodb = pymongo.MongoClient(self.mongodb_uri)
        self.fs = gridfs.GridFS(self.mongodb[self.mongodb_db])

    def _option_get(self, param):
        # Read a required option from the global CONF object; a missing
        # value is a configuration error rather than a silent default.
        result = getattr(CONF, param)
        if not result:
            reason = (_("Could not find %(param)s in configuration "
                        "options.") % {'param': param})
            LOG.debug(reason)
            raise exception.BadStoreConfiguration(store_name="gridfs",
                                                  reason=reason)
        return result

    def get(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        """
        # GridOut objects are iterable, so the file itself serves as the
        # chunk generator; .length is the stored size in bytes.
        image = self._get_file(location)
        return (image, image.length)

    def get_size(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns the image_size (or 0
        if unavailable)

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        """
        try:
            key = self._get_file(location)
            return key.length
        except Exception:
            # Size is advisory here; any lookup failure maps to "unknown".
            return 0

    def _get_file(self, location):
        # Accept either a Location wrapper or a bare StoreLocation.
        store_location = location
        if isinstance(location, glance.store.location.Location):
            store_location = location.store_location
        try:
            # The image id is carried in the netloc of gridfs://<id>.
            parsed = urlparse.urlparse(store_location.get_uri())
            return self.fs.get(parsed.netloc)
        except gridfs.errors.NoFile:
            msg = ("Could not find %s image in GridFS"
                   % store_location.get_uri())
            LOG.debug(msg)
            raise exception.NotFound(msg)

    def add(self, image_id, image_file, image_size):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval tuple of URL in backing store, bytes written, checksum
                and a dictionary with storage system specific information
        :raises `glance.common.exception.Duplicate` if the image already
                existed
        """
        loc = StoreLocation({'image_id': image_id})

        if self.fs.exists(image_id):
            raise exception.Duplicate(_("GridFS already has an image at "
                                        "location %s") % loc.get_uri())

        LOG.debug("Adding a new image to GridFS with id %(id)s and "
                  "size %(size)s" % {'id': image_id,
                                     'size': image_size})

        try:
            # Store under the image id so lookups need no extra mapping.
            self.fs.put(image_file, _id=image_id)
            image = self._get_file(loc)
        except Exception:
            # Note(zhiyan): clean up already received data when
            # error occurs such as ImageSizeLimitExceeded exception.
            with excutils.save_and_reraise_exception():
                self.fs.delete(image_id)

        LOG.debug("Uploaded image %(id)s, md5 %(md5)s, length %(length)s "
                  "to GridFS" % {'id': image._id,
                                 'md5': image.md5,
                                 'length': image.length})

        # GridFS computes the md5 checksum itself during put().
        return (loc.get_uri(), image.length, image.md5, {})

    def delete(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file to delete

        :location `glance.store.location.Location` object, supplied
                  from glance.store.location.get_location_from_uri()

        :raises NotFound if image does not exist
        """
        # _get_file raises NotFound for us when the image is missing.
        image = self._get_file(location)
        self.fs.delete(image._id)
        LOG.debug("Deleted image %s from GridFS", image._id)

View File

@ -1,204 +0,0 @@
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib
import socket
import six.moves.urllib.parse as urlparse
from glance.common import exception
import glance.openstack.common.log as logging
import glance.store.base
import glance.store.location
LOG = logging.getLogger(__name__)

# Cap on how many HTTP redirects Store._query() will follow before
# raising MaxRedirectsExceeded.
MAX_REDIRECTS = 5
class StoreLocation(glance.store.location.StoreLocation):

    """Class describing an HTTP(S) URI"""

    def process_specs(self):
        # Populate attributes from the backend-specific specs mapping;
        # only 'netloc' is mandatory.
        self.scheme = self.specs.get('scheme', 'http')
        self.netloc = self.specs['netloc']
        self.user = self.specs.get('user')
        self.password = self.specs.get('password')
        self.path = self.specs.get('path')

    def _get_credstring(self):
        # 'user:password@' prefix for the URI, or '' when no user is set.
        if self.user:
            return '%s:%s@' % (self.user, self.password)
        return ''

    def get_uri(self):
        return "%s://%s%s%s" % (
            self.scheme,
            self._get_credstring(),
            self.netloc,
            self.path)

    def parse_uri(self, uri):
        """
        Parse URLs. This method fixes an issue where credentials specified
        in the URL are interpreted differently in Python 2.6.1+ than prior
        versions of Python.
        """
        pieces = urlparse.urlparse(uri)
        assert pieces.scheme in ('https', 'http')
        self.scheme = pieces.scheme
        netloc = pieces.netloc
        path = pieces.path
        try:
            # Credentials, when present, precede '@' in the netloc.
            if '@' in netloc:
                creds, netloc = netloc.split('@')
            else:
                creds = None
        except ValueError:
            # Python 2.6.1 compat
            # see lp659445 and Python issue7904
            if '@' in path:
                creds, path = path.split('@')
            else:
                creds = None
        if creds:
            try:
                self.user, self.password = creds.split(':')
            except ValueError:
                # Zero or more than one ':' in the credential string.
                reason = _("Credentials are not well-formatted.")
                LOG.info(reason)
                raise exception.BadStoreUri(message=reason)
        else:
            self.user = None
        if netloc == '':
            reason = _("No address specified in HTTP URL")
            LOG.info(reason)
            raise exception.BadStoreUri(message=reason)
        self.netloc = netloc
        self.path = path
def http_response_iterator(conn, response, size):
    """
    Return an iterator for a file-like object.

    The connection is closed once the response has been fully consumed.

    :param conn: HTTP(S) Connection
    :param response: httplib.HTTPResponse object
    :param size: Chunk size to iterate with
    """
    while True:
        data = response.read(size)
        if not data:
            break
        yield data
    conn.close()
class Store(glance.store.base.Store):

    """An implementation of the HTTP(S) Backend Adapter"""

    def get(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        """
        conn, resp, content_length = self._query(location, 'GET')
        iterator = http_response_iterator(conn, resp, self.READ_CHUNKSIZE)

        # Wrap the raw iterator so callers can seek/index into the body
        # via the glance.store.Indexable protocol.
        class ResponseIndexable(glance.store.Indexable):
            def another(self):
                try:
                    return self.wrapped.next()
                except StopIteration:
                    return ''

        return (ResponseIndexable(iterator, content_length), content_length)

    def get_schemes(self):
        # URI schemes this store handles when registered.
        return ('http', 'https')

    def get_size(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns the size

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        """
        try:
            # A HEAD request returns the size via Content-Length.
            size = self._query(location, 'HEAD')[2]
        except socket.error:
            reason = _("The HTTP URL is invalid.")
            LOG.info(reason)
            raise exception.BadStoreUri(message=reason)
        except Exception:
            # NOTE(flaper87): Catch more granular exceptions,
            # keeping this branch for backwards compatibility.
            return 0
        return size

    def _query(self, location, verb, depth=0):
        # Issue `verb` against the location, following up to
        # MAX_REDIRECTS 301/302 responses recursively.
        if depth > MAX_REDIRECTS:
            reason = ("The HTTP URL exceeded %s maximum "
                      "redirects." % MAX_REDIRECTS)
            LOG.debug(reason)
            raise exception.MaxRedirectsExceeded(redirects=MAX_REDIRECTS)
        loc = location.store_location
        conn_class = self._get_conn_class(loc)
        conn = conn_class(loc.netloc)
        conn.request(verb, loc.path, "", {})
        resp = conn.getresponse()

        # Check for bad status codes
        if resp.status >= 400:
            if resp.status == httplib.NOT_FOUND:
                reason = _("HTTP datastore could not find image at URI.")
                LOG.debug(reason)
                raise exception.NotFound(reason)

            reason = (_("HTTP URL returned a %s status code.") % resp.status)
            LOG.info(reason)
            raise exception.BadStoreUri(message=reason)

        location_header = resp.getheader("location")
        if location_header:
            # Only true redirects (301/302) are followed; anything else
            # carrying a Location header is treated as a bad URI.
            if resp.status not in (301, 302):
                reason = (_("The HTTP URL attempted to redirect with an "
                            "invalid %s status code.") % resp.status)
                LOG.info(reason)
                raise exception.BadStoreUri(message=reason)
            location_class = glance.store.location.Location
            new_loc = location_class(location.store_name,
                                     location.store_location.__class__,
                                     uri=location_header,
                                     image_id=location.image_id,
                                     store_specs=location.store_specs)
            return self._query(new_loc, verb, depth + 1)
        content_length = int(resp.getheader('content-length', 0))
        return (conn, resp, content_length)

    def _get_conn_class(self, loc):
        """
        Returns connection class for accessing the resource. Useful
        for dependency injection and stubouts in testing...
        """
        return {'http': httplib.HTTPConnection,
                'https': httplib.HTTPSConnection}[loc.scheme]

View File

@ -1,165 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A class that describes the location of an image in Glance.
In Glance, an image can either be **stored** in Glance, or it can be
**registered** in Glance but actually be stored somewhere else.
We needed a class that could support the various ways that Glance
describes where exactly an image is stored.
An image in Glance has two location properties: the image URI
and the image storage URI.
The image URI is essentially the permalink identifier for the image.
It is displayed in the output of various Glance API calls and,
while read-only, is entirely user-facing. It shall **not** contain any
security credential information at all. The Glance image URI shall
be the host:port of that Glance API server along with /images/<IMAGE_ID>.
The Glance storage URI is an internal URI structure that Glance
uses to maintain critical information about how to access the images
that it stores in its storage backends. It **may contain** security
credentials and is **not** user-facing.
"""
import six.moves.urllib.parse as urlparse
from glance.common import exception
import glance.openstack.common.log as logging
LOG = logging.getLogger(__name__)

# Mapping of URI scheme -> scheme info dict (including 'location_class'),
# populated at runtime via register_scheme_map() as stores are loaded.
SCHEME_TO_CLS_MAP = {}
def get_location_from_uri(uri):
    """
    Given a URI, return a Location object that has had an appropriate
    store parse the URI.

    :param uri: A URI that could come from the end-user in the Location
                attribute/header
    :raises exception.UnknownScheme: when no store has registered the
                                     URI's scheme

    Example URIs:
        https://user:pass@example.com:80/images/some-id
        http://images.oracle.com/123456
        swift://example.com/container/obj-id
        swift://user:account:pass@authurl.com/container/obj-id
        swift+http://user:account:pass@authurl.com/container/obj-id
        s3://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
        s3+https://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
        file:///var/lib/glance/images/1
        cinder://volume-id
        vsphere://server_host/folder/file_path?dcPath=dc_path&dsName=ds_name
    """
    pieces = urlparse.urlparse(uri)
    # Membership test directly against the dict avoids materializing
    # the key list (the `.keys()` call was redundant).
    if pieces.scheme not in SCHEME_TO_CLS_MAP:
        raise exception.UnknownScheme(scheme=pieces.scheme)
    scheme_info = SCHEME_TO_CLS_MAP[pieces.scheme]
    return Location(pieces.scheme, uri=uri,
                    store_location_class=scheme_info['location_class'])
def register_scheme_map(scheme_map):
    """
    Given a mapping of 'scheme' to store_name, adds the mapping to the
    known list of schemes if it does not already exist.

    Already-registered schemes are left untouched; the first
    registration for a scheme wins.
    """
    for (k, v) in scheme_map.items():
        if k not in SCHEME_TO_CLS_MAP:
            # Pass format args lazily so the string is only interpolated
            # when DEBUG logging is actually enabled.
            LOG.debug("Registering scheme %(k)s with %(v)s",
                      {'k': k, 'v': v})
            SCHEME_TO_CLS_MAP[k] = v
class Location(object):

    """
    Class describing the location of an image that Glance knows about
    """

    def __init__(self, store_name, store_location_class,
                 uri=None, image_id=None, store_specs=None):
        """
        Create a new Location object.

        :param store_name: The string identifier/scheme of the storage backend
        :param store_location_class: The store location class to use
                                     for this location instance.
        :param image_id: The identifier of the image in whatever storage
                         backend is used.
        :param uri: Optional URI to construct location from
        :param store_specs: Dictionary of information about the location
                            of the image that is dependent on the backend
                            store
        """
        # Always hand the store-location class a mapping, never None.
        specs = store_specs if store_specs else {}
        self.store_name = store_name
        self.image_id = image_id
        self.store_specs = specs
        self.store_location = store_location_class(specs)
        # A non-empty URI is parsed on top of whatever the specs set up.
        if uri:
            self.store_location.parse_uri(uri)

    def get_store_uri(self):
        """
        Return the internal storage URI built by the wrapped
        store-location object.  It may embed credentials and is not
        meant to be user-facing.
        """
        return self.store_location.get_uri()

    def get_uri(self):
        # The public, user-facing image URI is not tracked here.
        return None
class StoreLocation(object):

    """
    Base class that must be implemented by each store
    """

    def __init__(self, store_specs):
        self.specs = store_specs
        # Only derive attributes when there is something to process.
        if store_specs:
            self.process_specs()

    def process_specs(self):
        """
        Subclasses should implement any processing of the self.specs collection
        such as storing credentials and possibly establishing connections.
        """
        pass

    def get_uri(self):
        """
        Subclasses should implement a method that returns an internal URI that,
        when supplied to the StoreLocation instance, can be interpreted by the
        StoreLocation's parse_uri() method. The URI returned from this method
        shall never be public and only used internally within Glance, so it is
        fine to encode credentials in this URI.
        """
        raise NotImplementedError("StoreLocation subclass must implement "
                                  "get_uri()")

    def parse_uri(self, uri):
        """
        Subclasses should implement a method that accepts a string URI and
        sets appropriate internal fields such that a call to get_uri() will
        return a proper internal URI
        """
        raise NotImplementedError("StoreLocation subclass must implement "
                                  "parse_uri()")

View File

@ -1,395 +0,0 @@
# Copyright 2010-2011 Josh Durgin
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for RBD
(RADOS (Reliable Autonomic Distributed Object Store) Block Device)"""
from __future__ import absolute_import
from __future__ import with_statement
import hashlib
import math
from oslo.config import cfg
import six
import six.moves.urllib.parse as urlparse
from six import text_type
from glance.common import exception
from glance.common import utils
from glance import i18n
from glance.openstack.common import excutils
import glance.openstack.common.log as logging
from glance.openstack.common import units
import glance.store.base
import glance.store.location
try:
import rados
import rbd
except ImportError:
rados = None
rbd = None
# Defaults for the rbd_* configuration options below.
DEFAULT_POOL = 'images'
DEFAULT_CONFFILE = '/etc/ceph/ceph.conf'
DEFAULT_USER = None    # let librados decide based on the Ceph conf file
DEFAULT_CHUNKSIZE = 8  # in MiB
# Name used for the protected snapshot created on upload (new-format images).
DEFAULT_SNAPNAME = 'snap'

LOG = logging.getLogger(__name__)
_LI = i18n._LI

# Configuration knobs for the RBD backend; registered on the global
# CONF object so they can be set in glance's configuration files.
rbd_opts = [
    cfg.IntOpt('rbd_store_chunk_size', default=DEFAULT_CHUNKSIZE,
               help=_('RADOS images will be chunked into objects of this size '
                      '(in megabytes). For best performance, this should be '
                      'a power of two.')),
    cfg.StrOpt('rbd_store_pool', default=DEFAULT_POOL,
               help=_('RADOS pool in which images are stored.')),
    cfg.StrOpt('rbd_store_user', default=DEFAULT_USER,
               help=_('RADOS user to authenticate as (only applicable if '
                      'using Cephx. If <None>, a default will be chosen based '
                      'on the client. section in rbd_store_ceph_conf).')),
    cfg.StrOpt('rbd_store_ceph_conf', default=DEFAULT_CONFFILE,
               help=_('Ceph configuration file path. '
                      'If <None>, librados will locate the default config. '
                      'If using cephx authentication, this file should '
                      'include a reference to the right keyring '
                      'in a client.<USER> section.')),
]

CONF = cfg.CONF
CONF.register_opts(rbd_opts)
class StoreLocation(glance.store.location.StoreLocation):
    """
    Class describing a RBD URI. This is of the form:

        rbd://image

    or

        rbd://fsid/pool/image/snapshot
    """

    def process_specs(self):
        # convert to ascii since librbd doesn't handle unicode
        for key, value in six.iteritems(self.specs):
            self.specs[key] = str(value)
        self.fsid = self.specs.get('fsid')
        self.pool = self.specs.get('pool')
        self.image = self.specs.get('image')
        self.snapshot = self.specs.get('snapshot')

    def get_uri(self):
        # The long (4-component) form is only produced when fsid, pool
        # and snapshot are all known; otherwise fall back to rbd://image.
        if self.fsid and self.pool and self.snapshot:
            # ensure nothing contains / or any other url-unsafe character
            safe_fsid = urlparse.quote(self.fsid, '')
            safe_pool = urlparse.quote(self.pool, '')
            safe_image = urlparse.quote(self.image, '')
            safe_snapshot = urlparse.quote(self.snapshot, '')
            return "rbd://%s/%s/%s/%s" % (safe_fsid, safe_pool,
                                          safe_image, safe_snapshot)
        else:
            return "rbd://%s" % self.image

    def parse_uri(self, uri):
        """Populate fsid/pool/image/snapshot from an rbd:// URI.

        :raises exception.BadStoreUri: on any malformed URI
        """
        prefix = 'rbd://'
        if not uri.startswith(prefix):
            reason = _('URI must start with rbd://')
            msg = _LI("Invalid URI: %s") % reason
            LOG.info(msg)
            raise exception.BadStoreUri(message=reason)
        # convert to ascii since librbd doesn't handle unicode
        try:
            ascii_uri = str(uri)
        except UnicodeError:
            reason = _('URI contains non-ascii characters')
            msg = _LI("Invalid URI: %s") % reason
            LOG.info(msg)
            raise exception.BadStoreUri(message=reason)
        pieces = ascii_uri[len(prefix):].split('/')
        if len(pieces) == 1:
            # Short form: only the image name is given.
            self.fsid, self.pool, self.image, self.snapshot = \
                (None, None, pieces[0], None)
        elif len(pieces) == 4:
            self.fsid, self.pool, self.image, self.snapshot = \
                map(urlparse.unquote, pieces)
        else:
            reason = _('URI must have exactly 1 or 4 components')
            msg = _LI("Invalid URI: %s") % reason
            LOG.info(msg)
            raise exception.BadStoreUri(message=reason)
        if any(map(lambda p: p == '', pieces)):
            reason = _('URI cannot contain empty components')
            msg = _LI("Invalid URI: %s") % reason
            LOG.info(msg)
            raise exception.BadStoreUri(message=reason)
class ImageIterator(object):
    """
    Reads data from an RBD image, one chunk at a time.

    Connection parameters are captured from the owning store at
    construction time; a fresh librados connection is opened for each
    iteration pass.
    """

    def __init__(self, name, store):
        self.name = name
        self.pool = store.pool
        self.user = store.user
        self.conf_file = store.conf_file
        self.chunk_size = store.READ_CHUNKSIZE

    def __iter__(self):
        try:
            with rados.Rados(conffile=self.conf_file,
                             rados_id=self.user) as conn:
                with conn.open_ioctx(self.pool) as ioctx:
                    with rbd.Image(ioctx, self.name) as image:
                        img_info = image.stat()
                        size = img_info['size']
                        bytes_left = size
                        while bytes_left > 0:
                            length = min(self.chunk_size, bytes_left)
                            data = image.read(size - bytes_left, length)
                            bytes_left -= len(data)
                            yield data
                        # NOTE: a bare return is the correct way to finish
                        # a generator; the previous `raise StopIteration()`
                        # becomes a RuntimeError under PEP 479 (Py3.7+).
                        return
        except rbd.ImageNotFound:
            raise exception.NotFound(
                _('RBD image %s does not exist') % self.name)
class Store(glance.store.base.Store):
    """An implementation of the RBD backend adapter."""

    # Shown to operators in error messages about malformed locations.
    EXAMPLE_URL = "rbd://<FSID>/<POOL>/<IMAGE>/<SNAP>"

    def get_schemes(self):
        # URI schemes this store handles when registered.
        return ('rbd',)

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options
        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        try:
            self.READ_CHUNKSIZE = CONF.rbd_store_chunk_size * units.Mi
            self.WRITE_CHUNKSIZE = self.READ_CHUNKSIZE

            # these must not be unicode since they will be passed to a
            # non-unicode-aware C library
            self.pool = str(CONF.rbd_store_pool)
            self.user = str(CONF.rbd_store_user)
            self.conf_file = str(CONF.rbd_store_ceph_conf)
        except cfg.ConfigFileValueError as e:
            reason = (_("Error in store configuration: %s") %
                      utils.exception_to_str(e))
            LOG.error(reason)
            raise exception.BadStoreConfiguration(store_name='rbd',
                                                  reason=reason)

    def get(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        """
        loc = location.store_location
        # ImageIterator opens its own connection lazily on iteration.
        return (ImageIterator(loc.image, self), self.get_size(location))

    def get_size(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns the size

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        """
        loc = location.store_location
        with rados.Rados(conffile=self.conf_file,
                         rados_id=self.user) as conn:
            with conn.open_ioctx(self.pool) as ioctx:
                try:
                    with rbd.Image(ioctx, loc.image,
                                   snapshot=loc.snapshot) as image:
                        img_info = image.stat()
                        return img_info['size']
                except rbd.ImageNotFound:
                    msg = 'RBD image %s does not exist' % loc.get_uri()
                    LOG.debug(msg)
                    raise exception.NotFound(msg)

    def _create_image(self, fsid, ioctx, image_name, size, order):
        """
        Create an rbd image. If librbd supports it,
        make it a cloneable snapshot, so that copy-on-write
        volumes can be created from it.

        :param image_name Image's name

        :retval `glance.store.rbd.StoreLocation` object
        """
        librbd = rbd.RBD()
        if hasattr(rbd, 'RBD_FEATURE_LAYERING'):
            # New-format image: supports layering, so uploads get a
            # protected snapshot that volumes can be cloned from.
            librbd.create(ioctx, image_name, size, order, old_format=False,
                          features=rbd.RBD_FEATURE_LAYERING)
            return StoreLocation({
                'fsid': fsid,
                'pool': self.pool,
                'image': image_name,
                'snapshot': DEFAULT_SNAPNAME,
            })
        else:
            librbd.create(ioctx, image_name, size, order, old_format=True)
            return StoreLocation({'image': image_name})

    def _delete_image(self, image_name, snapshot_name=None):
        """
        Delete RBD image and snapshot.

        :param image_name Image's name
        :param snapshot_name Image snapshot's name

        :raises NotFound if image does not exist;
                InUseByStore if image is in use or snapshot unprotect failed
        """
        with rados.Rados(conffile=self.conf_file, rados_id=self.user) as conn:
            with conn.open_ioctx(self.pool) as ioctx:
                try:
                    # First remove snapshot.
                    if snapshot_name is not None:
                        with rbd.Image(ioctx, image_name) as image:
                            try:
                                image.unprotect_snap(snapshot_name)
                            except rbd.ImageBusy:
                                log_msg = ("snapshot %(image)s@%(snap)s "
                                           "could not be unprotected because "
                                           "it is in use")
                                LOG.debug(log_msg %
                                          {'image': image_name,
                                           'snap': snapshot_name})
                                raise exception.InUseByStore()
                            image.remove_snap(snapshot_name)

                    # Then delete image.
                    rbd.RBD().remove(ioctx, image_name)
                except rbd.ImageNotFound:
                    raise exception.NotFound(
                        _("RBD image %s does not exist") % image_name)
                except rbd.ImageBusy:
                    log_msg = ("image %s could not be removed "
                               "because it is in use")
                    LOG.debug(log_msg % image_name)
                    raise exception.InUseByStore()

    def add(self, image_id, image_file, image_size):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval tuple of URL in backing store, bytes written, checksum
                and a dictionary with storage system specific information
        :raises `glance.common.exception.Duplicate` if the image already
                existed
        """
        checksum = hashlib.md5()
        image_name = str(image_id)
        with rados.Rados(conffile=self.conf_file, rados_id=self.user) as conn:
            fsid = None
            if hasattr(conn, 'get_fsid'):
                fsid = conn.get_fsid()
            with conn.open_ioctx(self.pool) as ioctx:
                # RBD object size is expressed as a power of two.
                order = int(math.log(self.WRITE_CHUNKSIZE, 2))
                LOG.debug('creating image %(name)s with order %(order)d and '
                          'size %(size)d',
                          {'name': text_type(image_name),
                           'order': order,
                           'size': image_size})
                if image_size == 0:
                    LOG.warning(_("since image size is zero we will be doing "
                                  "resize-before-write for each chunk which "
                                  "will be considerably slower than normal"))

                try:
                    loc = self._create_image(fsid, ioctx, image_name,
                                             image_size, order)
                except rbd.ImageExists:
                    raise exception.Duplicate(
                        _('RBD image %s already exists') % image_id)
                try:
                    with rbd.Image(ioctx, image_name) as image:
                        # bytes_written is only tracked (and later used)
                        # when the caller did not supply an image size.
                        bytes_written = 0
                        offset = 0
                        chunks = utils.chunkreadable(image_file,
                                                     self.WRITE_CHUNKSIZE)
                        for chunk in chunks:
                            # If the image size provided is zero we need to do
                            # a resize for the amount we are writing. This will
                            # be slower so setting a higher chunk size may
                            # speed things up a bit.
                            if image_size == 0:
                                chunk_length = len(chunk)
                                length = offset + chunk_length
                                bytes_written += chunk_length
                                LOG.debug("resizing image to %s KiB" %
                                          (length / units.Ki))
                                image.resize(length)
                            LOG.debug("writing chunk at offset %s" %
                                      (offset))
                            offset += image.write(chunk, offset)
                            checksum.update(chunk)
                        if loc.snapshot:
                            # Protect the snapshot so clones can be made.
                            image.create_snap(loc.snapshot)
                            image.protect_snap(loc.snapshot)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        # Delete image if one was created
                        try:
                            self._delete_image(loc.image, loc.snapshot)
                        except exception.NotFound:
                            pass

        # Make sure we send back the image size whether provided or inferred.
        if image_size == 0:
            image_size = bytes_written
        return (loc.get_uri(), image_size, checksum.hexdigest(), {})

    def delete(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file to delete.

        :location `glance.store.location.Location` object, supplied
                  from glance.store.location.get_location_from_uri()

        :raises NotFound if image does not exist;
                InUseByStore if image is in use or snapshot unprotect failed
        """
        loc = location.store_location
        self._delete_image(loc.image, loc.snapshot)

View File

@ -1,734 +0,0 @@
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for S3 or Storage Servers that follow the S3 Protocol"""
import hashlib
import httplib
import math
import re
import tempfile
import boto.exception
import eventlet
from oslo.config import cfg
import six
import six.moves.urllib.parse as urlparse
from glance.common import exception
from glance.common import utils
from glance import i18n
import glance.openstack.common.log as logging
from glance.openstack.common import units
import glance.store
import glance.store.base
import glance.store.location
LOG = logging.getLogger(__name__)
_LE = i18n._LE
_LI = i18n._LI

# Defaults for the multipart-upload related options below (values in MB
# unless otherwise noted).
DEFAULT_LARGE_OBJECT_SIZE = 100  # 100M
DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 10  # 10M
DEFAULT_LARGE_OBJECT_MIN_CHUNK_SIZE = 5  # 5M
DEFAULT_THREAD_POOLS = 10  # 10 pools

# Configuration knobs for the S3 backend; registered on the global
# CONF object so they can be set in glance's configuration files.
s3_opts = [
    cfg.StrOpt('s3_store_host',
               help=_('The host where the S3 server is listening.')),
    cfg.StrOpt('s3_store_access_key', secret=True,
               help=_('The S3 query token access key.')),
    cfg.StrOpt('s3_store_secret_key', secret=True,
               help=_('The S3 query token secret key.')),
    cfg.StrOpt('s3_store_bucket',
               help=_('The S3 bucket to be used to store the Glance data.')),
    cfg.StrOpt('s3_store_object_buffer_dir',
               help=_('The local directory where uploads will be staged '
                      'before they are transferred into S3.')),
    cfg.BoolOpt('s3_store_create_bucket_on_put', default=False,
                help=_('A boolean to determine if the S3 bucket should be '
                       'created on upload if it does not exist or if '
                       'an error should be returned to the user.')),
    cfg.StrOpt('s3_store_bucket_url_format', default='subdomain',
               help=_('The S3 calling format used to determine the bucket. '
                      'Either subdomain or path can be used.')),
    cfg.IntOpt('s3_store_large_object_size',
               default=DEFAULT_LARGE_OBJECT_SIZE,
               help=_('What size, in MB, should S3 start chunking image files '
                      'and do a multipart upload in S3.')),
    cfg.IntOpt('s3_store_large_object_chunk_size',
               default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE,
               help=_('What multipart upload part size, in MB, should S3 use '
                      'when uploading parts. The size must be greater than or '
                      'equal to 5M.')),
    cfg.IntOpt('s3_store_thread_pools', default=DEFAULT_THREAD_POOLS,
               help=_('The number of thread pools to perform a multipart '
                      'upload in S3.')),
]

CONF = cfg.CONF
CONF.register_opts(s3_opts)
class UploadPart:
    """
    Bookkeeping for one part of an S3 multipart upload.
    """

    def __init__(self, mpu, fp, partnum, chunks):
        # Handles to the multipart upload and this part's data stream.
        self.mpu = mpu
        self.fp = fp
        # Position of the part within the upload and its chunk size.
        self.partnum = partnum
        self.chunks = chunks
        # Result fields, filled in by run_upload() after the transfer.
        self.size = 0
        self.etag = {}  # partnum -> etag
        self.success = True
def run_upload(part):
    """
    Upload the upload part into S3 and set returned etag and size
    to its part info.

    On any failure the part is marked unsuccessful rather than raising,
    so the thread pool driving these uploads never dies mid-transfer.
    The part's file object is always closed.
    """
    pnum = part.partnum
    bsize = part.chunks
    LOG.info(_LI("Uploading upload part in S3 partnum=%(pnum)d, "
                 "size=%(bsize)d, key=%(key)s, UploadId=%(UploadId)s") %
             {'pnum': pnum,
              'bsize': bsize,
              'key': part.mpu.key_name,
              'UploadId': part.mpu.id})

    try:
        key = part.mpu.upload_part_from_file(part.fp,
                                             part_num=part.partnum,
                                             size=bsize)
        # Record the results S3 reported for this part.
        part.etag[part.partnum] = key.etag
        part.size = key.size
    except boto.exception.BotoServerError as e:
        # Server-side rejection: log the S3 status/reason for diagnosis.
        status = e.status
        reason = e.reason
        LOG.error(_LE("Failed to upload part in S3 partnum=%(pnum)d, "
                      "size=%(bsize)d, status=%(status)d, "
                      "reason=%(reason)s") %
                  {'pnum': pnum,
                   'bsize': bsize,
                   'status': status,
                   'reason': reason})
        part.success = False
    except Exception as e:
        # Anything else (network, local I/O, ...) is an internal error.
        LOG.error(_LE("Failed to upload part in S3 partnum=%(pnum)d, "
                      "size=%(bsize)d due to internal error: %(err)s") %
                  {'pnum': pnum,
                   'bsize': bsize,
                   'err': utils.exception_to_str(e)})
        part.success = False
    finally:
        part.fp.close()
class StoreLocation(glance.store.location.StoreLocation):
    """
    Class describing an S3 URI. An S3 URI can look like any of
    the following:

        s3://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
        s3+http://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
        s3+https://accesskey:secretkey@s3.amazonaws.com/bucket/key-id

    The s3+https:// URIs indicate there is an HTTPS s3service URL
    """
    def process_specs(self):
        # Build the location fields from a specs dict (as passed by
        # Store.add() when constructing a new location).
        self.scheme = self.specs.get('scheme', 's3')
        self.accesskey = self.specs.get('accesskey')
        self.secretkey = self.specs.get('secretkey')
        s3_host = self.specs.get('s3serviceurl')
        self.bucket = self.specs.get('bucket')
        self.key = self.specs.get('key')
        # The transport is tracked in the scheme (s3 vs s3+https), so
        # strip any http/https prefix off the service URL itself.
        if s3_host.startswith('https://'):
            self.scheme = 's3+https'
            s3_host = s3_host[8:].strip('/')
        elif s3_host.startswith('http://'):
            s3_host = s3_host[7:].strip('/')
        self.s3serviceurl = s3_host.strip('/')
    def _get_credstring(self):
        # Return the 'accesskey:secretkey@' URI prefix, or '' when no
        # credentials are present.
        if self.accesskey:
            return '%s:%s@' % (self.accesskey, self.secretkey)
        return ''
    def get_uri(self):
        # Reassemble the full URI, including credentials when set.
        return "%s://%s%s/%s/%s" % (
            self.scheme,
            self._get_credstring(),
            self.s3serviceurl,
            self.bucket,
            self.key)
    def parse_uri(self, uri):
        """
        Parse URLs. This method fixes an issue where credentials specified
        in the URL are interpreted differently in Python 2.6.1+ than prior
        versions of Python.

        Note that an Amazon AWS secret key can contain the forward slash,
        which breaks urlparse, so the credential split below is done by
        hand rather than trusting urlparse's result.
        """
        # Make sure that URIs that contain multiple schemes, such as:
        # s3://accesskey:secretkey@https://s3.amazonaws.com/bucket/key-id
        # are immediately rejected.
        if uri.count('://') != 1:
            reason = _("URI cannot contain more than one occurrence "
                       "of a scheme. If you have specified a URI like "
                       "s3://accesskey:secretkey@"
                       "https://s3.amazonaws.com/bucket/key-id"
                       ", you need to change it to use the "
                       "s3+https:// scheme, like so: "
                       "s3+https://accesskey:secretkey@"
                       "s3.amazonaws.com/bucket/key-id")
            LOG.info(_LI("Invalid store uri: %s") % reason)
            raise exception.BadStoreUri(message=reason)
        pieces = urlparse.urlparse(uri)
        # NOTE(review): assert is stripped when Python runs with -O;
        # raising BadStoreUri would be a safer validation here.
        assert pieces.scheme in ('s3', 's3+http', 's3+https')
        self.scheme = pieces.scheme
        path = pieces.path.strip('/')
        netloc = pieces.netloc.strip('/')
        entire_path = (netloc + '/' + path).strip('/')
        if '@' in uri:
            # Credentials precede the '@'; split manually because a
            # secret key may contain '/' which confuses urlparse.
            creds, path = entire_path.split('@')
            cred_parts = creds.split(':')
            try:
                access_key = cred_parts[0]
                secret_key = cred_parts[1]
                # NOTE(jaypipes): Need to encode to UTF-8 here because of a
                # bug in the HMAC library that boto uses.
                # See: http://bugs.python.org/issue5285
                # See: http://trac.edgewall.org/ticket/8083
                access_key = access_key.encode('utf-8')
                secret_key = secret_key.encode('utf-8')
                self.accesskey = access_key
                self.secretkey = secret_key
            except IndexError:
                reason = _("Badly formed S3 credentials")
                LOG.info(reason)
                raise exception.BadStoreUri(message=reason)
        else:
            self.accesskey = None
            path = entire_path
        try:
            # Remaining path layout: <s3serviceurl>/<bucket>/<key>.
            path_parts = path.split('/')
            self.key = path_parts.pop()
            self.bucket = path_parts.pop()
            if path_parts:
                self.s3serviceurl = '/'.join(path_parts).strip('/')
            else:
                reason = _("Badly formed S3 URI. Missing s3 service URL.")
                raise exception.BadStoreUri(message=reason)
        except IndexError:
            reason = _("Badly formed S3 URI")
            LOG.info(reason)
            raise exception.BadStoreUri(message=reason)
class ChunkedFile(object):
    """Iterable wrapper around a ``boto.s3.key.Key``.

    Handed back to the Glance API server so the image payload can be
    streamed chunk by chunk instead of being read into memory at once.
    """
    def __init__(self, fp):
        self.fp = fp

    def __iter__(self):
        """Return an iterator over the image file"""
        try:
            if not self.fp:
                return
            # Stream until the underlying key is exhausted.
            while True:
                piece = self.fp.read(Store.READ_CHUNKSIZE)
                if not piece:
                    break
                yield piece
        finally:
            # Always release the key, even on partial reads.
            self.close()

    def getvalue(self):
        """Return entire string value... used in testing."""
        buf = ""
        self.len = 0
        for piece in self:
            buf += piece
            self.len += len(piece)
        return buf

    def close(self):
        """Close the internal file pointer."""
        if self.fp:
            self.fp.close()
            self.fp = None
class Store(glance.store.base.Store):
    """An implementation of the s3 adapter."""
    # Chunk sizes (bytes) used when streaming image data to/from S3.
    READ_CHUNKSIZE = 64 * units.Ki
    WRITE_CHUNKSIZE = READ_CHUNKSIZE
    EXAMPLE_URL = "s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>"
    def get_schemes(self):
        # URI schemes this store can handle.
        return ('s3', 's3+http', 's3+https')
    def configure_add(self):
        """
        Configure the Store to use the stored configuration options

        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        self.s3_host = self._option_get('s3_store_host')
        access_key = self._option_get('s3_store_access_key')
        secret_key = self._option_get('s3_store_secret_key')
        # NOTE(jaypipes): Need to encode to UTF-8 here because of a
        # bug in the HMAC library that boto uses.
        # See: http://bugs.python.org/issue5285
        # See: http://trac.edgewall.org/ticket/8083
        self.access_key = access_key.encode('utf-8')
        self.secret_key = secret_key.encode('utf-8')
        self.bucket = self._option_get('s3_store_bucket')
        # Derive the URI scheme from the configured host; the scheme
        # is tracked separately from the full (scheme-prefixed) host.
        self.scheme = 's3'
        if self.s3_host.startswith('https://'):
            self.scheme = 's3+https'
            self.full_s3_host = self.s3_host
        elif self.s3_host.startswith('http://'):
            self.full_s3_host = self.s3_host
        else:  # Defaults http
            self.full_s3_host = 'http://' + self.s3_host
        self.s3_store_object_buffer_dir = CONF.s3_store_object_buffer_dir
        # Convert MB-denominated options into byte counts.
        _s3_obj_size = CONF.s3_store_large_object_size
        self.s3_store_large_object_size = _s3_obj_size * units.Mi
        _s3_ck_size = CONF.s3_store_large_object_chunk_size
        _s3_ck_min = DEFAULT_LARGE_OBJECT_MIN_CHUNK_SIZE
        # S3 rejects multipart parts smaller than the service minimum.
        if _s3_ck_size < _s3_ck_min:
            reason = (_("s3_store_large_object_chunk_size must be at "
                        "least %(_s3_ck_min)d MB. "
                        "You configured it as %(_s3_ck_size)d MB") %
                      {'_s3_ck_min': _s3_ck_min,
                       '_s3_ck_size': _s3_ck_size})
            LOG.error(reason)
            raise exception.BadStoreConfiguration(store_name="s3",
                                                  reason=reason)
        self.s3_store_large_object_chunk_size = _s3_ck_size * units.Mi
        if CONF.s3_store_thread_pools <= 0:
            reason = (_("s3_store_thread_pools must be a positive "
                        "integer. %s") % CONF.s3_store_thread_pools)
            LOG.error(reason)
            raise exception.BadStoreConfiguration(store_name="s3",
                                                  reason=reason)
    def _option_get(self, param):
        # Return the config value for *param*, failing loudly when it
        # is unset since S3 cannot work without it.
        result = getattr(CONF, param)
        if not result:
            reason = ("Could not find %(param)s in configuration "
                      "options." % {'param': param})
            LOG.debug(reason)
            raise exception.BadStoreConfiguration(store_name="s3",
                                                  reason=reason)
        return result
    def get(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        """
        key = self._retrieve_key(location)
        key.BufferSize = self.READ_CHUNKSIZE
        class ChunkedIndexable(glance.store.Indexable):
            def another(self):
                # NOTE(review): READ_CHUNKSIZE is looked up on the
                # Indexable instance, not the Store — confirm the
                # attribute actually resolves at runtime.
                return (self.wrapped.fp.read(self.READ_CHUNKSIZE)
                        if self.wrapped.fp else None)
        return (ChunkedIndexable(ChunkedFile(key), key.size), key.size)
    def get_size(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns the image_size (or 0
        if unavailable)

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        """
        try:
            key = self._retrieve_key(location)
            return key.size
        except Exception:
            # Size is advisory only, so any failure maps to "unknown".
            return 0
    def _retrieve_key(self, location):
        # Open an S3 connection described by *location* and return the
        # boto key for the stored object.
        loc = location.store_location
        from boto.s3.connection import S3Connection
        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'),
                               calling_format=get_calling_format())
        bucket_obj = get_bucket(s3_conn, loc.bucket)
        key = get_key(bucket_obj, loc.key)
        msg = ("Retrieved image object from S3 using (s3_host=%(s3_host)s, "
               "access_key=%(accesskey)s, bucket=%(bucket)s, "
               "key=%(obj_name)s)" % ({'s3_host': loc.s3serviceurl,
                                       'accesskey': loc.accesskey,
                                       'bucket': loc.bucket,
                                       'obj_name': loc.key}))
        LOG.debug(msg)
        return key
    def add(self, image_id, image_file, image_size):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes
        :retval tuple of URL in backing store, bytes written, checksum
                and a dictionary with storage system specific information
        :raises `glance.common.exception.Duplicate` if the image already
                existed

        S3 writes the image data using the scheme:
            s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>
        where:
            <USER> = ``s3_store_user``
            <KEY> = ``s3_store_key``
            <S3_HOST> = ``s3_store_host``
            <BUCKET> = ``s3_store_bucket``
            <ID> = The id of the image being added
        """
        from boto.s3.connection import S3Connection
        loc = StoreLocation({'scheme': self.scheme,
                             'bucket': self.bucket,
                             'key': image_id,
                             's3serviceurl': self.full_s3_host,
                             'accesskey': self.access_key,
                             'secretkey': self.secret_key})
        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'),
                               calling_format=get_calling_format())
        create_bucket_if_missing(self.bucket, s3_conn)
        bucket_obj = get_bucket(s3_conn, self.bucket)
        obj_name = str(image_id)
        def _sanitize(uri):
            # Mask embedded credentials before a URI is logged.
            return re.sub('//.*:.*@',
                          '//s3_store_secret_key:s3_store_access_key@',
                          uri)
        key = bucket_obj.get_key(obj_name)
        if key and key.exists():
            raise exception.Duplicate(_("S3 already has an image at "
                                        "location %s") %
                                      _sanitize(loc.get_uri()))
        msg = ("Adding image object to S3 using (s3_host=%(s3_host)s, "
               "access_key=%(access_key)s, bucket=%(bucket)s, "
               "key=%(obj_name)s)" % ({'s3_host': self.s3_host,
                                       'access_key': self.access_key,
                                       'bucket': self.bucket,
                                       'obj_name': obj_name}))
        LOG.debug(msg)
        LOG.debug("Uploading an image file to S3 for %s" %
                  _sanitize(loc.get_uri()))
        # Small images are uploaded in one shot; larger ones go through
        # the S3 multipart-upload path in the else branch below.
        if image_size < self.s3_store_large_object_size:
            key = bucket_obj.new_key(obj_name)
            # We need to wrap image_file, which is a reference to the
            # webob.Request.body_file, with a seekable file-like object,
            # otherwise the call to set_contents_from_file() will die
            # with an error about Input object has no method 'seek'. We
            # might want to call webob.Request.make_body_seekable(), but
            # unfortunately, that method copies the entire image into
            # memory and results in LP Bug #818292 occurring. So, here
            # we write temporary file in as memory-efficient manner as
            # possible and then supply the temporary file to S3. We also
            # take this opportunity to calculate the image checksum while
            # writing the tempfile, so we don't need to call key.compute_md5()
            msg = ("Writing request body file to temporary file "
                   "for %s") % _sanitize(loc.get_uri())
            LOG.debug(msg)
            tmpdir = self.s3_store_object_buffer_dir
            temp_file = tempfile.NamedTemporaryFile(dir=tmpdir)
            checksum = hashlib.md5()
            for chunk in utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE):
                checksum.update(chunk)
                temp_file.write(chunk)
            temp_file.flush()
            msg = ("Uploading temporary file to S3 "
                   "for %s") % _sanitize(loc.get_uri())
            LOG.debug(msg)
            # OK, now upload the data into the key
            key.set_contents_from_file(open(temp_file.name, 'rb'),
                                       replace=False)
            size = key.size
            checksum_hex = checksum.hexdigest()
            LOG.debug("Wrote %(size)d bytes to S3 key named %(obj_name)s "
                      "with checksum %(checksum_hex)s" %
                      {'size': size,
                       'obj_name': obj_name,
                       'checksum_hex': checksum_hex})
            return (loc.get_uri(), size, checksum_hex, {})
        else:
            checksum = hashlib.md5()
            parts = int(math.ceil(float(image_size) /
                                  float(self.s3_store_large_object_chunk_size)))
            threads = parts
            pool_size = CONF.s3_store_thread_pools
            pool = eventlet.greenpool.GreenPool(size=pool_size)
            mpu = bucket_obj.initiate_multipart_upload(obj_name)
            LOG.debug("Multipart initiate key=%(obj_name)s, "
                      "UploadId=%(UploadId)s" %
                      {'obj_name': obj_name,
                       'UploadId': mpu.id})
            cstart = 0
            plist = []
            it = utils.chunkreadable(image_file,
                                     self.s3_store_large_object_chunk_size)
            for p in range(threads):
                chunk = next(it)
                clen = len(chunk)
                checksum.update(chunk)
                fp = six.BytesIO(chunk)
                fp.seek(0)
                # S3 part numbers are 1-based, hence cstart + 1.
                part = UploadPart(mpu, fp, cstart + 1, clen)
                pool.spawn_n(run_upload, part)
                plist.append(part)
                cstart += 1
            pedict = {}
            total_size = 0
            # Wait for all part uploads to finish, then gather etags
            # and byte counts from each worker's UploadPart record.
            pool.waitall()
            for part in plist:
                pedict.update(part.etag)
                total_size += part.size
            success = True
            for part in plist:
                if not part.success:
                    success = False
            if success:
                # Complete
                xml = get_mpu_xml(pedict)
                bucket_obj.complete_multipart_upload(obj_name,
                                                     mpu.id,
                                                     xml)
                checksum_hex = checksum.hexdigest()
                # NOTE(review): 'obj_name' appears twice in this dict
                # literal; the duplicate key is redundant but harmless.
                LOG.info(_LI("Multipart complete key=%(obj_name)s "
                             "UploadId=%(UploadId)s "
                             "Wrote %(total_size)d bytes to S3 key"
                             "named %(obj_name)s "
                             "with checksum %(checksum_hex)s") %
                         {'obj_name': obj_name,
                          'UploadId': mpu.id,
                          'total_size': total_size,
                          'obj_name': obj_name,
                          'checksum_hex': checksum_hex})
                return (loc.get_uri(), total_size, checksum_hex, {})
            else:
                # Abort
                bucket_obj.cancel_multipart_upload(obj_name, mpu.id)
                LOG.error(_LE("Some parts failed to upload to S3. "
                              "Aborted the object key=%(obj_name)s") %
                          {'obj_name': obj_name})
                msg = (_("Failed to add image object to S3. "
                         "key=%(obj_name)s") % {'obj_name': obj_name})
                raise glance.store.BackendException(msg)
    def delete(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file to delete

        :location `glance.store.location.Location` object, supplied
                  from glance.store.location.get_location_from_uri()
        :raises NotFound if image does not exist
        """
        loc = location.store_location
        from boto.s3.connection import S3Connection
        s3_conn = S3Connection(loc.accesskey, loc.secretkey,
                               host=loc.s3serviceurl,
                               is_secure=(loc.scheme == 's3+https'),
                               calling_format=get_calling_format())
        bucket_obj = get_bucket(s3_conn, loc.bucket)
        # Close the key when we're through.
        key = get_key(bucket_obj, loc.key)
        msg = ("Deleting image object from S3 using (s3_host=%(s3_host)s, "
               "access_key=%(accesskey)s, bucket=%(bucket)s, "
               "key=%(obj_name)s)" % ({'s3_host': loc.s3serviceurl,
                                       'accesskey': loc.accesskey,
                                       'bucket': loc.bucket,
                                       'obj_name': loc.key}))
        LOG.debug(msg)
        return key.delete()
def get_bucket(conn, bucket_id):
    """Fetch a bucket object from an S3 connection.

    :param conn: The ``boto.s3.connection.S3Connection``
    :param bucket_id: ID of the bucket to fetch
    :raises ``glance.exception.NotFound`` if bucket is not found.
    """
    found = conn.get_bucket(bucket_id)
    if found:
        return found
    # Bucket lookup came back empty: surface it as a NotFound.
    msg = "Could not find bucket with ID %s" % bucket_id
    LOG.debug(msg)
    raise exception.NotFound(msg)
def get_s3_location(s3_host):
    """Map an S3 host name to a boto bucket-location constant.

    :param s3_host: the S3 endpoint, optionally including a scheme
                    (http/https) and/or a port
    :returns: the matching ``boto.s3.connection.Location`` constant,
              or ``Location.DEFAULT`` for unrecognized hosts
    """
    from boto.s3.connection import Location
    locations = {
        's3.amazonaws.com': Location.DEFAULT,
        's3-eu-west-1.amazonaws.com': Location.EU,
        's3-us-west-1.amazonaws.com': Location.USWest,
        's3-ap-southeast-1.amazonaws.com': Location.APSoutheast,
        's3-ap-northeast-1.amazonaws.com': Location.APNortheast,
    }
    # Strip off scheme and port if present.
    # NOTE: the replacement must be a raw string — '\g<host>' in a
    # plain string literal relies on '\g' being an unrecognized escape,
    # which raises warnings (and eventually errors) on Python 3.
    key = re.sub(r'^(https?://)?(?P<host>[^:]+)(:[0-9]+)?$',
                 r'\g<host>',
                 s3_host)
    return locations.get(key, Location.DEFAULT)
def create_bucket_if_missing(bucket, s3_conn):
    """Create *bucket* in S3 when it is absent and auto-creation is on.

    The bucket is only created when the
    ``s3_store_create_bucket_on_put`` option is set.

    :param bucket: Name of bucket to create
    :param s3_conn: Connection to S3
    """
    from boto.exception import S3ResponseError
    try:
        s3_conn.get_bucket(bucket)
        return
    except S3ResponseError as e:
        # NOTE(review): non-404 errors are silently ignored here —
        # confirm this best-effort behaviour is intentional.
        if e.status != httplib.NOT_FOUND:
            return
    if not CONF.s3_store_create_bucket_on_put:
        msg = (_("The bucket %(bucket)s does not exist in "
                 "S3. Please set the "
                 "s3_store_create_bucket_on_put option "
                 "to add bucket to S3 automatically.")
               % {'bucket': bucket})
        raise glance.store.BackendException(msg)
    location = get_s3_location(CONF.s3_store_host)
    try:
        s3_conn.create_bucket(bucket, location=location)
    except S3ResponseError as e:
        msg = (_("Failed to add bucket to S3.\n"
                 "Got error from S3: %s") %
               utils.exception_to_str(e))
        raise glance.store.BackendException(msg)
def get_key(bucket, obj):
    """Look up *obj* in *bucket* and return its key.

    :param bucket: The ``boto.s3.Bucket``
    :param obj: Object to get the key for
    :raises ``glance.exception.NotFound`` if key is not found.
    """
    key = bucket.get_key(obj)
    if key and key.exists():
        return key
    # Either no key came back or S3 says it no longer exists.
    msg = ("Could not find key %(obj)s in bucket %(bucket)s" %
           {'obj': obj, 'bucket': bucket})
    LOG.debug(msg)
    raise exception.NotFound(msg)
def get_calling_format(bucket_format=None):
    """Return the boto calling format matching *bucket_format*.

    :param bucket_format: 'path' or 'subdomain'; when None the
                          ``s3_store_bucket_url_format`` option is used
    """
    import boto.s3.connection
    fmt = bucket_format
    if fmt is None:
        fmt = CONF.s3_store_bucket_url_format
    if fmt.lower() != 'path':
        return boto.s3.connection.SubdomainCallingFormat()
    return boto.s3.connection.OrdinaryCallingFormat()
def get_mpu_xml(pedict):
    """Build the XML manifest for an S3 CompleteMultipartUpload call.

    :param pedict: mapping of part number -> etag
    :returns: XML document listing every part in ascending part-number
              order, as the S3 API requires
    """
    xml = '<CompleteMultipartUpload>\n'
    # S3 rejects manifests whose parts are not listed in ascending
    # PartNumber order, and dict iteration order is arbitrary here,
    # so sort explicitly. (items() also keeps this working on both
    # Python 2 and Python 3, unlike the previous iteritems().)
    for pnum, etag in sorted(pedict.items()):
        xml += '  <Part>\n'
        xml += '    <PartNumber>%d</PartNumber>\n' % pnum
        xml += '    <ETag>%s</ETag>\n' % etag
        xml += '  </Part>\n'
    xml += '</CompleteMultipartUpload>'
    return xml

View File

@ -1,319 +0,0 @@
# Copyright 2013 Taobao Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for Sheepdog storage system"""
import hashlib
from oslo.config import cfg
from glance.common import exception
from glance.common import utils
from glance.openstack.common import excutils
import glance.openstack.common.log as logging
from glance.openstack.common import processutils
from glance.openstack.common import units
import glance.store
import glance.store.base
import glance.store.location
LOG = logging.getLogger(__name__)
# Defaults for reaching the local sheep daemon and for image chunking.
DEFAULT_ADDR = '127.0.0.1'
DEFAULT_PORT = 7000
DEFAULT_CHUNKSIZE = 64  # in MiB
LOG = logging.getLogger(__name__)
# Sheepdog-specific configuration options, registered on the global
# config object below.
sheepdog_opts = [
    cfg.IntOpt('sheepdog_store_chunk_size', default=DEFAULT_CHUNKSIZE,
               help=_('Images will be chunked into objects of this size '
                      '(in megabytes). For best performance, this should be '
                      'a power of two.')),
    cfg.IntOpt('sheepdog_store_port', default=DEFAULT_PORT,
               help=_('Port of sheep daemon.')),
    cfg.StrOpt('sheepdog_store_address', default=DEFAULT_ADDR,
               help=_('IP address of sheep daemon.'))
]
CONF = cfg.CONF
CONF.register_opts(sheepdog_opts)
class SheepdogImage:
    """Proxy for a single image (VDI) stored in a Sheepdog cluster.

    Every operation shells out to the ``collie vdi`` command-line
    tool, addressed at the configured sheep daemon.
    """
    def __init__(self, addr, port, name, chunk_size):
        self.addr = addr
        self.port = port
        self.name = name
        self.chunk_size = chunk_size

    def _run_command(self, command, data, *params):
        """Run ``collie vdi <command> ...`` and return its stdout."""
        argv = ["collie", "vdi"]
        argv.extend(command)
        argv.extend(["-a", self.addr, "-p", self.port, self.name])
        argv.extend(params)
        try:
            return processutils.execute(*argv, process_input=data)[0]
        except (processutils.ProcessExecutionError, OSError) as exc:
            LOG.error(exc)
            raise glance.store.BackendException(exc)

    def get_size(self):
        """Return the size of this image in bytes.

        Sheepdog Usage: collie vdi list -r -a address -p port image
        """
        listing = self._run_command(["list", "-r"], None)
        return long(listing.split(' ')[3])

    def read(self, offset, count):
        """Read up to *count* bytes starting at *offset*.

        Sheepdog Usage: collie vdi read -a address -p port image offset len
        """
        return self._run_command(["read"], None, str(offset), str(count))

    def write(self, data, offset, count):
        """Write up to *count* bytes of *data* starting at *offset*.

        Sheepdog Usage: collie vdi write -a address -p port image offset len
        """
        self._run_command(["write"], data, str(offset), str(count))

    def create(self, size):
        """Create this image in the cluster with the given *size*.

        Sheepdog Usage: collie vdi create -a address -p port image size
        """
        self._run_command(["create"], None, str(size))

    def delete(self):
        """Delete this image from the cluster.

        Sheepdog Usage: collie vdi delete -a address -p port image
        """
        self._run_command(["delete"], None)

    def exist(self):
        """Return True when this image is present in the cluster.

        Sheepdog Usage: collie vdi list -r -a address -p port image
        """
        return bool(self._run_command(["list", "-r"], None))
class StoreLocation(glance.store.location.StoreLocation):
    """
    Class describing a Sheepdog URI. This is of the form:

        sheepdog://image-id
    """
    def process_specs(self):
        # 'image' is the Sheepdog VDI name (the Glance image id).
        self.image = self.specs.get('image')

    def get_uri(self):
        return "sheepdog://%s" % self.image

    def parse_uri(self, uri):
        """Parse a ``sheepdog://<image-id>`` URI.

        :raises exception.BadStoreUri: if the scheme is wrong or the
            image id is not UUID-like
        """
        valid_schema = 'sheepdog://'
        if not uri.startswith(valid_schema):
            # NOTE: valid_schema already ends with '://'; the previous
            # format string appended another '://', producing the bogus
            # message "URI must start with 'sheepdog://://'".
            reason = _("URI must start with '%s'") % valid_schema
            raise exception.BadStoreUri(message=reason)
        self.image = uri[len(valid_schema):]
        if not utils.is_uuid_like(self.image):
            reason = _("URI must contain a well-formed image id")
            raise exception.BadStoreUri(message=reason)
class ImageIterator(object):
    """
    Reads data from a Sheepdog image, one chunk at a time.
    """
    def __init__(self, image):
        self.image = image

    def __iter__(self):
        """Yield the image's bytes in ``image.chunk_size`` chunks."""
        image = self.image
        total = left = image.get_size()
        while left > 0:
            length = min(image.chunk_size, left)
            data = image.read(total - left, length)
            left -= len(data)
            yield data
        # NOTE: the generator simply ends here. The previous explicit
        # ``raise StopIteration()`` was redundant and, under PEP 479
        # (Python 3.7+), would be converted into a RuntimeError.
class Store(glance.store.base.Store):
    """Sheepdog backend adapter."""
    EXAMPLE_URL = "sheepdog://image"

    def get_schemes(self):
        # URI schemes this store can handle.
        return ('sheepdog',)

    def configure(self):
        """
        Configure the Store to use the stored configuration options

        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        try:
            # Chunk size option is in MiB; convert to bytes.
            self.READ_CHUNKSIZE = CONF.sheepdog_store_chunk_size * units.Mi
            self.WRITE_CHUNKSIZE = self.READ_CHUNKSIZE
            self.addr = CONF.sheepdog_store_address.strip()
            self.port = CONF.sheepdog_store_port
        except cfg.ConfigFileValueError as e:
            reason = (_("Error in store configuration: %s") %
                      utils.exception_to_str(e))
            LOG.error(reason)
            raise exception.BadStoreConfiguration(store_name='sheepdog',
                                                  reason=reason)
        if ' ' in self.addr:
            reason = (_("Invalid address configuration of sheepdog store: %s")
                      % self.addr)
            LOG.error(reason)
            raise exception.BadStoreConfiguration(store_name='sheepdog',
                                                  reason=reason)
        try:
            # Probe the sheep daemon so misconfiguration fails fast.
            cmd = ["collie", "vdi", "list", "-a", self.addr, "-p", self.port]
            processutils.execute(*cmd)
        except Exception as e:
            reason = (_("Error in store configuration: %s") %
                      utils.exception_to_str(e))
            LOG.error(reason)
            raise exception.BadStoreConfiguration(store_name='sheepdog',
                                                  reason=reason)

    def get(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns a generator for reading
        the image file

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        """
        loc = location.store_location
        image = SheepdogImage(self.addr, self.port, loc.image,
                              self.READ_CHUNKSIZE)
        if not image.exist():
            raise exception.NotFound(_("Sheepdog image %s does not exist")
                                     % image.name)
        return (ImageIterator(image), image.get_size())

    def get_size(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file and returns the image size

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        :rtype int
        """
        loc = location.store_location
        image = SheepdogImage(self.addr, self.port, loc.image,
                              self.READ_CHUNKSIZE)
        if not image.exist():
            raise exception.NotFound(_("Sheepdog image %s does not exist")
                                     % image.name)
        return image.get_size()

    def add(self, image_id, image_file, image_size):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes
        :retval tuple of URL in backing store, bytes written, and checksum
        :raises `glance.common.exception.Duplicate` if the image already
                existed
        """
        image = SheepdogImage(self.addr, self.port, image_id,
                              self.WRITE_CHUNKSIZE)
        if image.exist():
            raise exception.Duplicate(_("Sheepdog image %s already exists")
                                      % image_id)
        location = StoreLocation({'image': image_id})
        checksum = hashlib.md5()
        image.create(image_size)
        try:
            # Stream the image into the VDI one chunk at a time,
            # computing the checksum as we go.
            total = left = image_size
            while left > 0:
                length = min(self.WRITE_CHUNKSIZE, left)
                data = image_file.read(length)
                image.write(data, total - left, length)
                left -= length
                checksum.update(data)
        except Exception:
            # Note(zhiyan): clean up already received data when
            # error occurs such as ImageSizeLimitExceeded exception.
            with excutils.save_and_reraise_exception():
                image.delete()
        return (location.get_uri(), image_size, checksum.hexdigest(), {})

    def delete(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file to delete

        :location `glance.store.location.Location` object, supplied
                  from glance.store.location.get_location_from_uri()
        :raises NotFound if image does not exist
        """
        loc = location.store_location
        # NOTE: this previously read ``self.WRITe_CHUNKSIZE`` (typo),
        # which raised AttributeError on every delete.
        image = SheepdogImage(self.addr, self.port, loc.image,
                              self.WRITE_CHUNKSIZE)
        if not image.exist():
            raise exception.NotFound(_("Sheepdog image %s does not exist") %
                                     loc.image)
        image.delete()

View File

@ -1,826 +0,0 @@
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for SWIFT"""
from __future__ import absolute_import
import hashlib
import httplib
import math
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
import urllib
from glance.common import auth
from glance.common import exception
from glance.common import swift_store_utils
from glance.common import utils
from glance import i18n
from glance.openstack.common import excutils
import glance.openstack.common.log as logging
from glance.openstack.common import units
import glance.store
import glance.store.base
import glance.store.location
try:
import swiftclient
except ImportError:
pass
LOG = logging.getLogger(__name__)
_LI = i18n._LI
# Defaults for container naming and large-object segmentation.
DEFAULT_CONTAINER = 'glance'
DEFAULT_LARGE_OBJECT_SIZE = 5 * 1024  # 5GB
DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 200  # 200M
ONE_MB = 1000 * 1024
# Swift-specific configuration options, registered on the global
# config object below.
swift_opts = [
    cfg.BoolOpt('swift_enable_snet', default=False,
                help=_('Whether to use ServiceNET to communicate with the '
                       'Swift storage servers.')),
    cfg.StrOpt('swift_store_auth_version', default='2',
               help=_('Version of the authentication service to use. '
                      'Valid versions are 2 for keystone and 1 for swauth '
                      'and rackspace. (deprecated)')),
    cfg.BoolOpt('swift_store_auth_insecure', default=False,
                help=_('If True, swiftclient won\'t check for a valid SSL '
                       'certificate when authenticating.')),
    cfg.StrOpt('swift_store_region',
               help=_('The region of the swift endpoint to be used for '
                      'single tenant. This setting is only necessary if the '
                      'tenant has multiple swift endpoints.')),
    cfg.StrOpt('swift_store_endpoint_type', default='publicURL',
               help=_('A string giving the endpoint type of the swift '
                      'service to use (publicURL, adminURL or internalURL). '
                      'This setting is only used if swift_store_auth_version '
                      'is 2.')),
    cfg.StrOpt('swift_store_service_type', default='object-store',
               help=_('A string giving the service type of the swift service '
                      'to use. This setting is only used if '
                      'swift_store_auth_version is 2.')),
    cfg.StrOpt('swift_store_container',
               default=DEFAULT_CONTAINER,
               help=_('Container within the account that the account should '
                      'use for storing images in Swift.')),
    cfg.IntOpt('swift_store_large_object_size',
               default=DEFAULT_LARGE_OBJECT_SIZE,
               help=_('The size, in MB, that Glance will start chunking image '
                      'files and do a large object manifest in Swift.')),
    cfg.IntOpt('swift_store_large_object_chunk_size',
               default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE,
               help=_('The amount of data written to a temporary disk buffer '
                      'during the process of chunking the image file.')),
    cfg.BoolOpt('swift_store_create_container_on_put', default=False,
                help=_('A boolean value that determines if we create the '
                       'container if it does not exist.')),
    cfg.BoolOpt('swift_store_multi_tenant', default=False,
                help=_('If set to True, enables multi-tenant storage '
                       'mode which causes Glance images to be stored in '
                       'tenant specific Swift accounts.')),
    cfg.ListOpt('swift_store_admin_tenants', default=[],
                help=_('A list of tenants that will be granted read/write '
                       'access on all Swift containers created by Glance in '
                       'multi-tenant mode.')),
    cfg.BoolOpt('swift_store_ssl_compression', default=True,
                help=_('If set to False, disables SSL layer compression of '
                       'https swift requests. Setting to False may improve '
                       'performance for images which are already in a '
                       'compressed format, eg qcow2.')),
    cfg.IntOpt('swift_store_retry_get_count', default=0,
               help=_('The number of times a Swift download will be retried '
                      'before the request fails.'))
]
CONF = cfg.CONF
CONF.register_opts(swift_opts)
# Account references (user/key/auth_address) are read once at import
# time from the swift store config file.
SWIFT_STORE_REF_PARAMS = swift_store_utils.SwiftParams().params
def swift_retry_iter(resp_iter, length, store, location):
    """Yield image chunks from Swift, resuming interrupted downloads.

    Wraps *resp_iter* and, when fewer than *length* bytes arrive
    (e.g. after a swiftclient error), re-requests the object from
    *store* with a byte-range offset, up to
    ``swift_store_retry_get_count`` retries.
    """
    # Resolve the expected byte count: prefer the explicit length,
    # fall back to the iterator's own 'len' attribute when present.
    length = length if length else (resp_iter.len
                                    if hasattr(resp_iter, 'len') else 0)
    retries = 0
    bytes_read = 0
    while retries <= CONF.swift_store_retry_get_count:
        try:
            for chunk in resp_iter:
                yield chunk
                bytes_read += len(chunk)
        except swiftclient.ClientException as e:
            # Swallow the client error here; the short read is detected
            # below by comparing bytes_read against the expected length.
            LOG.warn(_("Swift exception raised %s") %
                     utils.exception_to_str(e))
        if bytes_read != length:
            if retries == CONF.swift_store_retry_get_count:
                # terminate silently and let higher level decide
                LOG.error(_("Stopping Swift retries after %d "
                            "attempts") % retries)
                break
            else:
                retries += 1
                LOG.info(_("Retrying Swift connection "
                           "(%(retries)d/%(max_retries)d) with "
                           "range=%(start)d-%(end)d") %
                         {'retries': retries,
                          'max_retries': CONF.swift_store_retry_get_count,
                          'start': bytes_read,
                          'end': length})
                # Re-fetch from the current offset so already-yielded
                # bytes are not duplicated.
                (resp_headers, resp_iter) = store._get_object(location, None,
                                                              bytes_read)
        else:
            break
class StoreLocation(glance.store.location.StoreLocation):
    """
    Class describing a Swift URI. A Swift URI can look like any of
    the following:

        swift://user:pass@authurl.com/container/obj-id
        swift://account:user:pass@authurl.com/container/obj-id
        swift+http://user:pass@authurl.com/container/obj-id
        swift+https://user:pass@authurl.com/container/obj-id

    When using multi-tenant a URI might look like this (a storage URL):

        swift+https://example.com/container/obj-id

    The swift+http:// URIs indicate there is an HTTP authentication URL.
    The default for Swift is an HTTPS authentication URL, so swift:// and
    swift+https:// are the same...
    """

    def process_specs(self):
        # Invoked by the base class constructor to populate this location
        # from the provided specs dictionary.
        self.scheme = self.specs.get('scheme', 'swift+https')
        self.user = self.specs.get('user')
        self.key = self.specs.get('key')
        self.auth_or_store_url = self.specs.get('auth_or_store_url')
        self.container = self.specs.get('container')
        self.obj = self.specs.get('obj')

    def _get_credstring(self):
        # Return 'user:key' URL-quoted, or '' when either credential is
        # missing.
        if self.user and self.key:
            return '%s:%s' % (urllib.quote(self.user), urllib.quote(self.key))
        return ''

    def get_uri(self, credentials_included=True):
        """Return the URI string for this location.

        :param credentials_included: when False, emit a swift+config://
            URI that references the configured account instead of
            embedding credentials (used only on add).
        """
        auth_or_store_url = self.auth_or_store_url
        if auth_or_store_url.startswith('http://'):
            auth_or_store_url = auth_or_store_url[len('http://'):]
        elif auth_or_store_url.startswith('https://'):
            auth_or_store_url = auth_or_store_url[len('https://'):]

        credstring = self._get_credstring()
        auth_or_store_url = auth_or_store_url.strip('/')
        container = self.container.strip('/')
        obj = self.obj.strip('/')

        if not credentials_included:
            # Used only in case of an add.
            # Get the current store from config.
            store = CONF.default_swift_reference
            return '%s://%s/%s/%s' % ('swift+config', store, container, obj)
        if self.scheme == 'swift+config':
            # Resolve the config reference back to a concrete scheme.
            # NOTE: was ``self.ssl_enabled == True`` (PEP8 E712); plain
            # truthiness is equivalent since ssl_enabled is a bool here.
            if self.ssl_enabled:
                self.scheme = 'swift+https'
            else:
                self.scheme = 'swift+http'
        if credstring != '':
            credstring = "%s@" % credstring
        return '%s://%s%s/%s/%s' % (self.scheme, credstring, auth_or_store_url,
                                    container, obj)

    def _get_conf_value_from_account_ref(self, netloc):
        # Resolve a swift+config account reference into the credentials
        # and auth address configured for it; returns the auth netloc.
        try:
            self.user = SWIFT_STORE_REF_PARAMS[netloc]['user']
            self.key = SWIFT_STORE_REF_PARAMS[netloc]['key']
            netloc = SWIFT_STORE_REF_PARAMS[netloc]['auth_address']
            self.ssl_enabled = True
            if netloc != '':
                if netloc.startswith('http://'):
                    self.ssl_enabled = False
                    netloc = netloc[len('http://'):]
                elif netloc.startswith('https://'):
                    netloc = netloc[len('https://'):]
        except KeyError:
            reason = _("Badly formed Swift URI. Credentials not found for "
                       "account reference")
            LOG.info(reason)
            raise exception.BadStoreUri(message=reason)
        return netloc

    def _form_uri_parts(self, netloc, path):
        # Split credentials out of either the netloc or (for older Python
        # urlparse behaviour) the path; returns the adjusted (netloc, path).
        if netloc != '':
            # > Python 2.6.1
            if '@' in netloc:
                creds, netloc = netloc.split('@')
            else:
                creds = None
        else:
            # Python 2.6.1 compat
            # see lp659445 and Python issue7904
            if '@' in path:
                creds, path = path.split('@')
            else:
                creds = None
            netloc = path[0:path.find('/')].strip('/')
            path = path[path.find('/'):].strip('/')
        if creds:
            cred_parts = creds.split(':')
            if len(cred_parts) < 2:
                reason = _("Badly formed credentials in Swift URI.")
                LOG.info(reason)
                raise exception.BadStoreUri(message=reason)
            key = cred_parts.pop()
            user = ':'.join(cred_parts)
            creds = urllib.unquote(creds)
            try:
                self.user, self.key = creds.rsplit(':', 1)
            except exception.BadStoreConfiguration:
                # NOTE(review): str.rsplit cannot raise
                # BadStoreConfiguration, so this fallback looks
                # unreachable; kept as-is pending confirmation.
                self.user = urllib.unquote(user)
                self.key = urllib.unquote(key)
        else:
            self.user = None
            self.key = None
        return netloc, path

    def _form_auth_or_store_url(self, netloc, path):
        # The last two path components are container and object; anything
        # before them (plus the netloc) forms the auth/storage URL.
        path_parts = path.split('/')
        try:
            self.obj = path_parts.pop()
            self.container = path_parts.pop()
            if not netloc.startswith('http'):
                # push hostname back into the remaining to build full authurl
                path_parts.insert(0, netloc)
                self.auth_or_store_url = '/'.join(path_parts)
        except IndexError:
            reason = _("Badly formed Swift URI.")
            LOG.info(reason)
            raise exception.BadStoreUri(message=reason)

    def parse_uri(self, uri):
        """
        Parse URLs. This method fixes an issue where credentials specified
        in the URL are interpreted differently in Python 2.6.1+ than prior
        versions of Python. It also deals with the peculiarity that new-style
        Swift URIs have where a username can contain a ':', like so:

            swift://account:user:pass@authurl.com/container/obj

        and for system created locations with account reference

            swift+config://account_reference/container/obj
        """
        # Make sure that URIs that contain multiple schemes, such as:
        # swift://user:pass@http://authurl.com/v1/container/obj
        # are immediately rejected.
        if uri.count('://') != 1:
            reason = _("URI cannot contain more than one occurrence "
                       "of a scheme. If you have specified a URI like "
                       "swift://user:pass@http://authurl.com/v1/container/obj"
                       ", you need to change it to use the "
                       "swift+http:// scheme, like so: "
                       "swift+http://user:pass@authurl.com/v1/container/obj")
            LOG.info(_LI("Invalid store URI: %(reason)s"), {'reason': reason})
            raise exception.BadStoreUri(message=reason)

        pieces = urlparse.urlparse(uri)
        assert pieces.scheme in ('swift', 'swift+http', 'swift+https',
                                 'swift+config')

        self.scheme = pieces.scheme
        netloc = pieces.netloc
        path = pieces.path.lstrip('/')

        # NOTE(Sridevi): Fix to map the account reference to the
        # corresponding CONF value
        if self.scheme == 'swift+config':
            netloc = self._get_conf_value_from_account_ref(netloc)
        else:
            netloc, path = self._form_uri_parts(netloc, path)

        self._form_auth_or_store_url(netloc, path)

    @property
    def swift_url(self):
        """
        Creates a fully-qualified auth address that the Swift client library
        can use. The scheme for the auth_address is determined using the
        scheme included in the `location` field.

        HTTPS is assumed, unless 'swift+http' is specified.
        """
        if self.auth_or_store_url.startswith('http'):
            return self.auth_or_store_url
        else:
            if self.scheme == 'swift+config':
                # NOTE: was ``self.ssl_enabled == True`` (PEP8 E712).
                if self.ssl_enabled:
                    self.scheme = 'swift+https'
                else:
                    self.scheme = 'swift+http'
            if self.scheme in ('swift+https', 'swift'):
                auth_scheme = 'https://'
            else:
                auth_scheme = 'http://'

            return ''.join([auth_scheme, self.auth_or_store_url])
def Store(context=None, loc=None, configure=True):
    """Factory returning the appropriate Swift store flavour.

    The multi-tenant store is chosen when it is enabled in configuration
    and the given location (if any) carries no explicit credentials;
    otherwise the single-tenant store is used.
    """
    multi_tenant = (CONF.swift_store_multi_tenant and
                    (loc is None or loc.store_location.user is None))
    store_class = MultiTenantStore if multi_tenant else SingleTenantStore
    return store_class(context, loc, configure=configure)
class BaseStore(glance.store.base.Store):
    """Common behaviour shared by the Swift store flavours.

    Subclasses (single- and multi-tenant) supply ``get_connection`` and
    ``create_location``; chunked uploads, manifest handling and deletion
    live here.
    """

    READ_CHUNKSIZE = 64 * units.Ki

    def get_schemes(self):
        return ('swift+https', 'swift', 'swift+http', 'swift+config')

    def configure(self):
        # Large-object options are mandatory; the configured MB values are
        # converted to bytes via ONE_MB.
        _obj_size = self._option_get('swift_store_large_object_size')
        self.large_object_size = _obj_size * ONE_MB
        _chunk_size = self._option_get('swift_store_large_object_chunk_size')
        self.large_object_chunk_size = _chunk_size * ONE_MB
        self.admin_tenants = CONF.swift_store_admin_tenants
        self.region = CONF.swift_store_region
        self.service_type = CONF.swift_store_service_type
        self.endpoint_type = CONF.swift_store_endpoint_type
        self.snet = CONF.swift_enable_snet
        self.insecure = CONF.swift_store_auth_insecure
        self.ssl_compression = CONF.swift_store_ssl_compression

    def _get_object(self, location, connection=None, start=None):
        # Fetch headers and a chunked body iterator for the object;
        # ``start`` turns the request into a ranged GET (used on retries).
        if not connection:
            connection = self.get_connection(location)
        headers = {}
        if start is not None:
            bytes_range = 'bytes=%d-' % start
            headers = {'Range': bytes_range}

        try:
            resp_headers, resp_body = connection.get_object(
                container=location.container, obj=location.obj,
                resp_chunk_size=self.READ_CHUNKSIZE, headers=headers)
        except swiftclient.ClientException as e:
            if e.http_status == httplib.NOT_FOUND:
                msg = _("Swift could not find object %s.") % location.obj
                LOG.warn(msg)
                raise exception.NotFound(msg)
            else:
                raise

        return (resp_headers, resp_body)

    def get(self, location, connection=None):
        """Return an (Indexable iterator, length) pair for the image."""
        location = location.store_location
        (resp_headers, resp_body) = self._get_object(location, connection)

        class ResponseIndexable(glance.store.Indexable):
            def another(self):
                try:
                    return self.wrapped.next()
                except StopIteration:
                    return ''

        length = int(resp_headers.get('content-length', 0))
        if CONF.swift_store_retry_get_count > 0:
            # Wrap the body so interrupted downloads are resumed with
            # ranged re-requests (see swift_retry_iter).
            resp_body = swift_retry_iter(resp_body, length, self, location)
        return (ResponseIndexable(resp_body, length), length)

    def get_size(self, location, connection=None):
        # Size is advisory; any failure is reported as "unknown" (0).
        location = location.store_location
        if not connection:
            connection = self.get_connection(location)
        try:
            resp_headers = connection.head_object(
                container=location.container, obj=location.obj)
            return int(resp_headers.get('content-length', 0))
        except Exception:
            return 0

    def _option_get(self, param):
        """Return CONF.<param>, raising BadStoreConfiguration if unset."""
        result = getattr(CONF, param)
        if not result:
            # BUG FIX: the original interpolated ``% param`` (a bare
            # string), which raises "TypeError: format requires a mapping"
            # for the named %(param)s placeholder; pass a mapping instead.
            reason = (_("Could not find %(param)s in configuration options.")
                      % {'param': param})
            LOG.error(reason)
            raise exception.BadStoreConfiguration(store_name="swift",
                                                  reason=reason)
        return result

    def _delete_stale_chunks(self, connection, container, chunk_list):
        # Best-effort cleanup of segments left behind by a failed chunked
        # upload; failures are logged, never raised.
        for chunk in chunk_list:
            LOG.debug("Deleting chunk %s" % chunk)
            try:
                connection.delete_object(container, chunk)
            except Exception:
                msg = _("Failed to delete orphaned chunk "
                        "%(container)s/%(chunk)s")
                LOG.exception(msg % {'container': container,
                                     'chunk': chunk})

    def add(self, image_id, image_file, image_size, connection=None):
        """Upload ``image_file`` to Swift.

        Images smaller than ``large_object_size`` are sent with a single
        PUT; larger (or unknown-size) images are uploaded as segments plus
        a manifest object.

        :returns: (uri, bytes written, checksum, {}) tuple
        :raises exception.Duplicate: if Swift already has this object
        :raises glance.store.BackendException: on other Swift errors
        """
        location = self.create_location(image_id)
        if not connection:
            connection = self.get_connection(location)

        self._create_container_if_missing(location.container, connection)

        LOG.debug("Adding image object '%(obj_name)s' "
                  "to Swift" % dict(obj_name=location.obj))
        try:
            if image_size > 0 and image_size < self.large_object_size:
                # Image size is known, and is less than large_object_size.
                # Send to Swift with regular PUT.
                obj_etag = connection.put_object(location.container,
                                                 location.obj, image_file,
                                                 content_length=image_size)
            else:
                # Write the image into Swift in chunks.
                chunk_id = 1
                if image_size > 0:
                    total_chunks = str(int(
                        math.ceil(float(image_size) /
                                  float(self.large_object_chunk_size))))
                else:
                    # image_size == 0 is when we don't know the size
                    # of the image. This can occur with older clients
                    # that don't inspect the payload size.
                    LOG.debug("Cannot determine image size. Adding as a "
                              "segmented object to Swift.")
                    total_chunks = '?'

                checksum = hashlib.md5()
                written_chunks = []
                combined_chunks_size = 0
                while True:
                    chunk_size = self.large_object_chunk_size
                    if image_size == 0:
                        content_length = None
                    else:
                        left = image_size - combined_chunks_size
                        if left == 0:
                            break
                        if chunk_size > left:
                            chunk_size = left
                        content_length = chunk_size

                    chunk_name = "%s-%05d" % (location.obj, chunk_id)
                    reader = ChunkReader(image_file, checksum, chunk_size)
                    try:
                        chunk_etag = connection.put_object(
                            location.container, chunk_name, reader,
                            content_length=content_length)
                        written_chunks.append(chunk_name)
                    except Exception:
                        # Delete orphaned segments from swift backend
                        with excutils.save_and_reraise_exception():
                            LOG.exception(_("Error during chunked upload to "
                                            "backend, deleting stale chunks"))
                            self._delete_stale_chunks(connection,
                                                      location.container,
                                                      written_chunks)

                    bytes_read = reader.bytes_read
                    msg = ("Wrote chunk %(chunk_name)s (%(chunk_id)d/"
                           "%(total_chunks)s) of length %(bytes_read)d "
                           "to Swift returning MD5 of content: "
                           "%(chunk_etag)s" %
                           {'chunk_name': chunk_name,
                            'chunk_id': chunk_id,
                            'total_chunks': total_chunks,
                            'bytes_read': bytes_read,
                            'chunk_etag': chunk_etag})
                    LOG.debug(msg)

                    if bytes_read == 0:
                        # Delete the last chunk, because it's of zero size.
                        # This will happen if size == 0.
                        LOG.debug("Deleting final zero-length chunk")
                        connection.delete_object(location.container,
                                                 chunk_name)
                        break

                    chunk_id += 1
                    combined_chunks_size += bytes_read

                # In the case we have been given an unknown image size,
                # set the size to the total size of the combined chunks.
                if image_size == 0:
                    image_size = combined_chunks_size

                # Now we write the object manifest and return the
                # manifest's etag...
                manifest = "%s/%s-" % (location.container, location.obj)
                headers = {'ETag': hashlib.md5("").hexdigest(),
                           'X-Object-Manifest': manifest}

                # The ETag returned for the manifest is actually the
                # MD5 hash of the concatenated checksums of the strings
                # of each chunk...so we ignore this result in favour of
                # the MD5 of the entire image file contents, so that
                # users can verify the image file contents accordingly
                connection.put_object(location.container, location.obj,
                                      None, headers=headers)
                obj_etag = checksum.hexdigest()

            # NOTE: We return the user and key here! Have to because
            # location is used by the API server to return the actual
            # image data. We *really* should consider NOT returning
            # the location attribute from GET /images/<ID> and
            # GET /images/details
            if swift_store_utils.is_multiple_swift_store_accounts_enabled():
                include_creds = False
            else:
                include_creds = True

            return (location.get_uri(credentials_included=include_creds),
                    image_size, obj_etag, {})
        except swiftclient.ClientException as e:
            if e.http_status == httplib.CONFLICT:
                raise exception.Duplicate(_("Swift already has an image at "
                                            "this location"))
            msg = (_("Failed to add object to Swift.\n"
                     "Got error from Swift: %s") % utils.exception_to_str(e))
            LOG.error(msg)
            raise glance.store.BackendException(msg)

    def delete(self, location, connection=None):
        """Delete the object (and, for segmented uploads, its segments)."""
        location = location.store_location
        if not connection:
            connection = self.get_connection(location)

        try:
            # We request the manifest for the object. If one exists,
            # that means the object was uploaded in chunks/segments,
            # and we need to delete all the chunks as well as the
            # manifest.
            manifest = None
            try:
                headers = connection.head_object(
                    location.container, location.obj)
                manifest = headers.get('x-object-manifest')
            except swiftclient.ClientException as e:
                if e.http_status != httplib.NOT_FOUND:
                    raise

            if manifest:
                # Delete all the chunks before the object manifest itself
                obj_container, obj_prefix = manifest.split('/', 1)
                segments = connection.get_container(
                    obj_container, prefix=obj_prefix)[1]
                for segment in segments:
                    # TODO(jaypipes): This would be an easy area to
                    # parallelize since we're simply sending off
                    # parallelizable requests to Swift to delete stuff.
                    # It's not like we're going to be hogging up network
                    # or file I/O here...
                    connection.delete_object(obj_container,
                                             segment['name'])

            # Delete object (or, in segmented case, the manifest)
            connection.delete_object(location.container, location.obj)

        except swiftclient.ClientException as e:
            if e.http_status == httplib.NOT_FOUND:
                msg = _("Swift could not find image at URI.")
                raise exception.NotFound(msg)
            else:
                raise

    def _create_container_if_missing(self, container, connection):
        """
        Creates a missing container in Swift if the
        ``swift_store_create_container_on_put`` option is set.

        :param container: Name of container to create
        :param connection: Connection to swift service
        """
        try:
            connection.head_container(container)
        except swiftclient.ClientException as e:
            if e.http_status == httplib.NOT_FOUND:
                if CONF.swift_store_create_container_on_put:
                    try:
                        msg = (_LI("Creating swift container %(container)s") %
                               {'container': container})
                        LOG.info(msg)
                        connection.put_container(container)
                    except swiftclient.ClientException as e:
                        msg = (_("Failed to add container to Swift.\n"
                                 "Got error from Swift: %(e)s") % {'e': e})
                        raise glance.store.BackendException(msg)
                else:
                    msg = (_("The container %(container)s does not exist in "
                             "Swift. Please set the "
                             "swift_store_create_container_on_put option"
                             "to add container to Swift automatically.") %
                           {'container': container})
                    raise glance.store.BackendException(msg)
            else:
                raise

    def get_connection(self):
        # Provided by SingleTenantStore / MultiTenantStore.
        raise NotImplementedError()

    def create_location(self):
        # Provided by SingleTenantStore / MultiTenantStore.
        raise NotImplementedError()
class SingleTenantStore(BaseStore):
    """Swift store that authenticates with one configured account."""

    EXAMPLE_URL = "swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>"

    def configure(self):
        super(SingleTenantStore, self).configure()
        self.auth_version = self._option_get('swift_store_auth_version')

    def configure_add(self):
        """Load the default account reference and validate its settings.

        :raises exception.BadStoreConfiguration: when the auth address or
            credentials are missing.
        """
        default_swift_reference = \
            SWIFT_STORE_REF_PARAMS.get(
                CONF.default_swift_reference)
        if default_swift_reference:
            self.auth_address = default_swift_reference.get('auth_address')
        if (not default_swift_reference) or (not self.auth_address):
            reason = _("A value for swift_store_auth_address is required.")
            LOG.error(reason)
            raise exception.BadStoreConfiguration(store_name="swift",
                                                  reason=reason)

        if self.auth_address.startswith('http://'):
            self.scheme = 'swift+http'
        else:
            self.scheme = 'swift+https'
        self.container = CONF.swift_store_container
        self.user = default_swift_reference.get('user')
        self.key = default_swift_reference.get('key')

        if not (self.user and self.key):
            # BUG FIX: was ``or``, which only rejected the configuration
            # when *both* credentials were missing; authenticating needs
            # both a user and a key, so fail fast if either is absent.
            reason = _("A value for swift_store_ref_params is required.")
            LOG.error(reason)
            raise exception.BadStoreConfiguration(store_name="swift",
                                                  reason=reason)

    def create_location(self, image_id):
        """Build a StoreLocation in the single configured container."""
        specs = {'scheme': self.scheme,
                 'container': self.container,
                 'obj': str(image_id),
                 'auth_or_store_url': self.auth_address,
                 'user': self.user,
                 'key': self.key}
        return StoreLocation(specs)

    def validate_location(self, uri):
        # swift+config URIs must never be supplied by users: they would
        # be resolved against operator-configured credentials.
        pieces = urlparse.urlparse(uri)
        if pieces.scheme in ['swift+config']:
            reason = (_("Location credentials are invalid"))
            raise exception.BadStoreUri(message=reason)

    def get_connection(self, location):
        """Open an authenticated swiftclient connection for ``location``."""
        if not location.user:
            reason = _("Location is missing user:password information.")
            LOG.info(reason)
            raise exception.BadStoreUri(message=reason)

        auth_url = location.swift_url
        if not auth_url.endswith('/'):
            auth_url += '/'

        if self.auth_version == '2':
            try:
                # Keystone v2 locations encode the tenant in the user
                # portion as 'tenant:user'.
                tenant_name, user = location.user.split(':')
            except ValueError:
                reason = (_("Badly formed tenant:user '%(user)s' in "
                            "Swift URI") % {'user': location.user})
                LOG.info(reason)
                raise exception.BadStoreUri(message=reason)
        else:
            tenant_name = None
            user = location.user

        os_options = {}
        if self.region:
            os_options['region_name'] = self.region
        os_options['endpoint_type'] = self.endpoint_type
        os_options['service_type'] = self.service_type

        return swiftclient.Connection(
            auth_url, user, location.key, insecure=self.insecure,
            tenant_name=tenant_name, snet=self.snet,
            auth_version=self.auth_version, os_options=os_options,
            ssl_compression=self.ssl_compression)
class MultiTenantStore(BaseStore):
    """Swift store that reuses the request's own token and tenant.

    Each image gets a dedicated container named '<prefix>_<image_id>'.
    """

    EXAMPLE_URL = "swift://<SWIFT_URL>/<CONTAINER>/<FILE>"

    def configure_add(self):
        """Validate the request context and resolve the storage URL."""
        self.container = CONF.swift_store_container
        if self.context is None:
            reason = _("Multi-tenant Swift storage requires a context.")
            raise exception.BadStoreConfiguration(store_name="swift",
                                                  reason=reason)
        if self.context.service_catalog is None:
            reason = _("Multi-tenant Swift storage requires "
                       "a service catalog.")
            raise exception.BadStoreConfiguration(store_name="swift",
                                                  reason=reason)
        self.storage_url = auth.get_endpoint(
            self.context.service_catalog, service_type=self.service_type,
            endpoint_region=self.region, endpoint_type=self.endpoint_type)
        self.scheme = ('swift+http'
                       if self.storage_url.startswith('http://')
                       else 'swift+https')

    def delete(self, location, connection=None):
        """Delete the image object, then drop its dedicated container."""
        conn = connection
        if not conn:
            conn = self.get_connection(location.store_location)
        super(MultiTenantStore, self).delete(location, conn)
        conn.delete_container(location.store_location.container)

    def set_acls(self, location, public=False, read_tenants=None,
                 write_tenants=None, connection=None):
        """Apply read/write container ACLs for the image's tenants."""
        location = location.store_location
        if not connection:
            connection = self.get_connection(location)

        if read_tenants is None:
            read_tenants = []
        if write_tenants is None:
            write_tenants = []

        if public:
            read_acl = ".r:*,.rlistings"
        elif read_tenants:
            read_acl = ','.join('%s:*' % tenant for tenant in read_tenants)
        else:
            read_acl = ''

        # Operator-configured admin tenants always get write access.
        write_tenants.extend(self.admin_tenants)
        if write_tenants:
            write_acl = ','.join('%s:*' % tenant for tenant in write_tenants)
        else:
            write_acl = ''

        headers = {'X-Container-Read': read_acl,
                   'X-Container-Write': write_acl}
        try:
            connection.post_container(location.container, headers=headers)
        except swiftclient.ClientException as e:
            if e.http_status == httplib.NOT_FOUND:
                msg = _("Swift could not find image at URI.")
                raise exception.NotFound(msg)
            else:
                raise

    def create_location(self, image_id):
        """Build a per-image StoreLocation (one container per image)."""
        image_id = str(image_id)
        specs = {'scheme': self.scheme,
                 'container': self.container + '_' + image_id,
                 'obj': image_id,
                 'auth_or_store_url': self.storage_url}
        return StoreLocation(specs)

    def get_connection(self, location):
        """Connect to Swift reusing the caller's token (no re-auth)."""
        return swiftclient.Connection(
            None, self.context.user, None,
            preauthurl=location.swift_url,
            preauthtoken=self.context.auth_tok,
            tenant_name=self.context.tenant,
            auth_version='2', snet=self.snet, insecure=self.insecure,
            ssl_compression=self.ssl_compression)
class ChunkReader(object):
    """Limit reads from ``fd`` to ``total`` bytes, feeding everything
    that is read into ``checksum`` and counting it in ``bytes_read``.
    """

    def __init__(self, fd, checksum, total):
        self.fd = fd
        self.checksum = checksum
        self.total = total
        self.bytes_read = 0

    def read(self, i):
        # Never hand out more than the remaining budget for this chunk.
        remaining = self.total - self.bytes_read
        result = self.fd.read(min(i, remaining))
        self.bytes_read += len(result)
        self.checksum.update(result)
        return result

View File

@ -1,503 +0,0 @@
# Copyright 2014 OpenStack, LLC
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for VMware Datastore"""
import hashlib
import httplib
import os
import netaddr
from oslo.config import cfg
from oslo.vmware import api
from retrying import retry
import six.moves.urllib.parse as urlparse
from glance.common import exception
from glance.openstack.common import excutils
from glance.openstack.common import gettextutils
import glance.openstack.common.log as logging
from glance.openstack.common import units
import glance.store
import glance.store.base
import glance.store.location
LOG = logging.getLogger(__name__)
_LE = gettextutils._LE
_LI = gettextutils._LI

# Maximum number of HTTP redirects Store._query() will follow.
MAX_REDIRECTS = 5
# Default datastore directory glance images are written under.
DEFAULT_STORE_IMAGE_DIR = '/openstack_glance'
# Inventory datacenter path used when targeting a standalone ESX/ESXi host.
DEFAULT_ESX_DATACENTER_PATH = 'ha-datacenter'
# Path prefix of the datastore file-access HTTP endpoint.
DS_URL_PREFIX = '/folder'
# URI scheme handled by this store.
STORE_SCHEME = 'vsphere'

# check that datacenter/datastore combination is valid
_datastore_info_valid = False

# Configuration options for the VMware datastore backend.
vmware_opts = [
    cfg.StrOpt('vmware_server_host',
               help=_('ESX/ESXi or vCenter Server target system. '
                      'The server value can be an IP address or a DNS name.')),
    cfg.StrOpt('vmware_server_username',
               help=_('Username for authenticating with '
                      'VMware ESX/VC server.')),
    cfg.StrOpt('vmware_server_password',
               help=_('Password for authenticating with '
                      'VMware ESX/VC server.'),
               secret=True),
    cfg.StrOpt('vmware_datacenter_path',
               default=DEFAULT_ESX_DATACENTER_PATH,
               help=_('Inventory path to a datacenter. '
                      'If the vmware_server_host specified is an ESX/ESXi, '
                      'the vmware_datacenter_path is optional. If specified, '
                      'it should be "ha-datacenter".')),
    cfg.StrOpt('vmware_datastore_name',
               help=_('Datastore associated with the datacenter.')),
    cfg.IntOpt('vmware_api_retry_count',
               default=10,
               help=_('Number of times VMware ESX/VC server API must be '
                      'retried upon connection related issues.')),
    cfg.IntOpt('vmware_task_poll_interval',
               default=5,
               help=_('The interval used for polling remote tasks '
                      'invoked on VMware ESX/VC server.')),
    cfg.StrOpt('vmware_store_image_dir',
               default=DEFAULT_STORE_IMAGE_DIR,
               help=_('The name of the directory where the glance images '
                      'will be stored in the VMware datastore.')),
    cfg.BoolOpt('vmware_api_insecure',
                default=False,
                help=_('Allow to perform insecure SSL requests to ESX/VC.')),
]

CONF = cfg.CONF
CONF.register_opts(vmware_opts)
def is_valid_ipv6(address):
    """Return True when *address* parses as an IPv6 address."""
    try:
        valid = netaddr.valid_ipv6(address)
    except Exception:
        return False
    return valid
def http_response_iterator(conn, response, size):
    """Return an iterator for a file-like object.

    The connection is always closed once the response is exhausted (or
    the consumer abandons the iterator).

    :param conn: HTTP(S) Connection
    :param response: httplib.HTTPResponse object
    :param size: Chunk size to iterate with
    """
    try:
        while True:
            chunk = response.read(size)
            if not chunk:
                break
            yield chunk
    finally:
        conn.close()
class _Reader(object):
def __init__(self, data):
self._size = 0
self.data = data
self.checksum = hashlib.md5()
def read(self, size=None):
result = self.data.read(size)
self._size += len(result)
self.checksum.update(result)
return result
def rewind(self):
try:
self.data.seek(0)
self._size = 0
self.checksum = hashlib.md5()
except IOError:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to rewind image content'))
@property
def size(self):
return self._size
class _ChunkReader(_Reader):
    """Reader that emits the data framed in HTTP/1.1 chunked
    transfer-encoding, for uploads whose total size is unknown.
    """

    def __init__(self, data, blocksize=8192):
        # blocksize: how many raw bytes to pull from ``data`` per chunk.
        self.blocksize = blocksize
        # current_chunk: the encoded-but-not-yet-consumed output buffer.
        self.current_chunk = ""
        self.closed = False
        super(_ChunkReader, self).__init__(data)

    def read(self, size=None):
        # Return up to ``size`` bytes of *encoded* output; with size=None,
        # return everything up to and including the terminating chunk.
        ret = ""
        while size is None or size >= len(self.current_chunk):
            # Consume the buffered chunk whole, then refill from ``data``.
            ret += self.current_chunk
            if size is not None:
                size -= len(self.current_chunk)
            if self.closed:
                self.current_chunk = ""
                break
            self._get_chunk()
        else:
            # while/else: runs only when the loop condition went false,
            # i.e. a partial take -- split the buffered chunk at ``size``.
            ret += self.current_chunk[:size]
            self.current_chunk = self.current_chunk[size:]
        return ret

    def _get_chunk(self):
        # Refill ``current_chunk`` with one encoded chunk. The inherited
        # ``_size``/``checksum`` track the *raw* payload, not the framing.
        if not self.closed:
            chunk = self.data.read(self.blocksize)
            chunk_len = len(chunk)
            self._size += chunk_len
            self.checksum.update(chunk)
            if chunk:
                # '<hex length>\r\n<payload>\r\n' chunk framing.
                self.current_chunk = '%x\r\n%s\r\n' % (chunk_len, chunk)
            else:
                # Zero-length chunk terminates the stream.
                self.current_chunk = '0\r\n\r\n'
                self.closed = True
class StoreLocation(glance.store.location.StoreLocation):
    """Class describing an VMware URI.

    An VMware URI can look like any of the following:

        vsphere://server_host/folder/file_path?dcPath=dc_path&dsName=ds_name
    """

    def process_specs(self):
        # Invoked by the base class constructor; builds the datastore file
        # path and query string from the specs dict.
        self.scheme = self.specs.get('scheme', STORE_SCHEME)
        self.server_host = self.specs.get('server_host')
        self.path = os.path.join(DS_URL_PREFIX,
                                 self.specs.get('image_dir').strip('/'),
                                 self.specs.get('image_id'))
        dc_path = self.specs.get('datacenter_path')
        if dc_path is not None:
            # dcPath is omitted from the query entirely when no datacenter
            # path was supplied.
            param_list = {'dcPath': self.specs.get('datacenter_path'),
                          'dsName': self.specs.get('datastore_name')}
        else:
            param_list = {'dsName': self.specs.get('datastore_name')}
        self.query = urlparse.urlencode(param_list)

    def get_uri(self):
        # IPv6 literals must be bracketed when embedded in a URL.
        if is_valid_ipv6(self.server_host):
            base_url = '%s://[%s]%s' % (self.scheme,
                                        self.server_host, self.path)
        else:
            base_url = '%s://%s%s' % (self.scheme,
                                      self.server_host, self.path)

        return '%s?%s' % (base_url, self.query)

    def _is_valid_path(self, path):
        # Only paths under the configured glance image directory are
        # accepted as store locations.
        return path.startswith(
            os.path.join(DS_URL_PREFIX,
                         CONF.vmware_store_image_dir.strip('/')))

    def parse_uri(self, uri):
        # Parse and validate a vsphere:// URI into scheme/host/path/query.
        if not uri.startswith('%s://' % STORE_SCHEME):
            reason = (_("URI must start with %s://") % STORE_SCHEME)
            LOG.info(reason)
            raise exception.BadStoreUri(message=reason)
        (self.scheme, self.server_host,
         path, params, query, fragment) = urlparse.urlparse(uri)
        if not query:
            # NOTE(review): presumably a fallback for urlparse versions
            # that do not split the query off for unknown schemes, so the
            # '?' is still embedded in ``path`` -- confirm before
            # simplifying.
            path = path.split('?')
            if self._is_valid_path(path[0]):
                self.path = path[0]
                self.query = path[1]
                return
        elif self._is_valid_path(path):
            self.path = path
            self.query = query
            return
        reason = _('Badly formed VMware datastore URI')
        LOG.info(reason)
        raise exception.BadStoreUri(message=reason)
class Store(glance.store.base.Store):
"""An implementation of the VMware datastore adapter."""
WRITE_CHUNKSIZE = units.Mi
    def get_schemes(self):
        # URI scheme(s) this store is registered for ('vsphere').
        return (STORE_SCHEME,)
    def _sanity_check(self):
        """Fail fast when retry/poll options are non-positive.

        :raises exception.BadStoreConfiguration: if either value is <= 0.
        """
        if CONF.vmware_api_retry_count <= 0:
            msg = _("vmware_api_retry_count should be greater than zero")
            LOG.error(msg)
            raise exception.BadStoreConfiguration(
                store_name='vmware_datastore', reason=msg)
        if CONF.vmware_task_poll_interval <= 0:
            msg = _("vmware_task_poll_interval should be greater than zero")
            LOG.error(msg)
            raise exception.BadStoreConfiguration(
                store_name='vmware_datastore', reason=msg)
    def configure(self):
        """Read server connection options and open the VMware API session.

        :raises exception.BadStoreConfiguration: if a required option is
            missing or invalid.
        """
        self._sanity_check()
        self.scheme = STORE_SCHEME
        # Host/username/password are mandatory; _option_get raises when
        # they are unset.
        self.server_host = self._option_get('vmware_server_host')
        self.server_username = self._option_get('vmware_server_username')
        self.server_password = self._option_get('vmware_server_password')
        self.api_retry_count = CONF.vmware_api_retry_count
        self.task_poll_interval = CONF.vmware_task_poll_interval
        self.api_insecure = CONF.vmware_api_insecure
        self._create_session()
    def configure_add(self):
        """Verify that the configured datacenter/datastore pair exists.

        The module-global ``_datastore_info_valid`` flag makes the
        inventory lookup happen only once per process.

        :raises exception.BadStoreConfiguration: when the datastore cannot
            be found in the given datacenter.
        """
        self.datacenter_path = CONF.vmware_datacenter_path
        self.datastore_name = self._option_get('vmware_datastore_name')
        global _datastore_info_valid
        if not _datastore_info_valid:
            search_index_moref = self._service_content.searchIndex
            inventory_path = ('%s/datastore/%s'
                              % (self.datacenter_path, self.datastore_name))
            ds_moref = self._session.invoke_api(self._session.vim,
                                                'FindByInventoryPath',
                                                search_index_moref,
                                                inventoryPath=inventory_path)
            if ds_moref is None:
                msg = (_("Could not find datastore %(ds_name)s "
                         "in datacenter %(dc_path)s")
                       % {'ds_name': self.datastore_name,
                          'dc_path': self.datacenter_path})
                LOG.error(msg)
                raise exception.BadStoreConfiguration(
                    store_name='vmware_datastore', reason=msg)
            else:
                _datastore_info_valid = True
        self.store_image_dir = CONF.vmware_store_image_dir
    def _create_session(self):
        # (Re-)establish the authenticated VMware API session and cache
        # the service content used for inventory lookups; also called to
        # refresh an expired session before retrying.
        self._session = api.VMwareAPISession(
            self.server_host, self.server_username, self.server_password,
            self.api_retry_count, self.task_poll_interval)
        self._service_content = self._session.vim.service_content
def _option_get(self, param):
result = getattr(CONF, param)
if not result:
reason = (_("Could not find %(param)s in configuration "
"options.") % {'param': param})
raise exception.BadStoreConfiguration(
store_name='vmware_datastore', reason=reason)
return result
def _build_vim_cookie_header(self, vim_cookies):
"""Build ESX host session cookie header."""
if len(list(vim_cookies)) > 0:
cookie = list(vim_cookies)[0]
return cookie.name + '=' + cookie.value
    def _session_not_authenticated(exc):
        # NOTE: deliberately defined without ``self`` -- this is used as
        # the ``retry_on_exception`` predicate of the @retry decorators in
        # this class, which call it with only the raised exception.
        if isinstance(exc, exception.NotAuthenticated):
            LOG.info(_LI("Store session is not authenticated, retry attempt"))
            return True
        return False
    @retry(stop_max_attempt_number=CONF.vmware_api_retry_count + 1,
           retry_on_exception=_session_not_authenticated)
    def add(self, image_id, image_file, image_size):
        """Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval tuple of URL in backing store, bytes written, checksum
                and a dictionary with storage system specific information
        :raises `glance.common.exception.Duplicate` if the image already
                existed
                `glance.common.exception.UnexpectedStatus` if the upload
                request returned an unexpected status. The expected responses
                are 201 Created and 200 OK.
        """
        if image_size > 0:
            headers = {'Content-Length': image_size}
            image_file = _Reader(image_file)
        else:
            # NOTE (arnaud): use chunk encoding when the image is still being
            # generated by the server (ex: stream optimized disks generated by
            # Nova).
            headers = {'Transfer-Encoding': 'chunked'}
            image_file = _ChunkReader(image_file)
        loc = StoreLocation({'scheme': self.scheme,
                             'server_host': self.server_host,
                             'image_dir': self.store_image_dir,
                             'datacenter_path': self.datacenter_path,
                             'datastore_name': self.datastore_name,
                             'image_id': image_id})
        # Reuse the authenticated VMware session's cookie for the HTTP PUT.
        cookie = self._build_vim_cookie_header(
            self._session.vim.client.options.transport.cookiejar)
        headers = dict(headers.items() + {'Cookie': cookie}.items())
        try:
            conn = self._get_http_conn('PUT', loc, headers,
                                       content=image_file)
            res = conn.getresponse()
        except Exception:
            # save_and_reraise re-raises, so ``res`` is only read when the
            # request actually succeeded.
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to upload content of image '
                                  '%(image)s'), {'image': image_id})

        if res.status == httplib.UNAUTHORIZED:
            # Session expired: refresh it, rewind the upload stream, and
            # raise so the @retry decorator re-invokes add().
            self._create_session()
            image_file.rewind()
            raise exception.NotAuthenticated()

        if res.status == httplib.CONFLICT:
            raise exception.Duplicate(_("Image file %(image_id)s already "
                                        "exists!") % {'image_id': image_id})

        if res.status not in (httplib.CREATED, httplib.OK):
            msg = (_LE('Failed to upload content of image %(image)s') %
                   {'image': image_id})
            LOG.error(msg)
            raise exception.UnexpectedStatus(status=res.status,
                                             body=res.read())

        return (loc.get_uri(), image_file.size,
                image_file.checksum.hexdigest(), {})
    def get(self, location):
        """Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location: `glance.store.location.Location` object, supplied
                         from glance.store.location.get_location_from_uri()
        """
        conn, resp, content_length = self._query(location, 'GET')
        iterator = http_response_iterator(conn, resp, self.READ_CHUNKSIZE)

        class ResponseIndexable(glance.store.Indexable):
            # Adapter giving the raw iterator the Indexable interface;
            # ``another`` returns '' once the stream is exhausted.
            def another(self):
                try:
                    return self.wrapped.next()
                except StopIteration:
                    return ''

        return (ResponseIndexable(iterator, content_length), content_length)
def get_size(self, location):
"""Takes a `glance.store.location.Location` object that indicates
where to find the image file, and returns the size
:param location: `glance.store.location.Location` object, supplied
from glance.store.location.get_location_from_uri()
"""
return self._query(location, 'HEAD')[2]
    def delete(self, location):
        """Takes a `glance.store.location.Location` object that indicates
        where to find the image file to delete

        :location `glance.store.location.Location` object, supplied
                  from glance.store.location.get_location_from_uri()
        :raises NotFound if image does not exist
        """
        # Convert the /folder URL path back into a datastore file path,
        # e.g. '[<datastore_name>] <image_dir>/<image_id>'.
        file_path = '[%s] %s' % (
            self.datastore_name,
            location.store_location.path[len(DS_URL_PREFIX):])
        search_index_moref = self._service_content.searchIndex
        dc_moref = self._session.invoke_api(self._session.vim,
                                            'FindByInventoryPath',
                                            search_index_moref,
                                            inventoryPath=self.datacenter_path)
        delete_task = self._session.invoke_api(
            self._session.vim,
            'DeleteDatastoreFile_Task',
            self._service_content.fileManager,
            name=file_path,
            datacenter=dc_moref)
        try:
            self._session.wait_for_task(delete_task)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to delete image %(image)s content.'),
                              {'image': location.image_id})
@retry(stop_max_attempt_number=CONF.vmware_api_retry_count + 1,
       retry_on_exception=_session_not_authenticated)
def _query(self, location, method, depth=0):
    """Issue *method* against the datastore URL in *location*.

    Follows up to MAX_REDIRECTS HTTP redirects and transparently
    re-authenticates (via the @retry decorator) on a 401 response.

    :param location: `glance.store.location.Location` for the image
    :param method: HTTP verb, e.g. 'GET' or 'HEAD'
    :param depth: current redirect depth (internal, for recursion)
    :returns: tuple of (connection, response, content_length)
    :raises exception.MaxRedirectsExceeded: too many redirects
    :raises exception.NotAuthenticated: session expired (retried)
    :raises exception.NotFound: datastore returned 404
    :raises exception.BadStoreUri: other 4xx/5xx or invalid redirect
    """
    if depth > MAX_REDIRECTS:
        # FIX: the original assigned a (format-string, dict) tuple to
        # msg instead of interpolating, so an unformatted tuple was
        # passed to LOG.debug().
        msg = ("The HTTP URL exceeded %(max_redirects)s maximum "
               "redirects." % {'max_redirects': MAX_REDIRECTS})
        LOG.debug(msg)
        raise exception.MaxRedirectsExceeded(redirects=MAX_REDIRECTS)
    loc = location.store_location
    # Re-send the vSphere session cookie with every request.
    cookie = self._build_vim_cookie_header(
        self._session.vim.client.options.transport.cookiejar)
    headers = {'Cookie': cookie}
    try:
        conn = self._get_http_conn(method, loc, headers)
        resp = conn.getresponse()
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Failed to access image %(image)s content.'),
                          {'image': location.image_id})
    if resp.status >= 400:
        if resp.status == httplib.UNAUTHORIZED:
            # Session expired: rebuild it and raise so @retry re-invokes.
            self._create_session()
            raise exception.NotAuthenticated()
        if resp.status == httplib.NOT_FOUND:
            msg = 'VMware datastore could not find image at URI.'
            LOG.debug(msg)
            raise exception.NotFound(msg)
        reason = (_('HTTP request returned a %(status)s status code.')
                  % {'status': resp.status})
        LOG.info(reason)
        raise exception.BadStoreUri(message=reason)
    location_header = resp.getheader('location')
    if location_header:
        # Only genuine redirect statuses may carry a Location header.
        if resp.status not in (301, 302):
            reason = (_("The HTTP URL %(path)s attempted to redirect "
                        "with an invalid %(status)s status code.")
                      % {'path': loc.path, 'status': resp.status})
            LOG.info(reason)
            raise exception.BadStoreUri(message=reason)
        location_class = glance.store.location.Location
        new_loc = location_class(location.store_name,
                                 location.store_location.__class__,
                                 uri=location_header,
                                 image_id=location.image_id,
                                 store_specs=location.store_specs)
        # Recurse into the redirect target, bumping the depth counter.
        return self._query(new_loc, method, depth + 1)
    content_length = int(resp.getheader('content-length', 0))
    return (conn, resp, content_length)
def _get_http_conn(self, method, loc, headers, content=None):
    """Open a connection to the datastore host and send the request.

    :param method: HTTP verb to issue
    :param loc: store location carrying server_host, path and query
    :param headers: dict of request headers (must include the session
        cookie)
    :param content: optional request body
    :returns: the connection object with the request already sent
    """
    factory = self._get_http_conn_class()
    connection = factory(loc.server_host)
    # Percent-encode the path + query so unsafe characters survive.
    request_url = urlparse.quote('%s?%s' % (loc.path, loc.query))
    connection.request(method, request_url, content, headers)
    return connection
def _get_http_conn_class(self):
    """Select the connection class: plain HTTP when api_insecure is set,
    TLS otherwise."""
    insecure = self.api_insecure
    return httplib.HTTPConnection if insecure else httplib.HTTPSConnection

View File

@ -39,7 +39,6 @@ import testtools
from glance.common import utils
from glance.db.sqlalchemy import api as db_api
from glance.openstack.common import jsonutils
from glance.openstack.common import units
from glance import tests as glance_tests
from glance.tests import utils as test_utils
@ -280,28 +279,6 @@ class ApiServer(Server):
self.scrubber_datadir = os.path.join(self.test_dir, "scrubber")
self.log_file = os.path.join(self.test_dir, "api.log")
self.image_size_cap = 1099511627776
self.s3_store_host = "s3.amazonaws.com"
self.s3_store_access_key = ""
self.s3_store_secret_key = ""
self.s3_store_bucket = ""
self.s3_store_bucket_url_format = ""
self.swift_store_auth_version = kwargs.get("swift_store_auth_version",
"2")
self.swift_store_auth_address = kwargs.get("swift_store_auth_address",
"")
self.swift_store_user = kwargs.get("swift_store_user", "")
self.swift_store_key = kwargs.get("swift_store_key", "")
self.swift_store_container = kwargs.get("swift_store_container", "")
self.swift_store_create_container_on_put = kwargs.get(
"swift_store_create_container_on_put", "True")
self.swift_store_large_object_size = 5 * units.Ki
self.swift_store_large_object_chunk_size = 200
self.swift_store_multi_tenant = False
self.swift_store_admin_tenants = []
self.rbd_store_ceph_conf = ""
self.rbd_store_pool = ""
self.rbd_store_user = ""
self.rbd_store_chunk_size = 4
self.delayed_delete = delayed_delete
self.owner_is_tenant = True
self.workers = 0
@ -333,8 +310,6 @@ class ApiServer(Server):
verbose = %(verbose)s
debug = %(debug)s
default_log_levels = eventlet.wsgi.server=DEBUG
filesystem_store_datadir=%(image_dir)s
default_store = %(default_store)s
bind_host = 127.0.0.1
bind_port = %(bind_port)s
key_file = %(key_file)s
@ -344,25 +319,6 @@ registry_host = 127.0.0.1
registry_port = %(registry_port)s
log_file = %(log_file)s
image_size_cap = %(image_size_cap)d
s3_store_host = %(s3_store_host)s
s3_store_access_key = %(s3_store_access_key)s
s3_store_secret_key = %(s3_store_secret_key)s
s3_store_bucket = %(s3_store_bucket)s
s3_store_bucket_url_format = %(s3_store_bucket_url_format)s
swift_store_auth_version = %(swift_store_auth_version)s
swift_store_auth_address = %(swift_store_auth_address)s
swift_store_user = %(swift_store_user)s
swift_store_key = %(swift_store_key)s
swift_store_container = %(swift_store_container)s
swift_store_create_container_on_put = %(swift_store_create_container_on_put)s
swift_store_large_object_size = %(swift_store_large_object_size)s
swift_store_large_object_chunk_size = %(swift_store_large_object_chunk_size)s
swift_store_multi_tenant = %(swift_store_multi_tenant)s
swift_store_admin_tenants = %(swift_store_admin_tenants)s
rbd_store_chunk_size = %(rbd_store_chunk_size)s
rbd_store_user = %(rbd_store_user)s
rbd_store_pool = %(rbd_store_pool)s
rbd_store_ceph_conf = %(rbd_store_ceph_conf)s
delayed_delete = %(delayed_delete)s
owner_is_tenant = %(owner_is_tenant)s
workers = %(workers)s
@ -392,6 +348,9 @@ location_strategy=%(location_strategy)s
flavor = %(deployment_flavor)s
[store_type_location_strategy]
store_type_preference = %(store_type_location_strategy_preference)s
[glance_store]
filesystem_store_datadir=%(image_dir)s
default_store = %(default_store)s
"""
self.paste_conf_base = """[pipeline:glance-api]
pipeline = versionnegotiation gzip unauthenticated-context rootapp
@ -540,13 +499,6 @@ class ScrubberDaemon(Server):
"scrubber")
self.pid_file = os.path.join(self.test_dir, "scrubber.pid")
self.log_file = os.path.join(self.test_dir, "scrubber.log")
self.swift_store_auth_address = kwargs.get("swift_store_auth_address",
"")
self.swift_store_user = kwargs.get("swift_store_user", "")
self.swift_store_key = kwargs.get("swift_store_key", "")
self.swift_store_container = kwargs.get("swift_store_container", "")
self.swift_store_auth_version = kwargs.get("swift_store_auth_version",
"2")
self.metadata_encryption_key = "012345678901234567890123456789ab"
self.lock_path = self.test_dir
@ -566,11 +518,6 @@ scrubber_datadir = %(scrubber_datadir)s
registry_host = 127.0.0.1
registry_port = %(registry_port)s
metadata_encryption_key = %(metadata_encryption_key)s
swift_store_auth_address = %(swift_store_auth_address)s
swift_store_user = %(swift_store_user)s
swift_store_key = %(swift_store_key)s
swift_store_container = %(swift_store_container)s
swift_store_auth_version = %(swift_store_auth_version)s
lock_path = %(lock_path)s
sql_connection = %(sql_connection)s
sql_idle_timeout = 3600

View File

@ -1,128 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
import six
#NOTE(bcwaldon): importing this to get the default_store option
import glance.api.v1.images
from glance.common import exception
import glance.store.location
CONF = cfg.CONF
class BaseTestCase(object):
    """Basic test cases for glance image stores.

    To run these tests on a new store X, create a test case like::

        class TestXStore(BaseTestCase, testtools.TestCase):
            (MULTIPLE INHERITANCE REQUIRED)

            def get_store(...):
                (STORE SPECIFIC)

            def stash_image(...):
                (STORE SPECIFIC)
    """
    def setUp(self):
        super(BaseTestCase, self).setUp()

    def tearDown(self):
        # Undo any CONF.set_override() calls made via self.config().
        CONF.reset()
        super(BaseTestCase, self).tearDown()

    def config(self, **kw):
        # Override global config options for the duration of one test.
        for k, v in six.iteritems(kw):
            CONF.set_override(k, v, group=None)

    def get_store(self, **kwargs):
        # Subclasses must return a configured store instance.
        raise NotImplementedError('get_store() must be implemented')

    def stash_image(self, image_id, image_data):
        """Store image data in the backend manually.

        :param image_id: image identifier
        :param image_data: string representing image data fixture
        :return: URI referencing newly-created backend object
        """
        raise NotImplementedError('stash_image is not implemented')

    def test_create_store(self):
        self.config(known_stores=[self.store_cls_path])
        count = glance.store.create_stores()
        # NOTE(review): 7 appears to be the expected number of registered
        # stores at the time this was written — confirm against config.
        self.assertEqual(7, count)

    def test_lifecycle(self):
        """Add, get and delete an image"""
        store = self.get_store()
        image_id = str(uuid.uuid4())
        image_data = six.StringIO('XXX')
        # md5 of the literal payload 'XXX'.
        image_checksum = 'bc9189406be84ec297464a514221406d'
        try:
            uri, add_size, add_checksum, _ = store.add(image_id, image_data, 3)
        except NotImplementedError:
            msg = 'Configured store can not add images'
            self.skipTest(msg)
        self.assertEqual(3, add_size)
        self.assertEqual(image_checksum, add_checksum)
        # Use a fresh store instance to read back what was written.
        store = self.get_store()
        location = glance.store.location.Location(
            self.store_name,
            store.get_store_location_class(),
            uri=uri,
            image_id=image_id)
        (get_iter, get_size) = store.get(location)
        self.assertEqual(3, get_size)
        self.assertEqual('XXX', ''.join(get_iter))
        image_size = store.get_size(location)
        self.assertEqual(3, image_size)
        store.delete(location)
        # After deletion the image must no longer be retrievable.
        self.assertRaises(exception.NotFound, store.get, location)

    def test_get_remote_image(self):
        """Get an image that was created externally to Glance"""
        image_id = str(uuid.uuid4())
        try:
            image_uri = self.stash_image(image_id, 'XXX')
        except NotImplementedError:
            msg = 'Configured store can not stash images'
            self.skipTest(msg)
        store = self.get_store()
        location = glance.store.location.Location(
            self.store_name,
            store.get_store_location_class(),
            uri=image_uri)
        (get_iter, get_size) = store.get(location)
        self.assertEqual(3, get_size)
        self.assertEqual('XXX', ''.join(get_iter))
        image_size = store.get_size(location)
        self.assertEqual(3, image_size)

View File

@ -1,91 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Functional tests for the Cinder store interface
"""
import os
import oslo.config.cfg
import testtools
import glance.store.cinder as cinder
import glance.tests.functional.store as store_tests
import glance.tests.functional.store.test_swift as store_tests_swift
import glance.tests.utils
def parse_config(config):
    """Pull the cinder test options out of *config*'s DEFAULT section.

    :param config: a ConfigParser-style object exposing defaults()
    :returns: dict mapping each required option name to its value
    """
    defaults = config.defaults()
    wanted = (
        'test_cinder_store_auth_address',
        'test_cinder_store_auth_version',
        'test_cinder_store_tenant',
        'test_cinder_store_user',
        'test_cinder_store_key',
    )
    return dict((name, defaults[name]) for name in wanted)
class TestCinderStore(store_tests.BaseTestCase, testtools.TestCase):
    """Functional tests for the cinder store.

    Requires GLANCE_TEST_CINDER_CONF to point at a config describing a
    reachable keystone/cinder deployment; otherwise tests are skipped.
    """

    store_cls_path = 'glance.store.cinder.Store'
    store_cls = glance.store.cinder.Store
    store_name = 'cinder'

    def setUp(self):
        config_path = os.environ.get('GLANCE_TEST_CINDER_CONF')
        if not config_path:
            msg = "GLANCE_TEST_CINDER_CONF environ not set."
            self.skipTest(msg)

        # Load the test config into the global oslo.config object.
        oslo.config.cfg.CONF(args=[], default_config_files=[config_path])
        raw_config = store_tests_swift.read_config(config_path)

        try:
            self.cinder_config = parse_config(raw_config)
            # Authenticate against keystone and build a request context
            # carrying the resulting token and service catalog.
            ret = store_tests_swift.keystone_authenticate(
                self.cinder_config['test_cinder_store_auth_address'],
                self.cinder_config['test_cinder_store_auth_version'],
                self.cinder_config['test_cinder_store_tenant'],
                self.cinder_config['test_cinder_store_user'],
                self.cinder_config['test_cinder_store_key'])
            (tenant_id, auth_token, service_catalog) = ret
            self.context = glance.context.RequestContext(
                tenant=tenant_id,
                service_catalog=service_catalog,
                auth_tok=auth_token)
            self.cinder_client = cinder.get_cinderclient(self.context)
        except Exception as e:
            # Any failure here means the backend isn't usable: skip.
            msg = "Cinder backend isn't set up: %s" % e
            self.skipTest(msg)

        super(TestCinderStore, self).setUp()

    def get_store(self, **kwargs):
        # Fall back to the authenticated setUp() context when none given.
        store = cinder.Store(context=kwargs.get('context') or self.context)
        return store

    def stash_image(self, image_id, image_data):
        #(zhiyan): Currently cinder store is a partial implementation,
        # after Cinder expose 'brick' library, 'host-volume-attaching' and
        # 'multiple-attaching' enhancement ready, the store will support
        # ADD/GET/DELETE interface.
        raise NotImplementedError('stash_image can not be implemented so far')

View File

@ -1,60 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Functional tests for the File store interface
"""
import os
import os.path
import fixtures
import oslo.config.cfg
import testtools
import glance.store.filesystem
import glance.tests.functional.store as store_tests
class TestFilesystemStore(store_tests.BaseTestCase, testtools.TestCase):
    """Functional tests for the filesystem store, backed by a temp dir."""

    store_cls_path = 'glance.store.filesystem.Store'
    store_cls = glance.store.filesystem.Store
    store_name = 'filesystem'

    def setUp(self):
        super(TestFilesystemStore, self).setUp()
        # Per-test temp directory; the fixture removes it on cleanup.
        self.tmp_dir = self.useFixture(fixtures.TempDir()).path

        self.store_dir = os.path.join(self.tmp_dir, 'images')
        os.mkdir(self.store_dir)

        # Write a minimal glance.conf pointing the store at store_dir,
        # then load it into the global oslo.config object.
        config_file = os.path.join(self.tmp_dir, 'glance.conf')
        with open(config_file, 'w') as fap:
            fap.write("[DEFAULT]\n")
            fap.write("filesystem_store_datadir=%s" % self.store_dir)
        oslo.config.cfg.CONF(default_config_files=[config_file], args=[])

    def get_store(self, **kwargs):
        store = glance.store.filesystem.Store(context=kwargs.get('context'))
        store.configure()
        store.configure_add()
        return store

    def stash_image(self, image_id, image_data):
        # Bypass the store API: drop the bytes directly into the datadir.
        filepath = os.path.join(self.store_dir, image_id)
        with open(filepath, 'w') as fap:
            fap.write(image_data)
        return 'file://%s' % filepath

View File

@ -1,86 +0,0 @@
# Copyright 2013 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Functional tests for the gridfs store interface
Set the GLANCE_TEST_GRIDFS_CONF environment variable to the location
of a Glance config that defines how to connect to a functional
GridFS backend
"""
import ConfigParser
import os
import oslo.config.cfg
import testtools
import glance.store.gridfs
import glance.tests.functional.store as store_tests
try:
import gridfs
import pymongo
except ImportError:
gridfs = None
def read_config(path):
    """Parse the INI file at *path* and return the raw config parser."""
    parser = ConfigParser.RawConfigParser()
    parser.read(path)
    return parser
def parse_config(config):
    """Extract the gridfs options from *config*'s DEFAULT section.

    :param config: a ConfigParser-style object exposing defaults()
    :returns: dict with the mongodb store options
    """
    defaults = config.defaults()
    wanted = ('mongodb_store_db', 'mongodb_store_uri')
    return dict((name, defaults[name]) for name in wanted)
class TestGridfsStore(store_tests.BaseTestCase, testtools.TestCase):
    """Functional tests for the gridfs store.

    Skipped unless GLANCE_TEST_GRIDFS_CONF is set and the gridfs/pymongo
    libraries are importable.
    """
    store_cls_path = 'glance.store.gridfs.Store'
    store_cls = glance.store.gridfs.Store
    store_name = 'gridfs'

    def setUp(self):
        config_path = os.environ.get('GLANCE_TEST_GRIDFS_CONF')
        # `gridfs` is None when the optional import failed at module load.
        if not config_path or not gridfs:
            msg = "GLANCE_TEST_GRIDFS_CONF environ not set."
            self.skipTest(msg)

        # Load the test config into the global oslo.config object.
        oslo.config.cfg.CONF(args=[], default_config_files=[config_path])
        raw_config = read_config(config_path)
        self.gfs_config = parse_config(raw_config)
        super(TestGridfsStore, self).setUp()

    def get_store(self, **kwargs):
        store = self.store_cls(context=kwargs.get('context'))
        store.configure()
        store.configure_add()
        return store

    def stash_image(self, image_id, image_data):
        # Bypass the store API: write straight into GridFS via pymongo.
        conn = pymongo.MongoClient(self.gfs_config.get("mongodb_store_uri"))
        fs = gridfs.GridFS(conn[self.gfs_config.get("mongodb_store_db")])
        fs.put(image_data, _id=image_id)
        return 'gridfs://%s' % image_id

View File

@ -1,87 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Functional tests for the File store interface
"""
import BaseHTTPServer
import os
import signal
import testtools
import glance.store.http
import glance.tests.functional.store as store_tests
def get_handler_class(fixture):
    """Build a BaseHTTPServer handler class that always serves *fixture*.

    GET returns the fixture body; HEAD returns only the headers. Both
    advertise the fixture's length via Content-Length.
    """
    class StaticHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def _respond(self, include_body):
            # Shared 200 + Content-Length response for GET and HEAD.
            self.send_response(200)
            self.send_header('Content-Length', str(len(fixture)))
            self.end_headers()
            if include_body:
                self.wfile.write(fixture)

        def do_GET(self):
            self._respond(True)

        def do_HEAD(self):
            self._respond(False)

        def log_message(*args, **kwargs):
            # Override this method to prevent debug output from going
            # to stderr during testing
            return

    return StaticHTTPRequestHandler
def http_server(image_id, image_data):
    """Fork a child process serving *image_data* over HTTP.

    Binds to an ephemeral port on 127.0.0.1, forks, and in the parent
    returns (child_pid, port). Callers are responsible for killing the
    child pid when done.

    NOTE(review): in the child process this function falls off the end
    (returning None) after serve_forever(), and the child never exits on
    its own — confirm all callers os.kill() the returned pid.
    """
    server_address = ('127.0.0.1', 0)
    handler_class = get_handler_class(image_data)
    httpd = BaseHTTPServer.HTTPServer(server_address, handler_class)
    # Port 0 above means "pick any free port"; read back what was chosen.
    port = httpd.socket.getsockname()[1]
    pid = os.fork()
    if pid == 0:
        # Child: serve requests until killed.
        httpd.serve_forever()
    else:
        # Parent: hand back the child's pid and the bound port.
        return pid, port
class TestHTTPStore(store_tests.BaseTestCase, testtools.TestCase):
    """Functional tests for the read-only HTTP store, backed by a forked
    single-purpose HTTP server."""

    store_cls_path = 'glance.store.http.Store'
    store_cls = glance.store.http.Store
    store_name = 'http'

    def setUp(self):
        super(TestHTTPStore, self).setUp()
        # Pid of the forked fixture server, if one was started.
        self.kill_pid = None

    def tearDown(self):
        # Reap the fixture server started by stash_image(), if any.
        if self.kill_pid is not None:
            os.kill(self.kill_pid, signal.SIGKILL)

        super(TestHTTPStore, self).tearDown()

    def get_store(self, **kwargs):
        store = glance.store.http.Store(context=kwargs.get('context'))
        store.configure()
        return store

    def stash_image(self, image_id, image_data):
        # Serve the fixture from a forked child; remember its pid so
        # tearDown can kill it.
        self.kill_pid, http_port = http_server(image_id, image_data)
        return 'http://127.0.0.1:%s/' % (http_port,)

View File

@ -1,162 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Functional tests for the RBD store interface.
Set the GLANCE_TEST_RBD_CONF environment variable to the location
of a Glance config that defines how to connect to a functional
RBD backend. This backend must be running Ceph Bobtail (0.56) or later.
"""
import ConfigParser
import os
import uuid
import oslo.config.cfg
import six
import testtools
from glance.common import exception
import glance.store.rbd
import glance.tests.functional.store as store_tests
try:
import rados
import rbd
except ImportError:
rados = None
def read_config(path):
    """Parse the INI file at *path* and return the raw config parser."""
    parser = ConfigParser.RawConfigParser()
    parser.read(path)
    return parser
def parse_config(config):
    """Extract the rbd options from *config*'s DEFAULT section.

    :param config: a ConfigParser-style object exposing defaults()
    :returns: dict with the rbd store options
    """
    defaults = config.defaults()
    wanted = (
        'rbd_store_chunk_size',
        'rbd_store_pool',
        'rbd_store_user',
        'rbd_store_ceph_conf',
    )
    return dict((name, defaults[name]) for name in wanted)
class TestRBDStore(store_tests.BaseTestCase, testtools.TestCase):
    """Functional tests for the rbd store.

    Skipped unless GLANCE_TEST_RBD_CONF points at a config for a
    reachable Ceph cluster and the rados/rbd libraries are importable.
    """

    store_cls_path = 'glance.store.rbd.Store'
    store_cls = glance.store.rbd.Store
    store_name = 'rbd'

    def setUp(self):
        config_path = os.environ.get('GLANCE_TEST_RBD_CONF')
        if not config_path:
            msg = "GLANCE_TEST_RBD_CONF environ not set."
            self.skipTest(msg)
        # Load the test config into the global oslo.config object.
        oslo.config.cfg.CONF(args=[], default_config_files=[config_path])
        raw_config = read_config(config_path)
        config = parse_config(raw_config)
        if rados is None:
            self.skipTest("rados python library not found")
        rados_client = rados.Rados(conffile=config['rbd_store_ceph_conf'],
                                   rados_id=config['rbd_store_user'])
        try:
            rados_client.connect()
        except rados.Error as e:
            self.skipTest("Failed to connect to RADOS: %s" % e)
        try:
            rados_client.create_pool(config['rbd_store_pool'])
        except rados.Error as e:
            rados_client.shutdown()
            # FIX: the original left the "%s" placeholder unformatted,
            # hiding the actual error from the skip message.
            self.skipTest("Failed to create pool: %s" % e)
        self.rados_client = rados_client
        self.rbd_config = config
        super(TestRBDStore, self).setUp()

    def tearDown(self):
        # Drop the per-run pool and close the cluster handle.
        self.rados_client.delete_pool(self.rbd_config['rbd_store_pool'])
        self.rados_client.shutdown()
        super(TestRBDStore, self).tearDown()

    def get_store(self, **kwargs):
        store = glance.store.rbd.Store(context=kwargs.get('context'))
        return store

    def stash_image(self, image_id, image_data):
        # Bypass the store API: create the rbd image and snapshot
        # directly with librbd, then hand back the canonical rbd URI.
        fsid = self.rados_client.get_fsid()
        pool = self.rbd_config['rbd_store_pool']
        librbd = rbd.RBD()
        # image_id must not be unicode since librbd doesn't handle it
        image_id = str(image_id)
        snap_name = 'snap'
        with self.rados_client.open_ioctx(pool) as ioctx:
            librbd.create(ioctx, image_id, len(image_data), old_format=False,
                          features=rbd.RBD_FEATURE_LAYERING)
            with rbd.Image(ioctx, image_id) as image:
                image.write(image_data, 0)
                image.create_snap(snap_name)

        return 'rbd://%s/%s/%s/%s' % (fsid, pool, image_id, snap_name)

    def test_unicode(self):
        # librbd does not handle unicode, so make sure
        # all paths through the rbd store convert a unicode image id
        # and uri to ascii before passing it to librbd.
        store = self.get_store()

        image_id = six.text_type(str(uuid.uuid4()))
        image_size = 300
        image_data = six.StringIO('X' * image_size)
        # md5 of 'X' * 300.
        image_checksum = '41757066eaff7c4c6c965556b4d3c6c5'

        uri, add_size, add_checksum = store.add(image_id,
                                                image_data,
                                                image_size)
        uri = six.text_type(uri)
        self.assertEqual(image_size, add_size)
        self.assertEqual(image_checksum, add_checksum)

        location = glance.store.location.Location(
            self.store_name,
            store.get_store_location_class(),
            uri=uri,
            image_id=image_id)
        self.assertEqual(image_size, store.get_size(location))

        get_iter, get_size = store.get(location)
        self.assertEqual(image_size, get_size)
        self.assertEqual('X' * image_size, ''.join(get_iter))

        store.delete(location)
        self.assertRaises(exception.NotFound, store.get, location)

View File

@ -1,124 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Functional tests for the S3 store interface
Set the GLANCE_TEST_S3_CONF environment variable to the location
of a Glance config that defines how to connect to a functional
S3 backend
"""
import ConfigParser
import os
import os.path
import oslo.config.cfg
import six.moves.urllib.parse as urlparse
import testtools
import glance.store.s3
import glance.tests.functional.store as store_tests
try:
from boto.s3.connection import S3Connection
except ImportError:
S3Connection = None
def read_config(path):
    """Parse the INI file at *path* and return the raw config parser."""
    parser = ConfigParser.RawConfigParser()
    parser.read(path)
    return parser
def parse_config(config):
    """Extract the s3 options from *config*'s DEFAULT section.

    :param config: a ConfigParser-style object exposing defaults()
    :returns: dict with the s3 store options
    """
    defaults = config.defaults()
    wanted = (
        's3_store_host',
        's3_store_access_key',
        's3_store_secret_key',
        's3_store_bucket',
        's3_store_bucket_url_format',
    )
    return dict((name, defaults[name]) for name in wanted)
def s3_connect(s3_host, access_key, secret_key, calling_format):
    """Open a non-TLS boto S3 connection to *s3_host*."""
    connection = S3Connection(access_key,
                              secret_key,
                              host=s3_host,
                              is_secure=False,
                              calling_format=calling_format)
    return connection
def s3_put_object(s3_client, bucket_name, object_name, contents):
    """Store *contents* as *object_name* inside *bucket_name*."""
    target_bucket = s3_client.get_bucket(bucket_name)
    new_key = target_bucket.new_key(object_name)
    new_key.set_contents_from_string(contents)
class TestS3Store(store_tests.BaseTestCase, testtools.TestCase):
    """Functional tests for the s3 store.

    Skipped unless GLANCE_TEST_S3_CONF points at a config describing a
    reachable S3-compatible endpoint.
    """

    store_cls_path = 'glance.store.s3.Store'
    store_cls = glance.store.s3.Store
    store_name = 's3'

    def setUp(self):
        config_path = os.environ.get('GLANCE_TEST_S3_CONF')
        if not config_path:
            msg = "GLANCE_TEST_S3_CONF environ not set."
            self.skipTest(msg)
        # Load the test config into the global oslo.config object.
        oslo.config.cfg.CONF(args=[], default_config_files=[config_path])
        raw_config = read_config(config_path)
        config = parse_config(raw_config)
        calling_format = glance.store.s3.get_calling_format(
            config['s3_store_bucket_url_format'])
        s3_client = s3_connect(config['s3_store_host'],
                               config['s3_store_access_key'],
                               config['s3_store_secret_key'],
                               calling_format)
        #NOTE(bcwaldon): ensure we have a functional S3 connection
        s3_client.get_all_buckets()

        self.s3_client = s3_client
        self.s3_config = config
        super(TestS3Store, self).setUp()

    def get_store(self, **kwargs):
        store = glance.store.s3.Store(context=kwargs.get('context'))
        store.configure()
        store.configure_add()
        return store

    def stash_image(self, image_id, image_data):
        # Bypass the store API: upload directly with boto, then build the
        # credential-embedding s3:// URI the store expects.
        bucket_name = self.s3_config['s3_store_bucket']
        s3_put_object(self.s3_client, bucket_name, image_id, 'XXX')

        s3_store_host = urlparse.urlparse(self.s3_config['s3_store_host'])
        access_key = urlparse.quote(self.s3_config['s3_store_access_key'])
        secret_key = self.s3_config['s3_store_secret_key']
        auth_chunk = '%s:%s' % (access_key, secret_key)
        netloc = '%s@%s' % (auth_chunk, s3_store_host.netloc)
        path = os.path.join(s3_store_host.path, bucket_name, image_id)

        # This is an s3 url with /<BUCKET>/<OBJECT> on the end
        return 's3://%s%s' % (netloc, path)

View File

@ -1,77 +0,0 @@
# Copyright 2013 Taobao Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Functional tests for the Sheepdog store interface
"""
import os
import os.path
import fixtures
import oslo.config.cfg
import testtools
from glance.store import BackendException
import glance.store.sheepdog as sheepdog
import glance.tests.functional.store as store_tests
import glance.tests.utils
class TestSheepdogStore(store_tests.BaseTestCase, testtools.TestCase):
    """Functional tests for the sheepdog store.

    Skipped when a probe image cannot be created, i.e. no sheepdog
    cluster is reachable at the default address/port.
    """

    store_cls_path = 'glance.store.sheepdog.Store'
    store_cls = glance.store.sheepdog.Store
    store_name = 'sheepdog'

    def setUp(self):
        # Probe the cluster by creating (and immediately deleting) a
        # throwaway 512-byte image.
        image = sheepdog.SheepdogImage(sheepdog.DEFAULT_ADDR,
                                       sheepdog.DEFAULT_PORT,
                                       "test",
                                       sheepdog.DEFAULT_CHUNKSIZE)
        try:
            image.create(512)
        except BackendException:
            msg = "Sheepdog cluster isn't set up"
            self.skipTest(msg)
        image.delete()

        self.tmp_dir = self.useFixture(fixtures.TempDir()).path

        # Point the global config at a minimal conf selecting sheepdog.
        config_file = os.path.join(self.tmp_dir, 'glance.conf')
        with open(config_file, 'w') as f:
            f.write("[DEFAULT]\n")
            f.write("default_store = sheepdog")
        oslo.config.cfg.CONF(default_config_files=[config_file], args=[])

        super(TestSheepdogStore, self).setUp()

    def get_store(self, **kwargs):
        store = sheepdog.Store(context=kwargs.get('context'))
        return store

    def stash_image(self, image_id, image_data):
        # Bypass the store API and write the image chunk-by-chunk.
        image_size = len(image_data)
        image = sheepdog.SheepdogImage(sheepdog.DEFAULT_ADDR,
                                       sheepdog.DEFAULT_PORT,
                                       image_id,
                                       sheepdog.DEFAULT_CHUNKSIZE)
        image.create(image_size)
        total = left = image_size
        while left > 0:
            length = min(sheepdog.DEFAULT_CHUNKSIZE, left)
            # NOTE(review): the full image_data buffer is passed on every
            # iteration with only the offset/length advancing — presumably
            # SheepdogImage.write slices internally; confirm against its
            # implementation.
            image.write(image_data, total - left, length)
            left -= length
        return 'sheepdog://%s' % image_id

View File

@ -1,535 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Functional tests for the Swift store interface
Set the GLANCE_TEST_SWIFT_CONF environment variable to the location
of a Glance config that defines how to connect to a functional
Swift backend
"""
import ConfigParser
import hashlib
import os
import os.path
import random
import string
import uuid
import oslo.config.cfg
import six
import six.moves.urllib.parse as urlparse
import testtools
from glance.common import exception
import glance.common.utils as common_utils
import glance.store.swift
import glance.tests.functional.store as store_tests
try:
import swiftclient
except ImportError:
swiftclient = None
class SwiftStoreError(RuntimeError):
    """Raised when an operation against the Swift backend fails."""
def _uniq(value):
return '%s.%d' % (value, random.randint(0, 99999))
def read_config(path):
    """Parse the INI file at *path* and return the raw config parser."""
    parser = ConfigParser.RawConfigParser()
    parser.read(path)
    return parser
def parse_config(config):
    """Extract the swift options from *config*'s DEFAULT section.

    :param config: a ConfigParser-style object exposing defaults()
    :returns: dict with the swift store options
    """
    defaults = config.defaults()
    wanted = (
        'swift_store_auth_address',
        'swift_store_auth_version',
        'swift_store_user',
        'swift_store_key',
        'swift_store_container',
    )
    return dict((name, defaults[name]) for name in wanted)
def swift_connect(auth_url, auth_version, user, key):
    """Build a swiftclient Connection.

    The swiftclient import is optional at module level; when it is
    missing, attribute access raises AttributeError, which is converted
    into a SwiftStoreError here.
    """
    try:
        connection = swiftclient.Connection(authurl=auth_url,
                                            auth_version=auth_version,
                                            user=user,
                                            key=key,
                                            snet=False,
                                            retries=1)
    except AttributeError:
        raise SwiftStoreError("Could not find swiftclient module")
    return connection
def swift_list_containers(swift_conn):
    """Return the container listing for the account behind *swift_conn*.

    Wraps any failure from get_account() in SwiftStoreError.
    """
    try:
        _headers, containers = swift_conn.get_account()
    except Exception as e:
        msg = ("Failed to list containers (get_account) "
               "from Swift. Got error: %s" % e)
        raise SwiftStoreError(msg)
    return containers
def swift_create_container(swift_conn, container_name):
    """Create *container_name*, wrapping client errors in SwiftStoreError."""
    try:
        swift_conn.put_container(container_name)
    except swiftclient.ClientException as e:
        raise SwiftStoreError("Failed to create container. Got error: %s" % e)
def swift_get_container(swift_conn, container_name, **kwargs):
    """Fetch *container_name*; extra keyword args pass straight through."""
    response = swift_conn.get_container(container_name, **kwargs)
    return response
def swift_delete_container(swift_conn, container_name):
    """Delete *container_name*, wrapping client errors in SwiftStoreError."""
    try:
        swift_conn.delete_container(container_name)
    except swiftclient.ClientException as e:
        raise SwiftStoreError(
            "Failed to delete container from Swift. Got error: %s" % e)
def swift_put_object(swift_conn, container_name, object_name, contents):
    """Upload *contents* as *object_name*; returns the client's result."""
    result = swift_conn.put_object(container_name, object_name, contents)
    return result
def swift_head_object(swift_conn, container_name, obj_name):
    """Return the client's HEAD result for *obj_name* in *container_name*."""
    response = swift_conn.head_object(container_name, obj_name)
    return response
def keystone_authenticate(auth_url, auth_version, tenant_name,
                          username, password):
    """Authenticate against keystone and return the session details.

    :param auth_url: keystone endpoint URL
    :param auth_version: must be 2 (v2.0 identity API)
    :param tenant_name: tenant to scope the token to
    :param username: user to authenticate as
    :param password: the user's password
    :returns: tuple of (tenant_id, auth_token, service_catalog)
    """
    assert int(auth_version) == 2, 'Only auth version 2 is supported'

    # Imported lazily so the module loads without keystoneclient installed.
    import keystoneclient.v2_0.client
    ksclient = keystoneclient.v2_0.client.Client(tenant_name=tenant_name,
                                                 username=username,
                                                 password=password,
                                                 auth_url=auth_url)

    auth_resp = ksclient.service_catalog.catalog
    tenant_id = auth_resp['token']['tenant']['id']
    service_catalog = auth_resp['serviceCatalog']
    return tenant_id, ksclient.auth_token, service_catalog
class TestSwiftStore(store_tests.BaseTestCase, testtools.TestCase):
    """Functional tests exercising the Swift store driver end to end.

    Skipped unless GLANCE_TEST_SWIFT_CONF points at a usable config
    (see setUp).
    """

    # Dotted path and class of the store under test, plus its scheme name.
    store_cls_path = 'glance.store.swift.Store'
    store_cls = glance.store.swift.Store
    store_name = 'swift'
def setUp(self):
    """Load swift config and verify the backend is reachable, or skip."""
    config_path = os.environ.get('GLANCE_TEST_SWIFT_CONF')
    if not config_path:
        msg = "GLANCE_TEST_SWIFT_CONF environ not set."
        self.skipTest(msg)
    # Feed the config file into oslo.config's global CONF object as well,
    # since the store reads its options from there.
    oslo.config.cfg.CONF(args=[], default_config_files=[config_path])
    raw_config = read_config(config_path)
    config = parse_config(raw_config)
    swift = swift_connect(config['swift_store_auth_address'],
                          config['swift_store_auth_version'],
                          config['swift_store_user'],
                          config['swift_store_key'])
    # NOTE(bcwaldon): Ensure we have a functional swift connection
    swift_list_containers(swift)
    self.swift_client = swift
    self.swift_config = config
    # Let the store create its container on first write.
    self.swift_config['swift_store_create_container_on_put'] = True
    super(TestSwiftStore, self).setUp()
def get_store(self, **kwargs):
    """Build a swift Store bound to the (optional) request context."""
    context = kwargs.get('context')
    return glance.store.swift.Store(context=context)
def test_object_chunking(self):
    """Upload an image that is split into multiple swift objects.

    We specifically check the case that
    image_size % swift_store_large_object_chunk_size != 0 to
    ensure we aren't losing image data.
    """
    self.config(
        swift_store_large_object_size=2,  # 2 MB
        swift_store_large_object_chunk_size=2,  # 2 MB
    )
    store = self.get_store()
    image_id = str(uuid.uuid4())
    image_size = 5242880  # 5 MB
    image_data = six.StringIO('X' * image_size)
    # Expected checksum of the payload above; add() must report the same.
    image_checksum = 'eb7f8c3716b9f059cee7617a4ba9d0d3'
    uri, add_size, add_checksum, _ = store.add(image_id,
                                               image_data,
                                               image_size)
    self.assertEqual(image_size, add_size)
    self.assertEqual(image_checksum, add_checksum)
    location = glance.store.location.Location(
        self.store_name,
        store.get_store_location_class(),
        uri=uri,
        image_id=image_id)
    # Store interface should still be respected even though
    # we are storing images in multiple Swift objects
    (get_iter, get_size) = store.get(location)
    self.assertEqual(5242880, get_size)
    self.assertEqual('X' * 5242880, ''.join(get_iter))
    # The object should have a manifest pointing to the chunks
    # of image data
    swift_location = location.store_location
    headers = swift_head_object(self.swift_client,
                                swift_location.container,
                                swift_location.obj)
    manifest = headers.get('x-object-manifest')
    self.assertTrue(manifest)
    # Verify the objects in the manifest exist
    manifest_container, manifest_prefix = manifest.split('/', 1)
    container = swift_get_container(self.swift_client,
                                    manifest_container,
                                    prefix=manifest_prefix)
    segments = [segment['name'] for segment in container[1]]
    for segment in segments:
        headers = swift_head_object(self.swift_client,
                                    manifest_container,
                                    segment)
        self.assertTrue(headers.get('content-length'))
    # Since we used a 5 MB image with a 2 MB chunk size, we should
    # expect to see three data objects
    self.assertEqual(3, len(segments), 'Got segments %s' % segments)
    # Add an object that should survive the delete operation
    non_image_obj = image_id + '0'
    swift_put_object(self.swift_client,
                     manifest_container,
                     non_image_obj,
                     'XXX')
    store.delete(location)
    # Verify the segments in the manifest are all gone
    for segment in segments:
        self.assertRaises(swiftclient.ClientException,
                          swift_head_object,
                          self.swift_client,
                          manifest_container,
                          segment)
    # Verify the manifest is gone too
    self.assertRaises(swiftclient.ClientException,
                      swift_head_object,
                      self.swift_client,
                      manifest_container,
                      swift_location.obj)
    # Verify that the non-image object was not deleted
    headers = swift_head_object(self.swift_client,
                                manifest_container,
                                non_image_obj)
    self.assertTrue(headers.get('content-length'))
    # Clean up
    self.swift_client.delete_object(manifest_container,
                                    non_image_obj)
    # Simulate exceeding 'image_size_cap' setting: the reader caps reads
    # one byte short of the data, so add() must abort mid-upload.
    image_data = six.StringIO('X' * image_size)
    image_data = common_utils.LimitingReader(image_data, image_size - 1)
    image_id = str(uuid.uuid4())
    self.assertRaises(exception.ImageSizeLimitExceeded,
                      store.add,
                      image_id,
                      image_data,
                      image_size)
    # Verify written segments have been deleted (partial upload cleanup)
    container = swift_get_container(self.swift_client,
                                    manifest_container,
                                    prefix=image_id)
    segments = [segment['name'] for segment in container[1]]
    self.assertEqual(0, len(segments), 'Got segments %s' % segments)
def test_retries_fail_start_of_download(self):
    """
    Get an object from Swift where Swift does not complete the request
    in one attempt. Fails at the start of the download.
    """
    self.config(
        swift_store_retry_get_count=1,
    )
    store = self.get_store()
    image_id = str(uuid.uuid4())
    image_size = 1024 * 1024 * 5  # 5 MB
    chars = string.ascii_uppercase + string.digits
    image_data = ''.join(random.choice(chars) for x in range(image_size))
    image_checksum = hashlib.md5(image_data)
    uri, add_size, add_checksum, _ = store.add(image_id,
                                               image_data,
                                               image_size)
    location = glance.store.location.Location(
        self.store_name,
        store.get_store_location_class(),
        uri=uri,
        image_id=image_id)

    def iter_wrapper(iterable):
        # Simulate a download that dies immediately: yield a single empty
        # chunk and then end the iteration.
        yield ''

    # Swap in the failing iterator; swift_retry_iter should transparently
    # retry and still deliver the complete, checksum-correct image.
    (get_iter, get_size) = store.get(location)
    get_iter.wrapped = glance.store.swift.swift_retry_iter(
        iter_wrapper(get_iter.wrapped), image_size,
        store, location.store_location)
    self.assertEqual(image_size, get_size)
    received_data = ''.join(get_iter.wrapped)
    self.assertEqual(image_data, received_data)
    self.assertEqual(image_checksum.hexdigest(),
                     hashlib.md5(received_data).hexdigest())
def test_retries_fail_partway_through_download(self):
    """
    Get an object from Swift where Swift does not complete the request
    in one attempt. Fails partway through the download.
    """
    self.config(
        swift_store_retry_get_count=1,
    )
    store = self.get_store()
    image_id = str(uuid.uuid4())
    image_size = 1024 * 1024 * 5  # 5 MB
    chars = string.ascii_uppercase + string.digits
    image_data = ''.join(random.choice(chars) for x in range(image_size))
    image_checksum = hashlib.md5(image_data)
    uri, add_size, add_checksum, _ = store.add(image_id,
                                               image_data,
                                               image_size)
    location = glance.store.location.Location(
        self.store_name,
        store.get_store_location_class(),
        uri=uri,
        image_id=image_id)

    def iter_wrapper(iterable):
        # Abort the transfer once more than half the image has streamed.
        # NOTE(review): ending a generator via `raise StopIteration` is
        # pre-PEP-479 (Python 2) behavior; it becomes RuntimeError on
        # Python 3.7+ — confirm before porting.
        bytes_received = 0
        for chunk in iterable:
            yield chunk
            bytes_received += len(chunk)
            if bytes_received > (image_size / 2):
                raise StopIteration

    # Swap in the failing iterator; swift_retry_iter should resume from
    # the failure point and deliver the complete image.
    (get_iter, get_size) = store.get(location)
    get_iter.wrapped = glance.store.swift.swift_retry_iter(
        iter_wrapper(get_iter.wrapped), image_size,
        store, location.store_location)
    self.assertEqual(image_size, get_size)
    received_data = ''.join(get_iter.wrapped)
    self.assertEqual(image_data, received_data)
    self.assertEqual(image_checksum.hexdigest(),
                     hashlib.md5(received_data).hexdigest())
def test_retries_fail_end_of_download(self):
    """
    Get an object from Swift where Swift does not complete the request
    in one attempt. Fails at the end of the download
    """
    self.config(
        swift_store_retry_get_count=1,
    )
    store = self.get_store()
    image_id = str(uuid.uuid4())
    image_size = 1024 * 1024 * 5  # 5 MB
    chars = string.ascii_uppercase + string.digits
    image_data = ''.join(random.choice(chars) for x in range(image_size))
    image_checksum = hashlib.md5(image_data)
    uri, add_size, add_checksum, _ = store.add(image_id,
                                               image_data,
                                               image_size)
    location = glance.store.location.Location(
        self.store_name,
        store.get_store_location_class(),
        uri=uri,
        image_id=image_id)

    def iter_wrapper(iterable):
        # Abort the transfer exactly when the final byte has streamed.
        # NOTE(review): ending a generator via `raise StopIteration` is
        # pre-PEP-479 (Python 2) behavior; it becomes RuntimeError on
        # Python 3.7+ — confirm before porting.
        bytes_received = 0
        for chunk in iterable:
            yield chunk
            bytes_received += len(chunk)
            if bytes_received == image_size:
                raise StopIteration

    # Swap in the failing iterator; swift_retry_iter should still produce
    # the complete, checksum-correct image.
    (get_iter, get_size) = store.get(location)
    get_iter.wrapped = glance.store.swift.swift_retry_iter(
        iter_wrapper(get_iter.wrapped), image_size,
        store, location.store_location)
    self.assertEqual(image_size, get_size)
    received_data = ''.join(get_iter.wrapped)
    self.assertEqual(image_data, received_data)
    self.assertEqual(image_checksum.hexdigest(),
                     hashlib.md5(received_data).hexdigest())
def stash_image(self, image_id, image_data):
    """Plant an object directly in Swift and return its swift+http URI.

    NOTE(review): *image_data* is ignored here — a literal 'XXX' payload
    is written instead; confirm callers don't rely on the real data.
    """
    container_name = self.swift_config['swift_store_container']
    swift_put_object(self.swift_client,
                     container_name,
                     image_id,
                     'XXX')
    # NOTE(bcwaldon): This is a hack until we find a better way to
    # build this URL
    auth_url = self.swift_config['swift_store_auth_address']
    auth_url = urlparse.urlparse(auth_url)
    user = urlparse.quote(self.swift_config['swift_store_user'])
    key = self.swift_config['swift_store_key']
    # Embed "user:key@" credentials in the netloc of the returned URI.
    netloc = ''.join(('%s:%s' % (user, key), '@', auth_url.netloc))
    path = os.path.join(auth_url.path, container_name, image_id)
    # This is an auth url with /<CONTAINER>/<OBJECT> on the end
    return 'swift+http://%s%s' % (netloc, path)
def test_multitenant(self):
    """Ensure an image is properly configured when using multitenancy."""
    self.config(
        swift_store_multi_tenant=True,
    )
    # swift_store_user is "tenant:username"; authenticate via keystone to
    # get a real service catalog for the request context.
    swift_store_user = self.swift_config['swift_store_user']
    tenant_name, username = swift_store_user.split(':')
    tenant_id, auth_token, service_catalog = keystone_authenticate(
        self.swift_config['swift_store_auth_address'],
        self.swift_config['swift_store_auth_version'],
        tenant_name,
        username,
        self.swift_config['swift_store_key'])
    context = glance.context.RequestContext(
        tenant=tenant_id,
        service_catalog=service_catalog,
        auth_tok=auth_token)
    store = self.get_store(context=context)
    image_id = str(uuid.uuid4())
    image_data = six.StringIO('XXX')
    uri, _, _, _ = store.add(image_id, image_data, 3)
    location = glance.store.location.Location(
        self.store_name,
        store.get_store_location_class(),
        uri=uri,
        image_id=image_id)
    read_tenant = str(uuid.uuid4())
    write_tenant = str(uuid.uuid4())
    # Private image: container ACLs should list exactly the granted
    # tenants, each with a ":*" (any user) suffix.
    store.set_acls(location,
                   public=False,
                   read_tenants=[read_tenant],
                   write_tenants=[write_tenant])
    container_name = location.store_location.container
    container, _ = swift_get_container(self.swift_client, container_name)
    self.assertEqual(read_tenant + ':*',
                     container.get('x-container-read'))
    self.assertEqual(write_tenant + ':*',
                     container.get('x-container-write'))
    # Public image: world-readable referrer ACL, no write grants.
    store.set_acls(location, public=True, read_tenants=[read_tenant])
    container_name = location.store_location.container
    container, _ = swift_get_container(self.swift_client, container_name)
    self.assertEqual('.r:*,.rlistings', container.get('x-container-read'))
    self.assertEqual('', container.get('x-container-write', ''))
    # The image itself must still round-trip through the store.
    (get_iter, get_size) = store.get(location)
    self.assertEqual(3, get_size)
    self.assertEqual('XXX', ''.join(get_iter))
    store.delete(location)
def test_delayed_delete_with_auth(self):
    """Ensure delete works with delayed delete and auth

    Reproduces LP bug 1238604.
    """
    self.config(
        scrubber_datadir="/tmp",
    )
    # Build an authenticated request context (swift_store_user is
    # "tenant:username").
    swift_store_user = self.swift_config['swift_store_user']
    tenant_name, username = swift_store_user.split(':')
    tenant_id, auth_token, service_catalog = keystone_authenticate(
        self.swift_config['swift_store_auth_address'],
        self.swift_config['swift_store_auth_version'],
        tenant_name,
        username,
        self.swift_config['swift_store_key'])
    context = glance.context.RequestContext(
        tenant=tenant_id,
        service_catalog=service_catalog,
        auth_tok=auth_token)
    store = self.get_store(context=context)
    image_id = str(uuid.uuid4())
    image_data = six.StringIO('data')
    uri, _, _, _ = store.add(image_id, image_data, 4)
    location = glance.store.location.Location(
        self.store_name,
        store.get_store_location_class(),
        uri=uri,
        image_id=image_id)
    container_name = location.store_location.container
    container, _ = swift_get_container(self.swift_client, container_name)
    (get_iter, get_size) = store.get(location)
    self.assertEqual(4, get_size)
    self.assertEqual('data', ''.join(get_iter))
    # Queue the image for delayed deletion, then delete directly; neither
    # step should fail when auth credentials are involved.
    glance.store.schedule_delayed_delete_from_backend(context,
                                                      uri,
                                                      image_id)
    store.delete(location)

View File

@ -1,176 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Functional tests for the VMware Datastore store interface
Set the GLANCE_TEST_VMWARE_CONF environment variable to the location
of a Glance config that defines how to connect to a functional
VMware Datastore backend
"""
import ConfigParser
import httplib
import logging
import os
import uuid
import oslo.config.cfg
from oslo.vmware import api
import six
import six.moves.urllib.parse as urlparse
import testtools
from glance.common import exception
import glance.store.location
import glance.store.vmware_datastore as vm_store
import glance.tests.functional.store as store_tests
logging.getLogger('suds').setLevel(logging.INFO)
def read_config(path):
    """Load the raw VMware test configuration file at *path*."""
    parser = ConfigParser.RawConfigParser()
    parser.read(path)
    return parser
def parse_config(config):
    """Extract the VMware store options these tests need from *config*.

    Returns a plain dict keyed by option name; raises KeyError if any
    required option is missing from the config defaults.
    """
    wanted = (
        'vmware_server_host',
        'vmware_server_username',
        'vmware_server_password',
        'vmware_api_retry_count',
        'vmware_task_poll_interval',
        'vmware_store_image_dir',
        'vmware_datacenter_path',
        'vmware_datastore_name',
        'vmware_api_insecure',
    )
    defaults = config.defaults()
    return dict((name, defaults[name]) for name in wanted)
class VMwareDatastoreStoreError(RuntimeError):
    """Raised when a VMware datastore operation needed by tests fails."""
def vsphere_connect(server_ip, server_username, server_password,
                    api_retry_count, task_poll_interval,
                    scheme='https', create_session=True, wsdl_loc=None):
    """Open an oslo.vmware API session to the given vSphere host.

    Raises VMwareDatastoreStoreError when the vmware api module is
    unavailable (attribute access on a missing module raises
    AttributeError).
    """
    try:
        session = api.VMwareAPISession(server_ip,
                                       server_username,
                                       server_password,
                                       api_retry_count,
                                       task_poll_interval,
                                       scheme=scheme,
                                       create_session=create_session,
                                       wsdl_loc=wsdl_loc)
    except AttributeError:
        raise VMwareDatastoreStoreError(
            'Could not find VMware datastore module')
    return session
class TestVMwareDatastoreStore(store_tests.BaseTestCase, testtools.TestCase):
    """Functional tests for the VMware Datastore store driver.

    Skipped unless GLANCE_TEST_VMWARE_CONF points at a usable config
    (see setUp).
    """

    # Dotted path and class of the store under test, plus its scheme name.
    store_cls_path = 'glance.store.vmware_datastore.Store'
    store_cls = vm_store.Store
    store_name = 'vmware_datastore'
def _build_vim_cookie_header(self, vim_cookies):
    """Return a "name=value" header string for the first vim session
    cookie, or None when the jar is empty."""
    cookies = list(vim_cookies)
    if not cookies:
        return None
    first = cookies[0]
    return '%s=%s' % (first.name, first.value)
def setUp(self):
    """Load VMware config and open a vSphere session, or skip."""
    config_path = os.environ.get('GLANCE_TEST_VMWARE_CONF')
    if not config_path:
        msg = 'GLANCE_TEST_VMWARE_CONF environ not set.'
        self.skipTest(msg)
    # Feed the config file into oslo.config's global CONF object as well,
    # since the store reads its options from there.
    oslo.config.cfg.CONF(args=[], default_config_files=[config_path])
    raw_config = read_config(config_path)
    config = parse_config(raw_config)
    # Option values come back from ConfigParser as strings, hence the
    # comparison against the literal 'True'.
    scheme = 'http' if config['vmware_api_insecure'] == 'True' else 'https'
    self.vsphere = vsphere_connect(config['vmware_server_host'],
                                   config['vmware_server_username'],
                                   config['vmware_server_password'],
                                   config['vmware_api_retry_count'],
                                   config['vmware_task_poll_interval'],
                                   scheme=scheme)
    self.vmware_config = config
    super(TestVMwareDatastoreStore, self).setUp()
def get_store(self, **kwargs):
    """Build a VMware datastore Store bound to the (optional) context."""
    context = kwargs.get('context')
    return vm_store.Store(context=context)
def stash_image(self, image_id, image_data):
    """PUT *image_data* straight onto the datastore, bypassing the store,
    and return the vsphere URI the store would use to reach it."""
    server_ip = self.vmware_config['vmware_server_host']
    path = os.path.join(
        vm_store.DS_URL_PREFIX,
        self.vmware_config['vmware_store_image_dir'].strip('/'), image_id)
    dc_path = self.vmware_config.get('vmware_datacenter_path',
                                     'ha-datacenter')
    param_list = {'dcPath': dc_path,
                  'dsName': self.vmware_config['vmware_datastore_name']}
    query = urlparse.urlencode(param_list)
    # Option values are strings, hence the comparison against 'True'.
    conn = (httplib.HTTPConnection(server_ip)
            if self.vmware_config['vmware_api_insecure'] == 'True'
            else httplib.HTTPSConnection(server_ip))
    # Reuse the live vim session's cookie so the raw PUT is authenticated.
    cookie = self._build_vim_cookie_header(
        self.vsphere.vim.client.options.transport.cookiejar)
    headers = {'Cookie': cookie, 'Content-Length': len(image_data)}
    url = urlparse.quote('%s?%s' % (path, query))
    conn.request('PUT', url, image_data, headers)
    # Drain the response so the request actually completes.
    conn.getresponse()
    return '%s://%s%s?%s' % (vm_store.STORE_SCHEME, server_ip, path, query)
def test_timeout(self):
    """Every store operation should survive a logged-out vim session.

    The session is explicitly logged out before each call to simulate a
    session timeout; the store is expected to re-establish it.
    """
    store = self.get_store()
    store._session.logout()
    image_id = str(uuid.uuid4())
    image_data = six.StringIO('XXX')
    # Expected checksum of the 'XXX' payload above.
    image_checksum = 'bc9189406be84ec297464a514221406d'
    uri, add_size, add_checksum, _ = store.add(image_id, image_data, 3)
    self.assertEqual(3, add_size)
    self.assertEqual(image_checksum, add_checksum)
    loc = glance.store.location.Location(
        self.store_name,
        store.get_store_location_class(),
        uri=uri,
        image_id=image_id)
    store._session.logout()
    get_iter, get_size = store.get(loc)
    self.assertEqual(3, get_size)
    self.assertEqual('XXX', ''.join(get_iter))
    store._session.logout()
    image_size = store.get_size(loc)
    self.assertEqual(3, image_size)
    store._session.logout()
    store.delete(loc)
    # After deletion the image must be unreachable.
    self.assertRaises(exception.NotFound, store.get, loc)

View File

@ -17,18 +17,14 @@ import os
import sys
import time
import glance_store.location
import httplib2
from six.moves import xrange
import swiftclient
from glance.common import crypt
from glance.openstack.common import jsonutils
from glance.openstack.common import units
from glance.store.swift import StoreLocation
from glance.tests import functional
from glance.tests.functional.store.test_swift import parse_config
from glance.tests.functional.store.test_swift import read_config
from glance.tests.functional.store.test_swift import swift_connect
from glance.tests.utils import execute
@ -133,85 +129,20 @@ class TestScrubber(functional.FunctionalTest):
self.stop_servers()
def test_scrubber_app_against_swift(self):
"""
test that the glance-scrubber script runs successfully against a swift
backend when not in daemon mode
"""
config_path = os.environ.get('GLANCE_TEST_SWIFT_CONF')
if not config_path:
msg = "GLANCE_TEST_SWIFT_CONF environ not set."
self.skipTest(msg)
raw_config = read_config(config_path)
swift_config = parse_config(raw_config)
self.cleanup()
self.start_servers(delayed_delete=True, daemon=False,
metadata_encryption_key='',
default_store='swift', **swift_config)
# add an image
headers = {
'x-image-meta-name': 'test_image',
'x-image-meta-is_public': 'true',
'x-image-meta-disk_format': 'raw',
'x-image-meta-container_format': 'ovf',
'content-type': 'application/octet-stream',
}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', body='XXX',
headers=headers)
# ensure the request was successful and the image is active
self.assertEqual(response.status, 201)
image = jsonutils.loads(content)['image']
self.assertEqual('active', image['status'])
image_id = image['id']
# delete the image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
# ensure the image is marked pending delete
response, content = http.request(path, 'HEAD')
self.assertEqual(response.status, 200)
self.assertEqual('pending_delete', response['x-image-meta-status'])
# wait for the scrub time on the image to pass
time.sleep(self.api_server.scrub_time)
# call the scrubber to scrub images
exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable
cmd = ("%s --config-file %s" %
(exe_cmd, self.scrubber_daemon.conf_file_name))
exitcode, out, err = execute(cmd, raise_error=False)
self.assertEqual(0, exitcode)
# ensure the image has been successfully deleted
self.wait_for_scrub(path)
self.stop_servers()
def test_scrubber_with_metadata_enc(self):
"""
test that files written to scrubber_data_dir use
metadata_encryption_key when available to encrypt the location
"""
config_path = os.environ.get('GLANCE_TEST_SWIFT_CONF')
if not config_path:
msg = "GLANCE_TEST_SWIFT_CONF environ not set."
self.skipTest(msg)
raw_config = read_config(config_path)
swift_config = parse_config(raw_config)
# FIXME(flaper87): It looks like an older commit
# may have broken this test. The file_queue `add_location`
# is not being called.
self.skipTest("Test broken. See bug #1366682")
self.cleanup()
self.start_servers(delayed_delete=True, daemon=True,
default_store='swift', **swift_config)
self.start_servers(delayed_delete=True,
daemon=True,
default_store='file')
# add an image
headers = {
@ -221,6 +152,7 @@ class TestScrubber(functional.FunctionalTest):
'x-image-meta-container_format': 'ovf',
'content-type': 'application/octet-stream',
}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', body='XXX',
@ -231,7 +163,8 @@ class TestScrubber(functional.FunctionalTest):
image_id = image['id']
# delete the image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1",
self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
@ -252,86 +185,13 @@ class TestScrubber(functional.FunctionalTest):
decrypted_uri = crypt.urlsafe_decrypt(
self.api_server.metadata_encryption_key, marker_uri)
loc = StoreLocation({})
loc = glance_store.location.StoreLocation({})
loc.parse_uri(decrypted_uri)
self.assertIn(loc.scheme, ("swift+http", "swift+https"))
self.assertEqual(loc.scheme, "file")
self.assertEqual(image['id'], loc.obj)
self.wait_for_scrub(path)
self.stop_servers()
def test_scrubber_handles_swift_missing(self):
"""
Test that the scrubber handles the case where the image to be scrubbed
is missing from swift
"""
config_path = os.environ.get('GLANCE_TEST_SWIFT_CONF')
if not config_path:
msg = "GLANCE_TEST_SWIFT_CONF environ not set."
self.skipTest(msg)
raw_config = read_config(config_path)
swift_config = parse_config(raw_config)
self.cleanup()
self.start_servers(delayed_delete=True, daemon=False,
default_store='swift', **swift_config)
# add an image
headers = {
'x-image-meta-name': 'test_image',
'x-image-meta-is_public': 'true',
'x-image-meta-disk_format': 'raw',
'x-image-meta-container_format': 'ovf',
'content-type': 'application/octet-stream',
}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', body='XXX',
headers=headers)
self.assertEqual(response.status, 201)
image = jsonutils.loads(content)['image']
self.assertEqual('active', image['status'])
image_id = image['id']
# delete the image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
# ensure the image is marked pending delete
response, content = http.request(path, 'HEAD')
self.assertEqual(response.status, 200)
self.assertEqual('pending_delete', response['x-image-meta-status'])
# go directly to swift and remove the image object
swift = swift_connect(swift_config['swift_store_auth_address'],
swift_config['swift_store_auth_version'],
swift_config['swift_store_user'],
swift_config['swift_store_key'])
swift.delete_object(swift_config['swift_store_container'], image_id)
try:
swift.head_object(swift_config['swift_store_container'], image_id)
self.fail('image should have been deleted from swift')
except swiftclient.ClientException as e:
self.assertEqual(e.http_status, 404)
# wait for the scrub time on the image to pass
time.sleep(self.api_server.scrub_time)
# run the scrubber app, and ensure it doesn't fall over
exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable
cmd = ("%s --config-file %s" %
(exe_cmd, self.scrubber_daemon.conf_file_name))
exitcode, out, err = execute(cmd, raise_error=False)
self.assertEqual(0, exitcode)
self.wait_for_scrub(path)
self.stop_servers()
def test_scrubber_delete_handles_exception(self):

View File

@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import BaseHTTPServer
import os
import signal
import tempfile
@ -23,7 +24,6 @@ import six
from glance.openstack.common import jsonutils
from glance.tests import functional
from glance.tests.functional.store import test_http
TENANT1 = str(uuid.uuid4())
@ -32,6 +32,42 @@ TENANT3 = str(uuid.uuid4())
TENANT4 = str(uuid.uuid4())
def get_handler_class(fixture):
class StaticHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Length', str(len(fixture)))
self.end_headers()
self.wfile.write(fixture)
return
def do_HEAD(self):
self.send_response(200)
self.send_header('Content-Length', str(len(fixture)))
self.end_headers()
return
def log_message(*args, **kwargs):
# Override this method to prevent debug output from going
# to stderr during testing
return
return StaticHTTPRequestHandler
def http_server(image_id, image_data):
server_address = ('127.0.0.1', 0)
handler_class = get_handler_class(image_data)
httpd = BaseHTTPServer.HTTPServer(server_address, handler_class)
port = httpd.socket.getsockname()[1]
pid = os.fork()
if pid == 0:
httpd.serve_forever()
else:
return pid, port
class TestImages(functional.FunctionalTest):
def setUp(self):
@ -2289,7 +2325,7 @@ class TestImageLocationSelectionStrategy(functional.FunctionalTest):
self.foo_image_file.write("foo image file")
self.foo_image_file.flush()
self.addCleanup(self.foo_image_file.close)
ret = test_http.http_server("foo_image_id", "foo_image")
ret = http_server("foo_image_id", "foo_image")
self.http_server_pid, self.http_port = ret
def tearDown(self):

View File

@ -33,7 +33,6 @@ import glance.notifier
from glance.openstack.common import jsonutils as json
import glance.openstack.common.log as logging
import glance.schema
import glance.store
LOG = logging.getLogger(__name__)
_LE = i18n._LE

View File

@ -15,6 +15,7 @@ import os.path
import tempfile
import fixtures
import glance_store
from oslo.config import cfg
from oslo.db import options
@ -23,7 +24,6 @@ from glance.common import config
from glance.db import migration
import glance.db.sqlalchemy.api
import glance.registry.client.v1.client
import glance.store
from glance import tests as glance_tests
from glance.tests import utils as test_utils
@ -112,7 +112,6 @@ paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory
"""
CONF = cfg.CONF
CONF.import_opt('filesystem_store_datadir', 'glance.store.filesystem')
class ApiTest(test_utils.BaseTestCase):
@ -194,9 +193,14 @@ class ApiTest(test_utils.BaseTestCase):
atexit.register(_delete_cached_db)
def _setup_stores(self):
glance_store.register_opts(CONF)
glance_store.register_store_opts(CONF)
image_dir = os.path.join(self.test_dir, "images")
self.config(filesystem_store_datadir=image_dir)
glance.store.create_stores()
self.config(group='glance_store',
filesystem_store_datadir=image_dir)
glance_store.create_stores()
def _load_paste_app(self, name, flavor, conf):
conf_file_path = os.path.join(self.test_dir, '%s-paste.ini' % name)

View File

@ -18,6 +18,7 @@ import os.path
import tempfile
import fixtures
import glance_store
from oslo.config import cfg
from oslo.db import options
@ -26,7 +27,6 @@ from glance.common import config
from glance.db import migration
import glance.db.sqlalchemy.api
import glance.registry.client.v1.client
import glance.store
from glance import tests as glance_tests
from glance.tests import utils as test_utils
@ -115,7 +115,6 @@ paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory
"""
CONF = cfg.CONF
CONF.import_opt('filesystem_store_datadir', 'glance.store.filesystem')
class ApiTest(test_utils.BaseTestCase):
@ -190,9 +189,14 @@ class ApiTest(test_utils.BaseTestCase):
atexit.register(_delete_cached_db)
def _setup_stores(self):
glance_store.register_opts(CONF)
glance_store.register_store_opts(CONF)
image_dir = os.path.join(self.test_dir, "images")
self.config(filesystem_store_datadir=image_dir)
glance.store.create_stores()
self.config(group='glance_store',
filesystem_store_datadir=image_dir)
glance_store.create_stores()
def _load_paste_app(self, name, flavor, conf):
conf_file_path = os.path.join(self.test_dir, '%s-paste.ini' % name)

View File

@ -13,6 +13,8 @@
import mock
import sys
import glance_store as store
from oslo.config import cfg
import six
import glance.cmd.api
@ -26,6 +28,9 @@ import glance.image_cache.pruner
from glance.tests import utils as test_utils
CONF = cfg.CONF
class TestGlanceApiCmd(test_utils.BaseTestCase):
__argv_backup = None
@ -45,6 +50,8 @@ class TestGlanceApiCmd(test_utils.BaseTestCase):
self.stderr = six.StringIO()
sys.stderr = self.stderr
store.register_opts(CONF)
self.stubs.Set(glance.common.config, 'load_paste_app',
self._do_nothing)
self.stubs.Set(glance.common.wsgi.Server, 'start',
@ -58,11 +65,11 @@ class TestGlanceApiCmd(test_utils.BaseTestCase):
super(TestGlanceApiCmd, self).tearDown()
def test_supported_default_store(self):
self.config(default_store='file')
self.config(group='glance_store', default_store='file')
glance.cmd.api.main()
def test_unsupported_default_store(self):
self.config(default_store='shouldnotexist')
self.config(group='glance_store', default_store='shouldnotexist')
exit = self.assertRaises(SystemExit, glance.cmd.api.main)
self.assertEqual(exit.code, 1)

View File

@ -17,20 +17,16 @@ import os
import shutil
import fixtures
import glance_store as store
from glance_store import location
from oslo.config import cfg
from oslo.db import options
from glance.common import exception
from glance.openstack.common import jsonutils
from glance import store
from glance.store import location
from glance.store import sheepdog
from glance.store import vmware_datastore
from glance.tests import stubs
from glance.tests import utils as test_utils
CONF = cfg.CONF
CONF.import_opt('filesystem_store_datadir', 'glance.store.filesystem')
class StoreClearingUnitTest(test_utils.BaseTestCase):
@ -50,17 +46,8 @@ class StoreClearingUnitTest(test_utils.BaseTestCase):
:param passing_config: making store driver passes basic configurations.
:returns: the number of how many store drivers been loaded.
"""
def _fun(*args, **kwargs):
if passing_config:
return None
else:
raise exception.BadStoreConfiguration()
self.stubs.Set(sheepdog.Store, 'configure', _fun)
self.stubs.Set(vmware_datastore.Store, 'configure', _fun)
self.stubs.Set(vmware_datastore.Store, 'configure_add', _fun)
return store.create_stores()
store.register_opts(CONF)
store.create_stores(CONF)
class IsolatedUnitTest(StoreClearingUnitTest):
@ -77,12 +64,17 @@ class IsolatedUnitTest(StoreClearingUnitTest):
policy_file = self._copy_data_file('policy.json', self.test_dir)
options.set_defaults(CONF, connection='sqlite://',
sqlite_db='glance.sqlite')
self.config(verbose=False,
debug=False,
default_store='filesystem',
filesystem_store_datadir=os.path.join(self.test_dir),
policy_file=policy_file,
lock_path=os.path.join(self.test_dir))
self.config(default_store='filesystem',
filesystem_store_datadir=os.path.join(self.test_dir),
group="glance_store")
store.create_stores()
stubs.stub_out_registry_and_store_server(self.stubs,
self.test_dir,
registry=self.registry)

View File

@ -1,84 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import stubout
from cinderclient.v2 import client as cinderclient
import six
from glance.common import exception
from glance.openstack.common import units
import glance.store.cinder as cinder
from glance.store.location import get_location_from_uri
from glance.tests.unit import base
class FakeObject(object):
def __init__(self, **kwargs):
for name, value in six.iteritems(kwargs):
setattr(self, name, value)
class TestCinderStore(base.StoreClearingUnitTest):
    """Unit tests for the cinder store driver (no real cinder needed)."""

    def setUp(self):
        # Select the cinder driver before the base class builds the stores.
        self.config(default_store='cinder',
                    known_stores=['glance.store.cinder.Store'])
        super(TestCinderStore, self).setUp()
        self.stubs = stubout.StubOutForTesting()

    def test_cinder_configure_add(self):
        """configure_add requires a context carrying a service catalog."""
        store = cinder.Store()
        self.assertRaises(exception.BadStoreConfiguration,
                          store.configure_add)

        store = cinder.Store(context=None)
        self.assertRaises(exception.BadStoreConfiguration,
                          store.configure_add)

        store = cinder.Store(context=FakeObject(service_catalog=None))
        self.assertRaises(exception.BadStoreConfiguration,
                          store.configure_add)

        # Any non-None service catalog is enough for configure_add.
        store = cinder.Store(context=FakeObject(
            service_catalog='fake_service_catalog'))
        store.configure_add()

    def test_cinder_get_size(self):
        """get_size returns the volume size in bytes (GiB * units.Gi)."""
        fake_client = FakeObject(auth_token=None, management_url=None)
        fake_volumes = {'12345678-9012-3455-6789-012345678901':
                        FakeObject(size=5)}

        class FakeCinderClient(FakeObject):
            def __init__(self, *args, **kwargs):
                super(FakeCinderClient, self).__init__(client=fake_client,
                                                       volumes=fake_volumes)

        self.stubs.Set(cinderclient, 'Client', FakeCinderClient)

        # Minimal service catalog entry so the store can locate cinder.
        fake_sc = [{u'endpoints': [{u'publicURL': u'foo_public_url'}],
                    u'endpoints_links': [],
                    u'name': u'cinder',
                    u'type': u'volume'}]
        fake_context = FakeObject(service_catalog=fake_sc,
                                  user='fake_uer',
                                  auth_tok='fake_token',
                                  tenant='fake_tenant')

        uri = 'cinder://%s' % fake_volumes.keys()[0]
        loc = get_location_from_uri(uri)
        store = cinder.Store(context=fake_context)
        image_size = store.get_size(loc)
        self.assertEqual(image_size,
                         fake_volumes.values()[0].size * units.Gi)
        # The store must propagate the context's token and catalog URL
        # onto the cinder client it builds.
        self.assertEqual(fake_client.auth_token, 'fake_token')
        self.assertEqual(fake_client.management_url, 'foo_public_url')

View File

@ -1,462 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the filesystem backend store"""
import errno
import hashlib
import json
import os
import uuid
import fixtures
from mock import patch
from oslo.config import cfg
import six
import six.moves.builtins as __builtin__
from glance.common import exception
from glance.openstack.common import units
from glance.store.filesystem import Store
from glance.store.location import get_location_from_uri
from glance.tests.unit import base
CONF = cfg.CONF
class TestStore(base.IsolatedUnitTest):
    """Unit tests for the filesystem store driver."""

    def setUp(self):
        """Establish a clean test environment"""
        super(TestStore, self).setUp()
        # Shrink the chunk size so small fixtures exercise multi-chunk
        # code paths; restored in tearDown.
        self.orig_read_chunksize = Store.READ_CHUNKSIZE
        self.orig_write_chunksize = Store.WRITE_CHUNKSIZE
        Store.READ_CHUNKSIZE = Store.WRITE_CHUNKSIZE = 10
        self.store = Store()

    def tearDown(self):
        """Clear the test environment"""
        super(TestStore, self).tearDown()
        Store.READ_CHUNKSIZE = self.orig_read_chunksize
        Store.WRITE_CHUNKSIZE = self.orig_write_chunksize

    def test_configure_add_single_datadir(self):
        """
        Tests filesystem specified by filesystem_store_datadir
        are parsed correctly.
        """
        store = self.useFixture(fixtures.TempDir()).path
        CONF.set_override('filesystem_store_datadir', store)
        self.store.configure_add()
        self.assertEqual(self.store.datadir, store)

    def test_configure_add_with_single_and_multi_datadirs(self):
        """
        Tests BadStoreConfiguration exception is raised if both
        filesystem_store_datadir and filesystem_store_datadirs are specified.
        """
        store_map = [self.useFixture(fixtures.TempDir()).path,
                     self.useFixture(fixtures.TempDir()).path]
        CONF.set_override('filesystem_store_datadirs',
                          [store_map[0] + ":100",
                           store_map[1] + ":200"])
        self.assertRaises(exception.BadStoreConfiguration,
                          self.store.configure_add)

    def test_configure_add_without_single_and_multi_datadirs(self):
        """
        Tests BadStoreConfiguration exception is raised if neither
        filesystem_store_datadir nor filesystem_store_datadirs are specified.
        """
        CONF.clear_override('filesystem_store_datadir')
        self.assertRaises(exception.BadStoreConfiguration,
                          self.store.configure_add)

    def test_configure_add_with_multi_datadirs(self):
        """
        Tests multiple filesystem specified by filesystem_store_datadirs
        are parsed correctly.
        """
        store_map = [self.useFixture(fixtures.TempDir()).path,
                     self.useFixture(fixtures.TempDir()).path]
        CONF.clear_override('filesystem_store_datadir')
        CONF.set_override('filesystem_store_datadirs',
                          [store_map[0] + ":100",
                           store_map[1] + ":200"])
        self.store.configure_add()

        # Datadirs are grouped by priority; higher priority sorts first.
        expected_priority_map = {100: [store_map[0]], 200: [store_map[1]]}
        expected_priority_list = [200, 100]
        self.assertEqual(self.store.priority_data_map, expected_priority_map)
        self.assertEqual(self.store.priority_list, expected_priority_list)

    def test_configure_add_invalid_priority(self):
        """
        Tests invalid priority specified by filesystem_store_datadirs
        param raises BadStoreConfiguration exception.
        """
        CONF.clear_override('filesystem_store_datadir')
        CONF.set_override('filesystem_store_datadirs',
                          [self.useFixture(fixtures.TempDir()).path + ":100",
                           self.useFixture(fixtures.TempDir()).path +
                           ":invalid"])
        self.assertRaises(exception.BadStoreConfiguration,
                          self.store.configure_add)

    def test_configure_add_same_dir_multiple_times(self):
        """
        Tests BadStoreConfiguration exception is raised if same directory
        is specified multiple times in filesystem_store_datadirs.
        """
        store_map = [self.useFixture(fixtures.TempDir()).path,
                     self.useFixture(fixtures.TempDir()).path]
        CONF.clear_override('filesystem_store_datadir')
        CONF.set_override('filesystem_store_datadirs',
                          [store_map[0] + ":100",
                           store_map[1] + ":200",
                           store_map[0] + ":300"])
        self.assertRaises(exception.BadStoreConfiguration,
                          self.store.configure_add)

    def test_configure_add_with_empty_datadir_path(self):
        """
        Tests BadStoreConfiguration exception is raised if empty directory
        path is specified in filesystem_store_datadirs.
        """
        CONF.clear_override('filesystem_store_datadir')
        CONF.set_override('filesystem_store_datadirs', [''])
        self.assertRaises(exception.BadStoreConfiguration,
                          self.store.configure_add)

    def test_configure_add_with_readonly_datadir_path(self):
        """
        Tests BadStoreConfiguration exception is raised if directory
        path specified in filesystem_store_datadirs is readonly.
        """
        readonly_dir = self.useFixture(fixtures.TempDir()).path
        os.chmod(readonly_dir, 0o444)
        CONF.clear_override('filesystem_store_datadir')
        CONF.set_override('filesystem_store_datadirs', [readonly_dir])
        self.assertRaises(exception.BadStoreConfiguration,
                          self.store.configure_add)

    def test_get(self):
        """Test a "normal" retrieval of an image in chunks"""
        # First add an image...
        image_id = str(uuid.uuid4())
        file_contents = "chunk00000remainder"
        image_file = six.StringIO(file_contents)
        location, size, checksum, _ = self.store.add(image_id,
                                                     image_file,
                                                     len(file_contents))

        # Now read it back...
        uri = "file:///%s/%s" % (self.test_dir, image_id)
        loc = get_location_from_uri(uri)
        (image_file, image_size) = self.store.get(loc)

        # READ_CHUNKSIZE is 10 (setUp), so 19 bytes come back in 2 chunks.
        expected_data = "chunk00000remainder"
        expected_num_chunks = 2
        data = ""
        num_chunks = 0

        for chunk in image_file:
            num_chunks += 1
            data += chunk
        self.assertEqual(expected_data, data)
        self.assertEqual(expected_num_chunks, num_chunks)

    def test_get_non_existing(self):
        """
        Test that trying to retrieve a file that doesn't exist
        raises an error
        """
        loc = get_location_from_uri("file:///%s/non-existing" % self.test_dir)
        self.assertRaises(exception.NotFound,
                          self.store.get,
                          loc)

    def test_add(self):
        """Test that we can add an image via the filesystem backend"""
        Store.WRITE_CHUNKSIZE = 1024
        expected_image_id = str(uuid.uuid4())
        expected_file_size = 5 * units.Ki  # 5K
        expected_file_contents = "*" * expected_file_size
        expected_checksum = hashlib.md5(expected_file_contents).hexdigest()
        expected_location = "file://%s/%s" % (self.test_dir,
                                              expected_image_id)
        image_file = six.StringIO(expected_file_contents)

        location, size, checksum, _ = self.store.add(expected_image_id,
                                                     image_file,
                                                     expected_file_size)

        self.assertEqual(expected_location, location)
        self.assertEqual(expected_file_size, size)
        self.assertEqual(expected_checksum, checksum)

        # Round-trip: read the image back and verify content and length.
        uri = "file:///%s/%s" % (self.test_dir, expected_image_id)
        loc = get_location_from_uri(uri)
        (new_image_file, new_image_size) = self.store.get(loc)
        new_image_contents = ""
        new_image_file_size = 0

        for chunk in new_image_file:
            new_image_file_size += len(chunk)
            new_image_contents += chunk

        self.assertEqual(expected_file_contents, new_image_contents)
        self.assertEqual(expected_file_size, new_image_file_size)

    def test_add_with_multiple_dirs(self):
        """Test adding multiple filesystem directories."""
        store_map = [self.useFixture(fixtures.TempDir()).path,
                     self.useFixture(fixtures.TempDir()).path]
        CONF.clear_override('filesystem_store_datadir')
        CONF.set_override('filesystem_store_datadirs',
                          [store_map[0] + ":100",
                           store_map[1] + ":200"])

        self.store.configure_add()

        """Test that we can add an image via the filesystem backend"""
        Store.WRITE_CHUNKSIZE = 1024
        expected_image_id = str(uuid.uuid4())
        expected_file_size = 5 * units.Ki  # 5K
        expected_file_contents = "*" * expected_file_size
        expected_checksum = hashlib.md5(expected_file_contents).hexdigest()
        # The image should land in the highest-priority datadir (200).
        expected_location = "file://%s/%s" % (store_map[1],
                                              expected_image_id)
        image_file = six.StringIO(expected_file_contents)

        location, size, checksum, _ = self.store.add(expected_image_id,
                                                     image_file,
                                                     expected_file_size)

        self.assertEqual(expected_location, location)
        self.assertEqual(expected_file_size, size)
        self.assertEqual(expected_checksum, checksum)

        loc = get_location_from_uri(expected_location)
        (new_image_file, new_image_size) = self.store.get(loc)
        new_image_contents = ""
        new_image_file_size = 0

        for chunk in new_image_file:
            new_image_file_size += len(chunk)
            new_image_contents += chunk

        self.assertEqual(expected_file_contents, new_image_contents)
        self.assertEqual(expected_file_size, new_image_file_size)

    def test_add_with_multiple_dirs_storage_full(self):
        """
        Test StorageFull exception is raised if no filesystem directory
        is found that can store an image.
        """
        store_map = [self.useFixture(fixtures.TempDir()).path,
                     self.useFixture(fixtures.TempDir()).path]
        CONF.clear_override('filesystem_store_datadir')
        CONF.set_override('filesystem_store_datadirs',
                          [store_map[0] + ":100",
                           store_map[1] + ":200"])

        self.store.configure_add()

        # Pretend every mount point has zero free space.
        def fake_get_capacity_info(mount_point):
            return 0

        self.stubs.Set(self.store, '_get_capacity_info',
                       fake_get_capacity_info)

        Store.WRITE_CHUNKSIZE = 1024
        expected_image_id = str(uuid.uuid4())
        expected_file_size = 5 * units.Ki  # 5K
        expected_file_contents = "*" * expected_file_size
        image_file = six.StringIO(expected_file_contents)

        self.assertRaises(exception.StorageFull, self.store.add,
                          expected_image_id, image_file, expected_file_size)

    def test_add_check_metadata_success(self):
        expected_image_id = str(uuid.uuid4())
        in_metadata = {'akey': u'some value', 'list': [u'1', u'2', u'3']}
        jsonfilename = os.path.join(self.test_dir,
                                    "storage_metadata.%s" % expected_image_id)

        self.config(filesystem_store_metadata_file=jsonfilename)
        with open(jsonfilename, 'w') as fptr:
            json.dump(in_metadata, fptr)

        expected_file_size = 10
        expected_file_contents = "*" * expected_file_size
        image_file = six.StringIO(expected_file_contents)

        location, size, checksum, metadata = self.store.add(expected_image_id,
                                                            image_file,
                                                            expected_file_size)

        self.assertEqual(metadata, in_metadata)

    def test_add_check_metadata_bad_data(self):
        expected_image_id = str(uuid.uuid4())
        in_metadata = {'akey': 10}  # only unicode is allowed
        jsonfilename = os.path.join(self.test_dir,
                                    "storage_metadata.%s" % expected_image_id)

        self.config(filesystem_store_metadata_file=jsonfilename)
        with open(jsonfilename, 'w') as fptr:
            json.dump(in_metadata, fptr)

        expected_file_size = 10
        expected_file_contents = "*" * expected_file_size
        image_file = six.StringIO(expected_file_contents)

        location, size, checksum, metadata = self.store.add(expected_image_id,
                                                            image_file,
                                                            expected_file_size)

        # Invalid metadata is silently dropped rather than failing the add.
        self.assertEqual(metadata, {})

    def test_add_check_metadata_bad_nosuch_file(self):
        expected_image_id = str(uuid.uuid4())
        jsonfilename = os.path.join(self.test_dir,
                                    "storage_metadata.%s" % expected_image_id)

        self.config(filesystem_store_metadata_file=jsonfilename)
        expected_file_size = 10
        expected_file_contents = "*" * expected_file_size
        image_file = six.StringIO(expected_file_contents)

        location, size, checksum, metadata = self.store.add(expected_image_id,
                                                            image_file,
                                                            expected_file_size)

        # A missing metadata file also results in empty metadata.
        self.assertEqual(metadata, {})

    def test_add_already_existing(self):
        """
        Tests that adding an image with an existing identifier
        raises an appropriate exception
        """
        Store.WRITE_CHUNKSIZE = 1024
        image_id = str(uuid.uuid4())
        file_size = 5 * units.Ki  # 5K
        file_contents = "*" * file_size
        image_file = six.StringIO(file_contents)

        location, size, checksum, _ = self.store.add(image_id,
                                                     image_file,
                                                     file_size)
        image_file = six.StringIO("nevergonnamakeit")
        self.assertRaises(exception.Duplicate,
                          self.store.add,
                          image_id, image_file, 0)

    def _do_test_add_write_failure(self, errno, exception):
        # NOTE: the `errno` and `exception` parameters deliberately shadow
        # the module import and package import of the same names here.
        Store.WRITE_CHUNKSIZE = 1024
        image_id = str(uuid.uuid4())
        file_size = 5 * units.Ki  # 5K
        file_contents = "*" * file_size
        path = os.path.join(self.test_dir, image_id)
        image_file = six.StringIO(file_contents)

        e = IOError()
        e.errno = errno
        with patch.object(__builtin__, 'open', side_effect=e) as mock_open:
            self.assertRaises(exception,
                              self.store.add,
                              image_id, image_file, 0)
            # The partially-written file must not be left behind.
            self.assertFalse(os.path.exists(path))
            mock_open.assert_called_once_with(path, 'wb')

    def test_add_storage_full(self):
        """
        Tests that adding an image without enough space on disk
        raises an appropriate exception
        """
        self._do_test_add_write_failure(errno.ENOSPC, exception.StorageFull)

    def test_add_file_too_big(self):
        """
        Tests that adding an excessively large image file
        raises an appropriate exception
        """
        self._do_test_add_write_failure(errno.EFBIG, exception.StorageFull)

    def test_add_storage_write_denied(self):
        """
        Tests that adding an image with insufficient filestore permissions
        raises an appropriate exception
        """
        self._do_test_add_write_failure(errno.EACCES,
                                        exception.StorageWriteDenied)

    def test_add_other_failure(self):
        """
        Tests that a non-space-related IOError does not raise a
        StorageFull exception.
        """
        self._do_test_add_write_failure(errno.ENOTDIR, IOError)

    def test_add_cleanup_on_read_failure(self):
        """
        Tests the partial image file is cleaned up after a read
        failure.
        """
        Store.WRITE_CHUNKSIZE = 1024
        image_id = str(uuid.uuid4())
        file_size = 5 * units.Ki  # 5K
        file_contents = "*" * file_size
        path = os.path.join(self.test_dir, image_id)
        image_file = six.StringIO(file_contents)

        def fake_Error(size):
            raise AttributeError()

        self.stubs.Set(image_file, 'read', fake_Error)

        self.assertRaises(AttributeError,
                          self.store.add,
                          image_id, image_file, 0)
        self.assertFalse(os.path.exists(path))

    def test_delete(self):
        """
        Test we can delete an existing image in the filesystem store
        """
        # First add an image
        image_id = str(uuid.uuid4())
        file_size = 5 * units.Ki  # 5K
        file_contents = "*" * file_size
        image_file = six.StringIO(file_contents)

        location, size, checksum, _ = self.store.add(image_id,
                                                     image_file,
                                                     file_size)

        # Now check that we can delete it
        uri = "file:///%s/%s" % (self.test_dir, image_id)
        loc = get_location_from_uri(uri)
        self.store.delete(loc)

        self.assertRaises(exception.NotFound, self.store.get, loc)

    def test_delete_non_existing(self):
        """
        Test that trying to delete a file that doesn't exist
        raises an error
        """
        loc = get_location_from_uri("file:///tmp/glance-tests/non-existing")
        self.assertRaises(exception.NotFound,
                          self.store.delete,
                          loc)

View File

@ -1,96 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import stubout
from glance.common import exception
from glance.common import utils
from glance.store.gridfs import Store
from glance.tests.unit import base
try:
import gridfs
import pymongo
except ImportError:
pymongo = None
# Store configuration used by TestStore below: selects the gridfs driver
# and points it at a fake MongoDB endpoint (stubbed out by
# stub_out_gridfs), so no real mongod is required to run the tests.
GRIDFS_CONF = {'verbose': True,
               'debug': True,
               'default_store': 'gridfs',
               'mongodb_store_uri': 'mongodb://fake_store_uri',
               'mongodb_store_db': 'fake_store_db'}
def stub_out_gridfs(stubs):
    """Replace pymongo/gridfs entry points with in-memory fakes.

    FakeGridFS records the sequence of commands invoked (class-level
    ``called_commands``) so tests can assert on the store's call order.
    No-op if pymongo is not importable.

    :param stubs: Set of stubout stubs
    """
    class FakeMongoClient(object):
        def __init__(self, *args, **kwargs):
            pass

        def __getitem__(self, key):
            # Database lookup; the fake GridFS ignores its db argument.
            return None

    class FakeGridFS(object):
        # Shared across instances: accumulated image bytes and call log.
        image_data = {}
        called_commands = []

        def __init__(self, *args, **kwargs):
            pass

        def exists(self, image_id):
            self.called_commands.append('exists')
            return False

        def put(self, image_file, _id):
            self.called_commands.append('put')
            data = None
            # Read in 64-byte chunks until the file is exhausted.
            while True:
                data = image_file.read(64)
                if data:
                    self.image_data[_id] = \
                        self.image_data.setdefault(_id, '') + data
                else:
                    break

        def delete(self, _id):
            self.called_commands.append('delete')

    if pymongo is not None:
        stubs.Set(pymongo, 'MongoClient', FakeMongoClient)
        stubs.Set(gridfs, 'GridFS', FakeGridFS)
class TestStore(base.StoreClearingUnitTest):
    """Unit tests for the gridfs store driver, using the fakes above."""

    def setUp(self):
        """Establish a clean test environment"""
        self.config(**GRIDFS_CONF)
        super(TestStore, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        stub_out_gridfs(self.stubs)
        self.store = Store()
        self.addCleanup(self.stubs.UnsetAll)

    def test_cleanup_when_add_image_exception(self):
        # Skip when pymongo is unavailable; stub_out_gridfs was a no-op.
        if pymongo is None:
            msg = 'GridFS store can not add images, skip test.'
            self.skipTest(msg)

        # LimitingReader caps the stream at 1 byte but 2 are supplied, so
        # add() must fail and clean up the partially-stored image.
        self.assertRaises(exception.ImageSizeLimitExceeded,
                          self.store.add,
                          'fake_image_id',
                          utils.LimitingReader(six.StringIO('xx'), 1),
                          2)
        self.assertEqual(self.store.fs.called_commands,
                         ['exists', 'put', 'delete'])

View File

@ -1,190 +0,0 @@
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import xrange
import stubout
from glance.common import exception
from glance.common.store_utils import safe_delete_from_backend
from glance import context
from glance.db.sqlalchemy import api as db_api
from glance.registry.client.v1.api import configure_registry_client
from glance.store import delete_from_backend
from glance.store.http import MAX_REDIRECTS
from glance.store.http import Store
from glance.store.location import get_location_from_uri
from glance.tests import stubs as test_stubs
from glance.tests.unit import base
from glance.tests import utils
# The response stack is used to return designated responses in order;
# however when it's empty a default 200 OK response is returned from
# FakeHTTPConnection below.
FAKE_RESPONSE_STACK = []
def stub_out_http_backend(stubs):
    """
    Stubs out the httplib.HTTPRequest.getresponse to return
    faked-out data instead of grabbing actual contents of a resource

    The stubbed getresponse() returns an iterator over
    the data "I am a teapot, short and stout\n"

    :param stubs: Set of stubout stubs
    """
    class FakeHTTPConnection(object):
        def __init__(self, *args, **kwargs):
            pass

        def getresponse(self):
            # Pop a queued response first; fall back to the default 200 OK.
            if len(FAKE_RESPONSE_STACK):
                return FAKE_RESPONSE_STACK.pop()
            return utils.FakeHTTPResponse()

        def request(self, *_args, **_kwargs):
            pass

        def close(self):
            pass

    def fake_get_conn_class(self, *args, **kwargs):
        return FakeHTTPConnection

    # The http Store builds its connection class via _get_conn_class.
    stubs.Set(Store, '_get_conn_class', fake_get_conn_class)
def stub_out_registry_image_update(stubs):
    """
    Stubs an image update on the registry.

    :param stubs: Set of stubout stubs
    """
    test_stubs.stub_out_registry_server(stubs)

    def fake_image_update(ctx, image_id, values, purge_props=False):
        # Minimal successful update result; callers only need properties.
        return {'properties': {}}

    stubs.Set(db_api, 'image_update', fake_image_update)
class TestHttpStore(base.StoreClearingUnitTest):
    """Unit tests for the http/https store driver."""

    def setUp(self):
        # Reset the shared response queue so tests don't leak responses.
        global FAKE_RESPONSE_STACK
        FAKE_RESPONSE_STACK = []
        self.config(default_store='http',
                    known_stores=['glance.store.http.Store'])
        super(TestHttpStore, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        stub_out_http_backend(self.stubs)
        # Tiny chunks so the 31-byte teapot fixture yields many chunks.
        Store.READ_CHUNKSIZE = 2
        self.store = Store()
        configure_registry_client()

    def test_http_get(self):
        uri = "http://netloc/path/to/file.tar.gz"
        expected_returns = ['I ', 'am', ' a', ' t', 'ea', 'po', 't,', ' s',
                            'ho', 'rt', ' a', 'nd', ' s', 'to', 'ut', '\n']
        loc = get_location_from_uri(uri)
        (image_file, image_size) = self.store.get(loc)
        self.assertEqual(image_size, 31)
        chunks = [c for c in image_file]
        self.assertEqual(chunks, expected_returns)

    def test_http_get_redirect(self):
        # Add two layers of redirects to the response stack, which will
        # return the default 200 OK with the expected data after resolving
        # both redirects.
        redirect_headers_1 = {"location": "http://example.com/teapot.img"}
        redirect_resp_1 = utils.FakeHTTPResponse(status=302,
                                                 headers=redirect_headers_1)
        redirect_headers_2 = {"location": "http://example.com/teapot_real.img"}
        redirect_resp_2 = utils.FakeHTTPResponse(status=301,
                                                 headers=redirect_headers_2)
        FAKE_RESPONSE_STACK.append(redirect_resp_1)
        FAKE_RESPONSE_STACK.append(redirect_resp_2)

        uri = "http://netloc/path/to/file.tar.gz"
        expected_returns = ['I ', 'am', ' a', ' t', 'ea', 'po', 't,', ' s',
                            'ho', 'rt', ' a', 'nd', ' s', 'to', 'ut', '\n']
        loc = get_location_from_uri(uri)
        (image_file, image_size) = self.store.get(loc)
        self.assertEqual(image_size, 31)
        chunks = [c for c in image_file]
        self.assertEqual(chunks, expected_returns)

    def test_http_get_max_redirects(self):
        # Add more than MAX_REDIRECTS redirects to the response stack
        redirect_headers = {"location": "http://example.com/teapot.img"}
        redirect_resp = utils.FakeHTTPResponse(status=302,
                                               headers=redirect_headers)
        for i in xrange(MAX_REDIRECTS + 2):
            FAKE_RESPONSE_STACK.append(redirect_resp)

        uri = "http://netloc/path/to/file.tar.gz"
        loc = get_location_from_uri(uri)
        self.assertRaises(exception.MaxRedirectsExceeded, self.store.get, loc)

    def test_http_get_redirect_invalid(self):
        # 307 is not a redirect status the store follows.
        redirect_headers = {"location": "http://example.com/teapot.img"}
        redirect_resp = utils.FakeHTTPResponse(status=307,
                                               headers=redirect_headers)
        FAKE_RESPONSE_STACK.append(redirect_resp)

        uri = "http://netloc/path/to/file.tar.gz"
        loc = get_location_from_uri(uri)
        self.assertRaises(exception.BadStoreUri, self.store.get, loc)

    def test_http_get_not_found(self):
        not_found_resp = utils.FakeHTTPResponse(status=404,
                                                data="404 Not Found")
        FAKE_RESPONSE_STACK.append(not_found_resp)

        uri = "http://netloc/path/to/file.tar.gz"
        loc = get_location_from_uri(uri)
        self.assertRaises(exception.NotFound, self.store.get, loc)

    def test_https_get(self):
        uri = "https://netloc/path/to/file.tar.gz"
        expected_returns = ['I ', 'am', ' a', ' t', 'ea', 'po', 't,', ' s',
                            'ho', 'rt', ' a', 'nd', ' s', 'to', 'ut', '\n']
        loc = get_location_from_uri(uri)
        (image_file, image_size) = self.store.get(loc)
        self.assertEqual(image_size, 31)
        chunks = [c for c in image_file]
        self.assertEqual(chunks, expected_returns)

    def test_http_delete_raise_error(self):
        # The http store does not support delete; both the direct call and
        # the backend helper surface that, with different exception types.
        uri = "https://netloc/path/to/file.tar.gz"
        loc = get_location_from_uri(uri)
        ctx = context.RequestContext()
        self.assertRaises(NotImplementedError, self.store.delete, loc)
        self.assertRaises(exception.StoreDeleteNotSupported,
                          delete_from_backend, ctx, uri)

    def test_http_schedule_delete_swallows_error(self):
        uri = {"url": "https://netloc/path/to/file.tar.gz"}
        ctx = context.RequestContext()
        stub_out_registry_image_update(self.stubs)
        try:
            safe_delete_from_backend(ctx, 'image_id', uri)
        except exception.StoreDeleteNotSupported:
            self.fail('StoreDeleteNotSupported should be swallowed')

View File

@ -17,7 +17,6 @@ from contextlib import contextmanager
import datetime
import hashlib
import os
import tempfile
import time
import fixtures
@ -30,8 +29,6 @@ from glance import image_cache
from glance.openstack.common import units
#NOTE(bcwaldon): This is imported to load the registry config options
import glance.registry # noqa
import glance.store.filesystem as fs_store
import glance.store.s3 as s3_store
from glance.tests import utils as test_utils
from glance.tests.utils import skip_if_disabled
from glance.tests.utils import xattr_writes_supported
@ -409,44 +406,6 @@ class ImageCacheTestCase(object):
# checksum is valid, fake image should be cached:
self.assertTrue(cache.is_cached(image_id))
def test_gate_caching_iter_fs_chunked_file(self):
    """Tests get_caching_iter when using a filesystem ChunkedFile"""
    image_id = 123

    with tempfile.NamedTemporaryFile() as test_data_file:
        test_data_file.write(FIXTURE_DATA)
        test_data_file.seek(0)
        # ChunkedFile reads from the named path while the file is open.
        image = fs_store.ChunkedFile(test_data_file.name)

        md5 = hashlib.md5()
        md5.update(FIXTURE_DATA)
        checksum = md5.hexdigest()

        cache = image_cache.ImageCache()
        img_iter = cache.get_caching_iter(image_id, checksum, image)
        # Drain the iterator so the caching tee actually runs.
        for chunk in img_iter:
            pass
        # checksum is valid, fake image should be cached:
        self.assertTrue(cache.is_cached(image_id))
def test_gate_caching_iter_s3_chunked_file(self):
    """Tests get_caching_iter when using an S3 ChunkedFile"""
    image_id = 123

    with tempfile.NamedTemporaryFile() as test_data_file:
        test_data_file.write(FIXTURE_DATA)
        test_data_file.seek(0)
        # Unlike the fs variant, the S3 ChunkedFile wraps the file object.
        image = s3_store.ChunkedFile(test_data_file)

        md5 = hashlib.md5()
        md5.update(FIXTURE_DATA)
        checksum = md5.hexdigest()

        cache = image_cache.ImageCache()
        img_iter = cache.get_caching_iter(image_id, checksum, image)
        # Drain the iterator so the caching tee actually runs.
        for chunk in img_iter:
            pass
        # checksum is valid, fake image should be cached:
        self.assertTrue(cache.is_cached(image_id))
def test_gate_caching_iter_bad_checksum(self):
image = "12345678990abcdefghijklmnop"
image_id = 123

View File

@ -16,6 +16,7 @@
import datetime
import glance_store
import mock
from oslo.config import cfg
from oslo import messaging
@ -254,7 +255,7 @@ class TestImageNotifications(utils.BaseTestCase):
def data_iterator():
self.notifier.log = []
yield 'abcde'
raise exception.StorageFull('Modern Major General')
raise glance_store.StorageFull(message='Modern Major General')
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.image_proxy.set_data, data_iterator(), 10)
@ -304,7 +305,7 @@ class TestImageNotifications(utils.BaseTestCase):
def data_iterator():
self.notifier.log = []
yield 'abcde'
raise exception.StorageWriteDenied('The Very Model')
raise glance_store.StorageWriteDenied(message='The Very Model')
self.assertRaises(webob.exc.HTTPServiceUnavailable,
self.image_proxy.set_data, data_iterator(), 10)

View File

@ -1,187 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from glance.common import exception
from glance.common import utils
from glance.openstack.common import units
from glance.store.location import Location
import glance.store.rbd as rbd_store
from glance.store.rbd import StoreLocation
from glance.tests.unit import base
from glance.tests.unit.fake_rados import mock_rados
from glance.tests.unit.fake_rados import mock_rbd
class TestStore(base.StoreClearingUnitTest):
def setUp(self):
"""Establish a clean test environment"""
super(TestStore, self).setUp()
self.stubs.Set(rbd_store, 'rados', mock_rados)
self.stubs.Set(rbd_store, 'rbd', mock_rbd)
self.store = rbd_store.Store()
self.store.chunk_size = 2
self.called_commands_actual = []
self.called_commands_expected = []
self.store_specs = {'image': 'fake_image',
'snapshot': 'fake_snapshot'}
self.location = StoreLocation(self.store_specs)
# Provide enough data to get more than one chunk iteration.
self.data_len = 3 * units.Ki
self.data_iter = six.StringIO('*' * self.data_len)
def test_add_w_image_size_zero(self):
"""Assert that correct size is returned even though 0 was provided."""
self.store.chunk_size = units.Ki
with mock.patch.object(rbd_store.rbd.Image, 'resize') as resize:
with mock.patch.object(rbd_store.rbd.Image, 'write') as write:
ret = self.store.add('fake_image_id', self.data_iter, 0)
resize.assert_called()
write.assert_called()
self.assertEqual(ret[1], self.data_len)
def test_add_w_rbd_image_exception(self):
def _fake_create_image(*args, **kwargs):
self.called_commands_actual.append('create')
return self.location
def _fake_delete_image(*args, **kwargs):
self.called_commands_actual.append('delete')
def _fake_enter(*args, **kwargs):
raise exception.NotFound("")
self.stubs.Set(self.store, '_create_image', _fake_create_image)
self.stubs.Set(self.store, '_delete_image', _fake_delete_image)
self.stubs.Set(mock_rbd.Image, '__enter__', _fake_enter)
self.assertRaises(exception.NotFound, self.store.add,
'fake_image_id', self.data_iter, self.data_len)
self.called_commands_expected = ['create', 'delete']
def test_add_w_rbd_image_exception2(self):
def _fake_create_image(*args, **kwargs):
self.called_commands_actual.append('create')
return self.location
def _fake_delete_image(*args, **kwargs):
self.called_commands_actual.append('delete')
raise exception.InUseByStore()
def _fake_enter(*args, **kwargs):
raise exception.NotFound("")
self.stubs.Set(self.store, '_create_image', _fake_create_image)
self.stubs.Set(self.store, '_delete_image', _fake_delete_image)
self.stubs.Set(mock_rbd.Image, '__enter__', _fake_enter)
self.assertRaises(exception.InUseByStore, self.store.add,
'fake_image_id', self.data_iter, self.data_len)
self.called_commands_expected = ['create', 'delete']
def test_add_duplicate_image(self):
def _fake_create_image(*args, **kwargs):
self.called_commands_actual.append('create')
raise mock_rbd.ImageExists()
self.stubs.Set(self.store, '_create_image', _fake_create_image)
self.assertRaises(exception.Duplicate, self.store.add,
'fake_image_id', self.data_iter, self.data_len)
self.called_commands_expected = ['create']
def test_delete(self):
def _fake_remove(*args, **kwargs):
self.called_commands_actual.append('remove')
self.stubs.Set(mock_rbd.RBD, 'remove', _fake_remove)
self.store.delete(Location('test_rbd_store', StoreLocation,
self.location.get_uri()))
self.called_commands_expected = ['remove']
def test__delete_image(self):
def _fake_remove(*args, **kwargs):
self.called_commands_actual.append('remove')
self.stubs.Set(mock_rbd.RBD, 'remove', _fake_remove)
self.store._delete_image(self.location)
self.called_commands_expected = ['remove']
def test__delete_image_w_snap(self):
def _fake_unprotect_snap(*args, **kwargs):
self.called_commands_actual.append('unprotect_snap')
def _fake_remove_snap(*args, **kwargs):
self.called_commands_actual.append('remove_snap')
def _fake_remove(*args, **kwargs):
self.called_commands_actual.append('remove')
self.stubs.Set(mock_rbd.RBD, 'remove', _fake_remove)
self.stubs.Set(mock_rbd.Image, 'unprotect_snap', _fake_unprotect_snap)
self.stubs.Set(mock_rbd.Image, 'remove_snap', _fake_remove_snap)
self.store._delete_image(self.location, snapshot_name='snap')
self.called_commands_expected = ['unprotect_snap', 'remove_snap',
'remove']
def test__delete_image_w_snap_exc_image_not_found(self):
def _fake_unprotect_snap(*args, **kwargs):
self.called_commands_actual.append('unprotect_snap')
raise mock_rbd.ImageNotFound()
self.stubs.Set(mock_rbd.Image, 'unprotect_snap', _fake_unprotect_snap)
self.assertRaises(exception.NotFound, self.store._delete_image,
self.location, snapshot_name='snap')
self.called_commands_expected = ['unprotect_snap']
    def test__delete_image_exc_image_not_found(self):
        """rbd ImageNotFound during remove maps to exception.NotFound."""
        def _fake_remove(*args, **kwargs):
            self.called_commands_actual.append('remove')
            raise mock_rbd.ImageNotFound()
        self.stubs.Set(mock_rbd.RBD, 'remove', _fake_remove)
        self.assertRaises(exception.NotFound, self.store._delete_image,
                          self.location, snapshot_name='snap')
        self.called_commands_expected = ['remove']
    def test_image_size_exceeded_exception(self):
        """A size-limit failure during write must delete the partial image."""
        def _fake_write(*args, **kwargs):
            # Record 'write' once, then always fail with the size error.
            if 'write' not in self.called_commands_actual:
                self.called_commands_actual.append('write')
            raise exception.ImageSizeLimitExceeded
        def _fake_delete_image(*args, **kwargs):
            self.called_commands_actual.append('delete')
        self.stubs.Set(mock_rbd.Image, 'write', _fake_write)
        self.stubs.Set(self.store, '_delete_image', _fake_delete_image)
        data = utils.LimitingReader(self.data_iter, self.data_len)
        self.assertRaises(exception.ImageSizeLimitExceeded,
                          self.store.add, 'fake_image_id',
                          data, self.data_len + 1)
        # add() must clean up after the failed write.
        self.called_commands_expected = ['write', 'delete']
    def tearDown(self):
        """Assert the rbd calls recorded match what each test expected."""
        self.assertEqual(self.called_commands_actual,
                         self.called_commands_expected)
        super(TestStore, self).tearDown()

View File

@ -1,573 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the S3 backend store"""
import hashlib
import uuid
import xml.etree.ElementTree
import boto.s3.connection
import mock
import six
import stubout
from glance.common import exception
from glance.openstack.common import units
from glance.store.location import get_location_from_uri
import glance.store.s3
from glance.store.s3 import get_s3_location
from glance.store.s3 import Store
from glance.store import UnsupportedBackend
from glance.tests.unit import base
# Image id pre-seeded into the fake bucket by stub_out_s3().
FAKE_UUID = str(uuid.uuid4())
FIVE_KB = 5 * units.Ki
# Store configuration shared by all tests.  The large-object thresholds
# are deliberately tiny (in MB) so the multipart-upload path is easy to
# exercise with small fixtures.
S3_CONF = {'verbose': True,
           'debug': True,
           'default_store': 's3',
           's3_store_access_key': 'user',
           's3_store_secret_key': 'key',
           's3_store_host': 'localhost:8080',
           's3_store_bucket': 'glance',
           'known_stores': ['glance.store.s3.Store'],
           's3_store_large_object_size': 5,  # over 5MB is large
           's3_store_large_object_chunk_size': 6}  # part size is 6MB
# ensure that mpu api is used and parts are uploaded as expected
mpu_parts_uploaded = 0
# We stub out as little as possible to ensure that the code paths
# between glance.store.s3 and boto.s3.connection are tested
# thoroughly
def stub_out_s3(stubs):
    """Replace boto's network layer with an in-memory S3 fake.

    We stub out as little as possible to ensure that the code paths
    between glance.store.s3 and boto.s3.connection are tested
    thoroughly; only S3Connection's constructor and get_bucket are
    replaced.  A 'glance' bucket pre-seeded with FAKE_UUID is available.

    :param stubs: a ``stubout.StubOutForTesting`` instance
    """
    class FakeKey:
        """
        Acts like a ``boto.s3.key.Key``
        """
        def __init__(self, bucket, name):
            self.bucket = bucket
            self.name = name
            self.data = None
            self.size = 0
            self.etag = None
            self.BufferSize = 1024
        def close(self):
            pass
        def exists(self):
            return self.bucket.exists(self.name)
        def delete(self):
            self.bucket.delete(self.name)
        def compute_md5(self, data):
            # Stream the md5 in BufferSize chunks; returns the same
            # (hex, base64) shaped tuple boto does, with base64 unused.
            chunk = data.read(self.BufferSize)
            checksum = hashlib.md5()
            while chunk:
                checksum.update(chunk)
                chunk = data.read(self.BufferSize)
            checksum_hex = checksum.hexdigest()
            return checksum_hex, None
        def set_contents_from_file(self, fp, replace=False, **kwargs):
            # Honour boto's optional 'size' kwarg: consume at most that
            # many bytes from fp.
            max_read = kwargs.get('size')
            self.data = six.StringIO()
            checksum = hashlib.md5()
            while True:
                if max_read is None or max_read > self.BufferSize:
                    read_size = self.BufferSize
                elif max_read <= 0:
                    break
                else:
                    read_size = max_read
                chunk = fp.read(read_size)
                if not chunk:
                    break
                checksum.update(chunk)
                self.data.write(chunk)
                if max_read is not None:
                    max_read -= len(chunk)
            self.size = self.data.len
            # Reset the buffer to start
            self.data.seek(0)
            self.etag = checksum.hexdigest()
            self.read = self.data.read
        def get_file(self):
            return self.data
    class FakeMPU:
        """
        Acts like a ``boto.s3.multipart.MultiPartUpload``
        """
        def __init__(self, bucket, key_name):
            self.bucket = bucket
            self.id = str(uuid.uuid4())
            self.key_name = key_name
            self.parts = {}  # pnum -> FakeKey
            # Reset the module-level upload counter each time an MPU
            # starts so tests can assert how many parts were sent.
            global mpu_parts_uploaded
            mpu_parts_uploaded = 0
        def upload_part_from_file(self, fp, part_num, **kwargs):
            size = kwargs.get('size')
            part = FakeKey(self.bucket, self.key_name)
            part.set_contents_from_file(fp, size=size)
            self.parts[part_num] = part
            global mpu_parts_uploaded
            mpu_parts_uploaded += 1
            return part
        def verify_xml(self, xml_body):
            """
            Verify the CompleteMultipartUpload XML matches our part info
            (part numbers and etags).
            """
            xmlparts = {}
            cmuroot = xml.etree.ElementTree.fromstring(xml_body)
            for cmupart in cmuroot:
                pnum = int(cmupart.findtext('PartNumber'))
                etag = cmupart.findtext('ETag')
                xmlparts[pnum] = etag
            if len(xmlparts) != len(self.parts):
                return False
            for pnum in xmlparts.keys():
                if self.parts[pnum] is None:
                    return False
                if xmlparts[pnum] != self.parts[pnum].etag:
                    return False
            return True
        def complete_key(self):
            """
            Complete the parts into one big FakeKey
            """
            key = FakeKey(self.bucket, self.key_name)
            key.data = six.StringIO()
            checksum = hashlib.md5()
            cnt = 0
            for pnum in sorted(self.parts.keys()):
                cnt += 1
                part = self.parts[pnum]
                chunk = part.data.read(key.BufferSize)
                while chunk:
                    checksum.update(chunk)
                    key.data.write(chunk)
                    chunk = part.data.read(key.BufferSize)
            key.size = key.data.len
            key.data.seek(0)
            # Multipart etags take the '<md5>-<part count>' form, as on
            # real S3.
            key.etag = checksum.hexdigest() + '-%d' % cnt
            key.read = key.data.read
            return key
    class FakeBucket:
        """
        Acts like a ``boto.s3.bucket.Bucket``
        """
        def __init__(self, name, keys=None):
            self.name = name
            self.keys = keys or {}
            self.mpus = {}  # {key_name -> {id -> FakeMPU}}
        def __str__(self):
            return self.name
        def exists(self, key):
            return key in self.keys
        def delete(self, key):
            del self.keys[key]
        def get_key(self, key_name, **kwargs):
            return self.keys.get(key_name)
        def new_key(self, key_name):
            new_key = FakeKey(self, key_name)
            self.keys[key_name] = new_key
            return new_key
        def initiate_multipart_upload(self, key_name, **kwargs):
            mpu = FakeMPU(self, key_name)
            if key_name not in self.mpus:
                self.mpus[key_name] = {}
            self.mpus[key_name][mpu.id] = mpu
            return mpu
        def cancel_multipart_upload(self, key_name, upload_id, **kwargs):
            if key_name in self.mpus:
                if upload_id in self.mpus[key_name]:
                    del self.mpus[key_name][upload_id]
                    # Drop the per-key dict once its last upload is gone.
                    if not self.mpus[key_name]:
                        del self.mpus[key_name]
        def complete_multipart_upload(self, key_name, upload_id,
                                      xml_body, **kwargs):
            if key_name in self.mpus:
                if upload_id in self.mpus[key_name]:
                    mpu = self.mpus[key_name][upload_id]
                    if mpu.verify_xml(xml_body):
                        key = mpu.complete_key()
                        self.cancel_multipart_upload(key_name, upload_id)
                        self.keys[key_name] = key
                        cmpu = mock.Mock()
                        cmpu.bucket = self
                        cmpu.bucket_name = self.name
                        cmpu.key_name = key_name
                        cmpu.etag = key.etag
                        return cmpu
            return None  # NOTE: raising an exception might be better here
    # Pre-seed the fixture bucket with one 5KB image under FAKE_UUID.
    fixture_buckets = {'glance': FakeBucket('glance')}
    b = fixture_buckets['glance']
    k = b.new_key(FAKE_UUID)
    k.set_contents_from_file(six.StringIO("*" * FIVE_KB))
    def fake_connection_constructor(self, *args, **kwargs):
        # The real constructor rejects scheme-prefixed hosts; mirror that.
        host = kwargs.get('host')
        if host.startswith('http://') or host.startswith('https://'):
            raise UnsupportedBackend(host)
    def fake_get_bucket(conn, bucket_id):
        bucket = fixture_buckets.get(bucket_id)
        if not bucket:
            bucket = FakeBucket(bucket_id)
        return bucket
    stubs.Set(boto.s3.connection.S3Connection,
              '__init__', fake_connection_constructor)
    stubs.Set(boto.s3.connection.S3Connection,
              'get_bucket', fake_get_bucket)
def format_s3_location(user, key, authurl, bucket, obj):
    """Build the S3 store URI for the given credential/host/object parts.

    An ``https://`` auth URL yields the ``s3+https`` scheme; a plain or
    ``http://`` auth URL yields ``s3``.  Any scheme prefix and trailing
    slashes are stripped from the host portion.
    """
    if authurl.startswith('https://'):
        scheme = 's3+https'
        authurl = authurl[len('https://'):]
    else:
        scheme = 's3'
        if authurl.startswith('http://'):
            authurl = authurl[len('http://'):]
    host = authurl.strip('/')
    return '{0}://{1}:{2}@{3}/{4}/{5}'.format(scheme, user, key, host,
                                              bucket, obj)
class TestStore(base.StoreClearingUnitTest):
    """Exercises the S3 store driver against the in-memory boto fake."""
    def setUp(self):
        """Establish a clean test environment"""
        self.config(**S3_CONF)
        super(TestStore, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        stub_out_s3(self.stubs)
        self.store = Store()
        self.addCleanup(self.stubs.UnsetAll)
    def test_get(self):
        """Test a "normal" retrieval of an image in chunks"""
        loc = get_location_from_uri(
            "s3://user:key@auth_address/glance/%s" % FAKE_UUID)
        (image_s3, image_size) = self.store.get(loc)
        self.assertEqual(image_size, FIVE_KB)
        expected_data = "*" * FIVE_KB
        data = ""
        for chunk in image_s3:
            data += chunk
        self.assertEqual(expected_data, data)
    def test_get_calling_format_path(self):
        """Retrieval uses OrdinaryCallingFormat when 'path' is configured."""
        self.config(s3_store_bucket_url_format='path')
        def fake_S3Connection_init(*args, **kwargs):
            expected_cls = boto.s3.connection.OrdinaryCallingFormat
            self.assertIsInstance(kwargs.get('calling_format'), expected_cls)
        self.stubs.Set(boto.s3.connection.S3Connection, '__init__',
                       fake_S3Connection_init)
        loc = get_location_from_uri(
            "s3://user:key@auth_address/glance/%s" % FAKE_UUID)
        (image_s3, image_size) = self.store.get(loc)
    def test_get_calling_format_default(self):
        """Retrieval uses SubdomainCallingFormat by default."""
        def fake_S3Connection_init(*args, **kwargs):
            expected_cls = boto.s3.connection.SubdomainCallingFormat
            self.assertIsInstance(kwargs.get('calling_format'), expected_cls)
        self.stubs.Set(boto.s3.connection.S3Connection, '__init__',
                       fake_S3Connection_init)
        loc = get_location_from_uri(
            "s3://user:key@auth_address/glance/%s" % FAKE_UUID)
        (image_s3, image_size) = self.store.get(loc)
    def test_get_non_existing(self):
        """
        Test that trying to retrieve a s3 that doesn't exist
        raises an error
        """
        uri = "s3://user:key@auth_address/badbucket/%s" % FAKE_UUID
        loc = get_location_from_uri(uri)
        self.assertRaises(exception.NotFound, self.store.get, loc)
        uri = "s3://user:key@auth_address/glance/noexist"
        loc = get_location_from_uri(uri)
        self.assertRaises(exception.NotFound, self.store.get, loc)
    def test_add(self):
        """Test that we can add an image via the s3 backend"""
        expected_image_id = str(uuid.uuid4())
        expected_s3_size = FIVE_KB
        expected_s3_contents = "*" * expected_s3_size
        expected_checksum = hashlib.md5(expected_s3_contents).hexdigest()
        expected_location = format_s3_location(
            S3_CONF['s3_store_access_key'],
            S3_CONF['s3_store_secret_key'],
            S3_CONF['s3_store_host'],
            S3_CONF['s3_store_bucket'],
            expected_image_id)
        image_s3 = six.StringIO(expected_s3_contents)
        location, size, checksum, _ = self.store.add(expected_image_id,
                                                     image_s3,
                                                     expected_s3_size)
        self.assertEqual(expected_location, location)
        self.assertEqual(expected_s3_size, size)
        self.assertEqual(expected_checksum, checksum)
        # Round-trip: fetch what we stored and compare bytes.
        loc = get_location_from_uri(expected_location)
        (new_image_s3, new_image_size) = self.store.get(loc)
        new_image_contents = six.StringIO()
        for chunk in new_image_s3:
            new_image_contents.write(chunk)
        new_image_s3_size = new_image_contents.len
        self.assertEqual(expected_s3_contents, new_image_contents.getvalue())
        self.assertEqual(expected_s3_size, new_image_s3_size)
    def test_add_size_variations(self):
        """
        Test that adding images of various sizes which exercise both S3
        single uploads and the multipart upload apis. We've configured
        the big upload threshold to 5MB and the part size to 6MB.
        """
        variations = [(FIVE_KB, 0),  # simple put (5KB < 5MB)
                      (5242880, 1),  # 1 part (5MB <= 5MB < 6MB)
                      (6291456, 1),  # 1 part exact (5MB <= 6MB <= 6MB)
                      (7340032, 2)]  # 2 parts (6MB < 7MB <= 12MB)
        for (vsize, vcnt) in variations:
            expected_image_id = str(uuid.uuid4())
            expected_s3_size = vsize
            expected_s3_contents = "12345678" * (expected_s3_size / 8)
            expected_chksum = hashlib.md5(expected_s3_contents).hexdigest()
            expected_location = format_s3_location(
                S3_CONF['s3_store_access_key'],
                S3_CONF['s3_store_secret_key'],
                S3_CONF['s3_store_host'],
                S3_CONF['s3_store_bucket'],
                expected_image_id)
            image_s3 = six.StringIO(expected_s3_contents)
            # add image
            location, size, chksum, _ = self.store.add(expected_image_id,
                                                       image_s3,
                                                       expected_s3_size)
            self.assertEqual(expected_location, location)
            self.assertEqual(expected_s3_size, size)
            self.assertEqual(expected_chksum, chksum)
            # mpu_parts_uploaded is reset by the fake on MPU start.
            self.assertEqual(vcnt, mpu_parts_uploaded)
            # get image
            loc = get_location_from_uri(expected_location)
            (new_image_s3, new_image_s3_size) = self.store.get(loc)
            new_image_contents = six.StringIO()
            for chunk in new_image_s3:
                new_image_contents.write(chunk)
            new_image_size = new_image_contents.len
            self.assertEqual(expected_s3_size, new_image_s3_size)
            self.assertEqual(expected_s3_size, new_image_size)
            self.assertEqual(expected_s3_contents,
                             new_image_contents.getvalue())
    def test_add_host_variations(self):
        """
        Test that having http(s):// in the s3serviceurl in config
        options works as expected.
        """
        variations = ['http://localhost:80',
                      'http://localhost',
                      'http://localhost/v1',
                      'http://localhost/v1/',
                      'https://localhost',
                      'https://localhost:8080',
                      'https://localhost/v1',
                      'https://localhost/v1/',
                      'localhost',
                      'localhost:8080/v1']
        for variation in variations:
            expected_image_id = str(uuid.uuid4())
            expected_s3_size = FIVE_KB
            expected_s3_contents = "*" * expected_s3_size
            expected_checksum = hashlib.md5(expected_s3_contents).hexdigest()
            new_conf = S3_CONF.copy()
            new_conf['s3_store_host'] = variation
            expected_location = format_s3_location(
                new_conf['s3_store_access_key'],
                new_conf['s3_store_secret_key'],
                new_conf['s3_store_host'],
                new_conf['s3_store_bucket'],
                expected_image_id)
            image_s3 = six.StringIO(expected_s3_contents)
            self.config(**new_conf)
            self.store = Store()
            location, size, checksum, _ = self.store.add(expected_image_id,
                                                         image_s3,
                                                         expected_s3_size)
            self.assertEqual(expected_location, location)
            self.assertEqual(expected_s3_size, size)
            self.assertEqual(expected_checksum, checksum)
            loc = get_location_from_uri(expected_location)
            (new_image_s3, new_image_size) = self.store.get(loc)
            new_image_contents = new_image_s3.getvalue()
            # NOTE(review): len() is taken on the returned object itself,
            # not on its contents — presumably equivalent here; verify.
            new_image_s3_size = len(new_image_s3)
            self.assertEqual(expected_s3_contents, new_image_contents)
            self.assertEqual(expected_s3_size, new_image_s3_size)
    def test_add_already_existing(self):
        """
        Tests that adding an image with an existing identifier
        raises an appropriate exception
        """
        image_s3 = six.StringIO("nevergonnamakeit")
        self.assertRaises(exception.Duplicate,
                          self.store.add,
                          FAKE_UUID, image_s3, 0)
    def _option_required(self, key):
        # Returns True when nulling out `key` leaves the store with its
        # add() disabled (i.e. the option is required).
        conf = S3_CONF.copy()
        conf[key] = None
        try:
            self.config(**conf)
            self.store = Store()
            return self.store.add == self.store.add_disabled
        except Exception:
            return False
        return False  # NOTE(review): unreachable — both paths return above
    def test_no_access_key(self):
        """
        Tests that options without access key disables the add method
        """
        self.assertTrue(self._option_required('s3_store_access_key'))
    def test_no_secret_key(self):
        """
        Tests that options without secret key disables the add method
        """
        self.assertTrue(self._option_required('s3_store_secret_key'))
    def test_no_host(self):
        """
        Tests that options without host disables the add method
        """
        self.assertTrue(self._option_required('s3_store_host'))
    def test_delete(self):
        """
        Test we can delete an existing image in the s3 store
        """
        uri = "s3://user:key@auth_address/glance/%s" % FAKE_UUID
        loc = get_location_from_uri(uri)
        self.store.delete(loc)
        self.assertRaises(exception.NotFound, self.store.get, loc)
    def test_delete_non_existing(self):
        """
        Test that trying to delete a s3 that doesn't exist
        raises an error
        """
        uri = "s3://user:key@auth_address/glance/noexist"
        loc = get_location_from_uri(uri)
        self.assertRaises(exception.NotFound, self.store.delete, loc)
    def _do_test_get_s3_location(self, host, loc):
        # The derived location must be scheme- and port-insensitive.
        self.assertEqual(get_s3_location(host), loc)
        self.assertEqual(get_s3_location(host + ':80'), loc)
        self.assertEqual(get_s3_location('http://' + host), loc)
        self.assertEqual(get_s3_location('http://' + host + ':80'), loc)
        self.assertEqual(get_s3_location('https://' + host), loc)
        self.assertEqual(get_s3_location('https://' + host + ':80'), loc)
    def test_get_s3_good_location(self):
        """
        Test that the s3 location can be derived from the host
        """
        good_locations = [
            ('s3.amazonaws.com', ''),
            ('s3-eu-west-1.amazonaws.com', 'EU'),
            ('s3-us-west-1.amazonaws.com', 'us-west-1'),
            ('s3-ap-southeast-1.amazonaws.com', 'ap-southeast-1'),
            ('s3-ap-northeast-1.amazonaws.com', 'ap-northeast-1'),
        ]
        for (url, expected) in good_locations:
            self._do_test_get_s3_location(url, expected)
    def test_get_s3_bad_location(self):
        """
        Test that the s3 location cannot be derived from an unexpected host
        """
        bad_locations = [
            ('', ''),
            ('s3.amazon.co.uk', ''),
            ('s3-govcloud.amazonaws.com', ''),
            ('cloudfiles.rackspace.com', ''),
        ]
        for (url, expected) in bad_locations:
            self._do_test_get_s3_location(url, expected)
    def test_calling_format_path(self):
        """'path' config selects boto's OrdinaryCallingFormat."""
        self.config(s3_store_bucket_url_format='path')
        self.assertIsInstance(glance.store.s3.get_calling_format(),
                              boto.s3.connection.OrdinaryCallingFormat)
    def test_calling_format_subdomain(self):
        """'subdomain' config selects boto's SubdomainCallingFormat."""
        self.config(s3_store_bucket_url_format='subdomain')
        self.assertIsInstance(glance.store.s3.get_calling_format(),
                              boto.s3.connection.SubdomainCallingFormat)
    def test_calling_format_default(self):
        """With no config, the subdomain calling format is the default."""
        self.assertIsInstance(glance.store.s3.get_calling_format(),
                              boto.s3.connection.SubdomainCallingFormat)

View File

@ -19,22 +19,25 @@ import tempfile
import uuid
import eventlet
import glance_store
import mox
from oslo.config import cfg
from glance.common import exception
from glance import scrubber
import glance.store
from glance.tests import utils as test_utils
CONF = cfg.CONF
class TestScrubber(test_utils.BaseTestCase):
def setUp(self):
self.data_dir = tempfile.mkdtemp()
self.config(scrubber_datadir=self.data_dir)
self.config(default_store='file')
glance.store.create_stores()
glance_store.register_opts(CONF)
glance_store.create_stores()
self.config(group='glance_store', default_store='file')
self.mox = mox.Mox()
super(TestScrubber, self).setUp()
@ -49,12 +52,12 @@ class TestScrubber(test_utils.BaseTestCase):
def _scrubber_cleanup_with_store_delete_exception(self, ex):
uri = 'file://some/path/%s' % uuid.uuid4()
id = 'helloworldid'
scrub = scrubber.Scrubber(glance.store)
scrub = scrubber.Scrubber(glance_store)
scrub.registry = self.mox.CreateMockAnything()
scrub.registry.get_image(id).AndReturn({'status': 'pending_delete'})
scrub.registry.update_image(id, {'status': 'deleted'})
self.mox.StubOutWithMock(glance.store, "delete_from_backend")
glance.store.delete_from_backend(
self.mox.StubOutWithMock(glance_store, "delete_from_backend")
glance_store.delete_from_backend(
mox.IgnoreArg(),
uri).AndRaise(ex)
self.mox.ReplayAll()
@ -66,7 +69,7 @@ class TestScrubber(test_utils.BaseTestCase):
self.assertFalse(os.path.exists(q_path))
def test_store_delete_unsupported_backend_exception(self):
ex = glance.store.UnsupportedBackend()
ex = glance_store.UnsupportedBackend()
self._scrubber_cleanup_with_store_delete_exception(ex)
def test_store_delete_notfound_exception(self):

View File

@ -1,60 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import stubout
from glance.common import exception
from glance.common import utils
from glance.openstack.common import processutils
import glance.store.sheepdog
from glance.store.sheepdog import Store
from glance.tests.unit import base
# Minimal store configuration for the sheepdog driver tests.
SHEEPDOG_CONF = {'verbose': True,
                 'debug': True,
                 'default_store': 'sheepdog'}
class TestStore(base.StoreClearingUnitTest):
    """Tests for the sheepdog store driver with external commands stubbed."""
    def setUp(self):
        """Establish a clean test environment"""
        def _fake_execute(*cmd, **kwargs):
            # Swallow every external process invocation the driver makes.
            pass
        self.config(**SHEEPDOG_CONF)
        super(TestStore, self).setUp()
        self.stubs = stubout.StubOutForTesting()
        self.stubs.Set(processutils, 'execute', _fake_execute)
        self.store = Store()
        self.addCleanup(self.stubs.UnsetAll)
    def test_cleanup_when_add_image_exception(self):
        """A failed add must delete the partially created sheepdog image."""
        called_commands = []
        def _fake_run_command(self, command, data, *params):
            called_commands.append(command)
        self.stubs.Set(glance.store.sheepdog.SheepdogImage,
                       '_run_command', _fake_run_command)
        # Limit the reader to 1 byte while claiming a 2-byte image so the
        # size check fails mid-add.
        self.assertRaises(exception.ImageSizeLimitExceeded,
                          self.store.add,
                          'fake_image_id',
                          utils.LimitingReader(six.StringIO('xx'), 1),
                          2)
        self.assertEqual([['list', '-r'], ['create'], ['delete']],
                         called_commands)

View File

@ -1,80 +0,0 @@
# Copyright 2011-2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glance.common import exception
from glance.store import base as store_base
from glance.tests.unit import base as test_base
class FakeUnconfigurableStoreDriver(store_base.Store):
    """Store driver whose configure() always fails, for exclusion tests."""
    def configure(self):
        raise exception.BadStoreConfiguration("Unconfigurable store driver.")
class TestStoreBase(test_base.StoreClearingUnitTest):
    """Tests for the base Store class' configuration guard behaviour."""
    class UnconfiguredStore(store_base.Store):
        """Store stub whose operations all trivially succeed."""
        def add(self, image_id, image_file, image_size):
            return True
        def delete(self, location):
            return True
        def set_acls(self, location, public=False, read_tenants=None,
                     write_tenants=None):
            return True
        def get_size(self, location):
            return True
        def get(self, location):
            return True
        def add_disabled(self, *args, **kwargs):
            return True
    def setUp(self):
        self.config(default_store='file')
        super(TestStoreBase, self).setUp()
    def test_create_store_exclude_unconfigurable_drivers(self):
        """Drivers that fail configure() are dropped from the store list."""
        self.config(known_stores=[
            "glance.tests.unit.test_store_base.FakeUnconfigurableStoreDriver",
            "glance.store.filesystem.Store"])
        count = self._create_stores(passing_config=True)
        self.assertEqual(9, count)
        count = self._create_stores(passing_config=False)
        # Sheepdog and vSphere store drivers
        # need to use the default configure()
        # to handle essential options.
        self.assertEqual(7, count)
    def test_create_store_not_configured(self):
        """Every operation raises StoreNotConfigured before configuration."""
        store = self.UnconfiguredStore(configure=False)
        self.assertRaises(exception.StoreNotConfigured, store.add)
        self.assertRaises(exception.StoreNotConfigured, store.get)
        self.assertRaises(exception.StoreNotConfigured, store.get_size)
        self.assertRaises(exception.StoreNotConfigured, store.add_disabled)
        self.assertRaises(exception.StoreNotConfigured, store.delete)
        self.assertRaises(exception.StoreNotConfigured, store.set_acls)
    def test_create_store_configured(self):
        """Once configured, all operations are exposed and callable."""
        store = self.UnconfiguredStore(configure=True)
        self.assertTrue(store.add)
        self.assertTrue(store.get)
        self.assertTrue(store.get_size)
        self.assertTrue(store.add_disabled)
        self.assertTrue(store.delete)
        self.assertTrue(store.set_acls)

View File

@ -14,9 +14,10 @@
# under the License.
import mox
import glance_store
from glance.common import exception
import glance.location
import glance.store
from glance.tests.unit import utils as unit_test_utils
from glance.tests import utils
@ -93,11 +94,11 @@ class TestStoreImage(utils.BaseTestCase):
self.store_api, self.store_utils)
location = image.locations[0]
self.assertEqual(image.status, 'active')
self.store_api.get_from_backend({}, location['url'])
self.store_api.get_from_backend(location['url'], context={})
image.delete()
self.assertEqual(image.status, 'deleted')
self.assertRaises(exception.NotFound,
self.store_api.get_from_backend, {}, location['url'])
self.assertRaises(glance_store.NotFound,
self.store_api.get_from_backend, location['url'], {})
def test_image_get_data(self):
image = glance.location.ImageProxy(self.image_stub, {},
@ -105,7 +106,7 @@ class TestStoreImage(utils.BaseTestCase):
self.assertEqual(image.get_data(), 'XXX')
def test_image_get_data_from_second_location(self):
def fake_get_from_backend(self, context, location):
def fake_get_from_backend(self, location, context=None):
if UUID1 in location:
raise Exception('not allow download from %s' % location)
else:
@ -161,9 +162,9 @@ class TestStoreImage(utils.BaseTestCase):
self.assertEqual(image.status, 'active')
image.delete()
self.assertEqual(image.status, 'deleted')
self.assertRaises(exception.NotFound,
self.store_api.get_from_backend, {},
image.locations[0]['url'])
self.assertRaises(glance_store.NotFound,
self.store_api.get_from_backend,
image.locations[0]['url'], {})
def test_image_set_data_unknown_size(self):
context = glance.context.RequestContext(user=USER1)
@ -178,9 +179,9 @@ class TestStoreImage(utils.BaseTestCase):
self.assertEqual(image.status, 'active')
image.delete()
self.assertEqual(image.status, 'deleted')
self.assertRaises(exception.NotFound,
self.store_api.get_from_backend, {},
image.locations[0]['url'])
self.assertRaises(glance_store.NotFound,
self.store_api.get_from_backend,
image.locations[0]['url'], context={})
def _add_image(self, context, image_id, data, len):
image_stub = ImageStub(image_id, status='queued', locations=[])
@ -225,7 +226,7 @@ class TestStoreImage(utils.BaseTestCase):
# check below cases within 'TestStoreMetaDataChecker'.
location_bad = {'url': UUID3, 'metadata': "a invalid metadata"}
self.assertRaises(glance.store.BackendException,
self.assertRaises(glance_store.BackendException,
image1.locations.append, location_bad)
image1.delete()
@ -314,7 +315,7 @@ class TestStoreImage(utils.BaseTestCase):
location_bad = {'url': UUID3, 'metadata': "a invalid metadata"}
self.assertRaises(glance.store.BackendException,
self.assertRaises(glance_store.BackendException,
image1.locations.extend, [location_bad])
image1.delete()
@ -416,7 +417,7 @@ class TestStoreImage(utils.BaseTestCase):
location_bad = {'url': UUID3, 'metadata': "a invalid metadata"}
self.assertRaises(glance.store.BackendException,
self.assertRaises(glance_store.BackendException,
image1.locations.insert, 0, location_bad)
image1.delete()
@ -510,7 +511,7 @@ class TestStoreImage(utils.BaseTestCase):
location_bad = {'url': UUID2, 'metadata': "a invalid metadata"}
self.assertRaises(glance.store.BackendException,
self.assertRaises(glance_store.BackendException,
image2.locations.__iadd__, [location_bad])
self.assertEqual(image_stub2.locations, [])
self.assertEqual(image2.locations, [])
@ -790,43 +791,43 @@ class TestImageFactory(utils.BaseTestCase):
class TestStoreMetaDataChecker(utils.BaseTestCase):
def test_empty(self):
glance.store.check_location_metadata({})
glance_store.check_location_metadata({})
def test_unicode(self):
m = {'key': u'somevalue'}
glance.store.check_location_metadata(m)
glance_store.check_location_metadata(m)
def test_unicode_list(self):
m = {'key': [u'somevalue', u'2']}
glance.store.check_location_metadata(m)
glance_store.check_location_metadata(m)
def test_unicode_dict(self):
inner = {'key1': u'somevalue', 'key2': u'somevalue'}
m = {'topkey': inner}
glance.store.check_location_metadata(m)
glance_store.check_location_metadata(m)
def test_unicode_dict_list(self):
inner = {'key1': u'somevalue', 'key2': u'somevalue'}
m = {'topkey': inner, 'list': [u'somevalue', u'2'], 'u': u'2'}
glance.store.check_location_metadata(m)
glance_store.check_location_metadata(m)
def test_nested_dict(self):
inner = {'key1': u'somevalue', 'key2': u'somevalue'}
inner = {'newkey': inner}
inner = {'anotherkey': inner}
m = {'topkey': inner}
glance.store.check_location_metadata(m)
glance_store.check_location_metadata(m)
def test_simple_bad(self):
m = {'key1': object()}
self.assertRaises(glance.store.BackendException,
glance.store.check_location_metadata,
self.assertRaises(glance_store.BackendException,
glance_store.check_location_metadata,
m)
def test_list_bad(self):
m = {'key1': [u'somevalue', object()]}
self.assertRaises(glance.store.BackendException,
glance.store.check_location_metadata,
self.assertRaises(glance_store.BackendException,
glance_store.check_location_metadata,
m)
def test_nested_dict_bad(self):
@ -835,8 +836,8 @@ class TestStoreMetaDataChecker(utils.BaseTestCase):
inner = {'anotherkey': inner}
m = {'topkey': inner}
self.assertRaises(glance.store.BackendException,
glance.store.check_location_metadata,
self.assertRaises(glance_store.BackendException,
glance_store.check_location_metadata,
m)
@ -856,36 +857,36 @@ class TestStoreAddToBackend(utils.BaseTestCase):
self.mox.UnsetStubs()
def _bad_metadata(self, in_metadata):
store = self.mox.CreateMockAnything()
store.add(self.image_id, mox.IgnoreArg(), self.size).AndReturn(
mstore = self.mox.CreateMockAnything()
mstore.add(self.image_id, mox.IgnoreArg(), self.size).AndReturn(
(self.location, self.size, self.checksum, in_metadata))
store.__str__ = lambda: "hello"
store.__unicode__ = lambda: "hello"
mstore.__str__ = lambda: "hello"
mstore.__unicode__ = lambda: "hello"
self.mox.ReplayAll()
self.assertRaises(glance.store.BackendException,
glance.store.store_add_to_backend,
self.assertRaises(glance_store.BackendException,
glance_store.store_add_to_backend,
self.image_id,
self.data,
self.size,
store)
mstore)
self.mox.VerifyAll()
def _good_metadata(self, in_metadata):
store = self.mox.CreateMockAnything()
store.add(self.image_id, mox.IgnoreArg(), self.size).AndReturn(
mstore = self.mox.CreateMockAnything()
mstore.add(self.image_id, mox.IgnoreArg(), self.size).AndReturn(
(self.location, self.size, self.checksum, in_metadata))
self.mox.ReplayAll()
(location,
size,
checksum,
metadata) = glance.store.store_add_to_backend(self.image_id,
metadata) = glance_store.store_add_to_backend(self.image_id,
self.data,
self.size,
store)
mstore)
self.mox.VerifyAll()
self.assertEqual(self.location, location)
self.assertEqual(self.size, size)
@ -940,8 +941,8 @@ class TestStoreAddToBackend(utils.BaseTestCase):
self.mox.ReplayAll()
self.assertRaises(glance.store.BackendException,
glance.store.store_add_to_backend,
self.assertRaises(glance_store.BackendException,
glance_store.store_add_to_backend,
self.image_id,
self.data,
self.size,

View File

@ -13,19 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from glance.common import exception
from glance import context
import glance_store
import glance.location
import glance.store
import glance.store.filesystem
import glance.store.http
import glance.store.location as location
import glance.store.s3
import glance.store.swift
import glance.store.vmware_datastore
from glance.tests.unit import base
@ -40,516 +32,18 @@ CONF = {'default_store': 'file',
class TestStoreLocation(base.StoreClearingUnitTest):
    def setUp(self):
        """Register every store driver so all location schemes resolve."""
        self.config(default_store='file')
        # NOTE(flaper87): Each store should test
        # this in their test suite.
        self.config(known_stores=[
            "glance.store.filesystem.Store",
            "glance.store.http.Store",
            "glance.store.rbd.Store",
            "glance.store.s3.Store",
            "glance.store.swift.Store",
            "glance.store.sheepdog.Store",
            "glance.store.cinder.Store",
            "glance.store.gridfs.Store",
            "glance.store.vmware_datastore.Store",
        ])
        conf = CONF.copy()
        self.config(**conf)
        # Re-import so swift picks up the freshly set config values.
        reload(glance.store.swift)
        super(TestStoreLocation, self).setUp()
    def test_get_location_from_uri_back_to_uri(self):
        """
        Test that for various URIs, the correct Location
        object can be constructed and then the original URI
        returned via the get_store_uri() method.
        """
        # One representative URI per supported store scheme.
        good_store_uris = [
            'https://user:pass@example.com:80/images/some-id',
            'http://images.oracle.com/123456',
            'swift://account%3Auser:pass@authurl.com/container/obj-id',
            'swift://storeurl.com/container/obj-id',
            'swift+https://account%3Auser:pass@authurl.com/container/obj-id',
            's3://accesskey:secretkey@s3.amazonaws.com/bucket/key-id',
            's3://accesskey:secretwith/aslash@s3.amazonaws.com/bucket/key-id',
            's3+http://accesskey:secret@s3.amazonaws.com/bucket/key-id',
            's3+https://accesskey:secretkey@s3.amazonaws.com/bucket/key-id',
            'file:///var/lib/glance/images/1',
            'rbd://imagename',
            'rbd://fsid/pool/image/snap',
            'rbd://%2F/%2F/%2F/%2F',
            'sheepdog://244e75f1-9c69-4167-9db7-1aa7d1973f6c',
            'cinder://12345678-9012-3455-6789-012345678901',
            'vsphere://ip/folder/openstack_glance/2332298?dcPath=dc&dsName=ds',
        ]
        for uri in good_store_uris:
            loc = location.get_location_from_uri(uri)
            # The get_store_uri() method *should* return an identical URI
            # to the URI that is passed to get_location_from_uri()
            self.assertEqual(loc.get_store_uri(), uri)
def test_bad_store_scheme(self):
    """A URI whose scheme matches no registered store raises UnknownScheme."""
    unknown_scheme_uri = 'unknown://user:pass@example.com:80/images/some-id'
    self.assertRaises(exception.UnknownScheme,
                      location.get_location_from_uri,
                      unknown_scheme_uri)
def test_filesystem_store_location(self):
    """Parse and re-serialize a file:// location; reject malformed ones."""
    loc = glance.store.filesystem.StoreLocation({})
    good_uri = 'file:///var/lib/glance/images/1'
    loc.parse_uri(good_uri)
    self.assertEqual("file", loc.scheme)
    self.assertEqual("/var/lib/glance/images/1", loc.path)
    self.assertEqual(good_uri, loc.get_uri())
    # A misspelled scheme trips the parser's internal assertion ...
    self.assertRaises(AssertionError, loc.parse_uri, 'fil://')
    # ... while a well-formed scheme with an empty path is a BadStoreUri.
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, 'file://')
def test_http_store_location(self):
    """
    Test the specific StoreLocation for the HTTP store
    """
    # Plain http URI without credentials or port.
    uri = 'http://example.com/images/1'
    loc = glance.store.http.StoreLocation({})
    loc.parse_uri(uri)
    self.assertEqual("http", loc.scheme)
    self.assertEqual("example.com", loc.netloc)
    self.assertEqual("/images/1", loc.path)
    self.assertEqual(uri, loc.get_uri())
    # https with an explicit port kept inside netloc.
    uri = 'https://example.com:8080/images/container/1'
    loc.parse_uri(uri)
    self.assertEqual("https", loc.scheme)
    self.assertEqual("example.com:8080", loc.netloc)
    self.assertEqual("/images/container/1", loc.path)
    self.assertEqual(uri, loc.get_uri())
    # Credentials are split into user/password attributes.
    uri = 'https://user:password@example.com:8080/images/container/1'
    loc.parse_uri(uri)
    self.assertEqual("https", loc.scheme)
    self.assertEqual("example.com:8080", loc.netloc)
    self.assertEqual("user", loc.user)
    self.assertEqual("password", loc.password)
    self.assertEqual("/images/container/1", loc.path)
    self.assertEqual(uri, loc.get_uri())
    # An empty password ('user:@') parses as the empty string, not None.
    uri = 'https://user:@example.com:8080/images/1'
    loc.parse_uri(uri)
    self.assertEqual("https", loc.scheme)
    self.assertEqual("example.com:8080", loc.netloc)
    self.assertEqual("user", loc.user)
    self.assertEqual("", loc.password)
    self.assertEqual("/images/1", loc.path)
    self.assertEqual(uri, loc.get_uri())
    # Misspelled scheme -> internal assertion; empty or password-less
    # credential forms -> BadStoreUri.
    bad_uri = 'htt://'
    self.assertRaises(AssertionError, loc.parse_uri, bad_uri)
    bad_uri = 'http://'
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    bad_uri = 'http://user@example.com:8080/images/1'
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
def test_swift_store_location(self):
    """
    Test the specific StoreLocation for the Swift store
    """
    # swift+config:// resolves a named reference ('store_1') to concrete
    # auth URL/credentials — presumably from the default swift config
    # shipped with the tests; confirm against the glance-swift.conf data.
    uri = 'swift+config://store_1/images/1'
    loc = glance.store.swift.StoreLocation({})
    loc.parse_uri(uri)
    self.assertEqual("swift+config", loc.scheme)
    self.assertEqual("localhost:8080", loc.auth_or_store_url)
    self.assertEqual("https://localhost:8080", loc.swift_url)
    self.assertEqual("images", loc.container)
    self.assertEqual("1", loc.obj)
    self.assertEqual('user', loc.user)
    self.assertEqual('swift+https://user:key@localhost:8080/images/1',
                     loc.get_uri())
    # Point the swift store at a test-local config file and reload the
    # module so 'store_2' resolves against the new file.
    conf_file = "glance-swift.conf"
    test_dir = self.useFixture(fixtures.TempDir()).path
    self.swift_config_file = self._copy_data_file(conf_file, test_dir)
    conf = CONF.copy()
    conf.update({'swift_store_config_file': self.swift_config_file})
    self.config(**conf)
    reload(glance.store.swift)
    uri = 'swift+config://store_2/images/1'
    loc = glance.store.swift.StoreLocation({})
    loc.parse_uri(uri)
    self.assertEqual("swift+config", loc.scheme)
    self.assertEqual("localhost:8080", loc.auth_or_store_url)
    self.assertEqual("https://localhost:8080", loc.swift_url)
    self.assertEqual("images", loc.container)
    self.assertEqual("1", loc.obj)
    self.assertEqual('tenant:user1', loc.user)
    self.assertEqual('key1', loc.key)
    # Note the ':' in 'tenant:user1' is URL-encoded on re-serialization.
    self.assertEqual('swift+https://tenant%3Auser1:key1@localhost:8080'
                     '/images/1',
                     loc.get_uri())
    # Bare swift:// defaults to https and carries no credentials.
    uri = 'swift://example.com/images/1'
    loc = glance.store.swift.StoreLocation({})
    loc.parse_uri(uri)
    self.assertEqual("swift", loc.scheme)
    self.assertEqual("example.com", loc.auth_or_store_url)
    self.assertEqual("https://example.com", loc.swift_url)
    self.assertEqual("images", loc.container)
    self.assertEqual("1", loc.obj)
    self.assertIsNone(loc.user)
    self.assertEqual(uri, loc.get_uri())
    # swift+https with inline credentials.
    uri = 'swift+https://user:pass@authurl.com/images/1'
    loc.parse_uri(uri)
    self.assertEqual("swift+https", loc.scheme)
    self.assertEqual("authurl.com", loc.auth_or_store_url)
    self.assertEqual("https://authurl.com", loc.swift_url)
    self.assertEqual("images", loc.container)
    self.assertEqual("1", loc.obj)
    self.assertEqual("user", loc.user)
    self.assertEqual("pass", loc.key)
    self.assertEqual(uri, loc.get_uri())
    # A version segment ('/v1') stays attached to the auth/store URL.
    uri = 'swift+https://user:pass@authurl.com/v1/container/12345'
    loc.parse_uri(uri)
    self.assertEqual("swift+https", loc.scheme)
    self.assertEqual("authurl.com/v1", loc.auth_or_store_url)
    self.assertEqual("https://authurl.com/v1", loc.swift_url)
    self.assertEqual("container", loc.container)
    self.assertEqual("12345", loc.obj)
    self.assertEqual("user", loc.user)
    self.assertEqual("pass", loc.key)
    self.assertEqual(uri, loc.get_uri())
    # URL-encoded characters in user ('%3A' -> ':', '%40' -> '@') and
    # key ('%40' -> '@') are decoded on parse.
    uri = ('swift+http://a%3Auser%40example.com:p%40ss@authurl.com/'
           'v1/container/12345')
    loc.parse_uri(uri)
    self.assertEqual("swift+http", loc.scheme)
    self.assertEqual("authurl.com/v1", loc.auth_or_store_url)
    self.assertEqual("http://authurl.com/v1", loc.swift_url)
    self.assertEqual("container", loc.container)
    self.assertEqual("12345", loc.obj)
    self.assertEqual("a:user@example.com", loc.user)
    self.assertEqual("p@ss", loc.key)
    self.assertEqual(uri, loc.get_uri())
    # multitenant puts store URL in the location (not auth)
    uri = ('swift+http://storeurl.com/v1/container/12345')
    loc.parse_uri(uri)
    self.assertEqual("swift+http", loc.scheme)
    self.assertEqual("storeurl.com/v1", loc.auth_or_store_url)
    self.assertEqual("http://storeurl.com/v1", loc.swift_url)
    self.assertEqual("container", loc.container)
    self.assertEqual("12345", loc.obj)
    self.assertIsNone(loc.user)
    self.assertIsNone(loc.key)
    self.assertEqual(uri, loc.get_uri())
    # Malformed variants: misspelled scheme asserts; empty, password-less
    # or doubled-scheme URIs raise BadStoreUri.
    bad_uri = 'swif://'
    self.assertRaises(AssertionError, loc.parse_uri, bad_uri)
    bad_uri = 'swift://'
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    bad_uri = 'swift://user@example.com:8080/images/1'
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    bad_uri = 'swift://user:pass@http://example.com:8080/images/1'
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
def test_s3_store_location(self):
    """
    Test the specific StoreLocation for the S3 store
    """
    # Credential-less s3:// URI.
    uri = 's3://example.com/images/1'
    loc = glance.store.s3.StoreLocation({})
    loc.parse_uri(uri)
    self.assertEqual("s3", loc.scheme)
    self.assertEqual("example.com", loc.s3serviceurl)
    self.assertEqual("images", loc.bucket)
    self.assertEqual("1", loc.key)
    self.assertIsNone(loc.accesskey)
    self.assertEqual(uri, loc.get_uri())
    # s3+https with access key / secret key credentials.
    uri = 's3+https://accesskey:pass@s3serviceurl.com/images/1'
    loc.parse_uri(uri)
    self.assertEqual("s3+https", loc.scheme)
    self.assertEqual("s3serviceurl.com", loc.s3serviceurl)
    self.assertEqual("images", loc.bucket)
    self.assertEqual("1", loc.key)
    self.assertEqual("accesskey", loc.accesskey)
    self.assertEqual("pass", loc.secretkey)
    self.assertEqual(uri, loc.get_uri())
    # A version segment stays attached to the service URL.
    uri = 's3+https://accesskey:pass@s3serviceurl.com/v1/bucket/12345'
    loc.parse_uri(uri)
    self.assertEqual("s3+https", loc.scheme)
    self.assertEqual("s3serviceurl.com/v1", loc.s3serviceurl)
    self.assertEqual("bucket", loc.bucket)
    self.assertEqual("12345", loc.key)
    self.assertEqual("accesskey", loc.accesskey)
    self.assertEqual("pass", loc.secretkey)
    self.assertEqual(uri, loc.get_uri())
    # Secret keys may contain a literal '/'.
    uri = 's3://accesskey:pass/withslash@s3serviceurl.com/v1/bucket/12345'
    loc.parse_uri(uri)
    self.assertEqual("s3", loc.scheme)
    self.assertEqual("s3serviceurl.com/v1", loc.s3serviceurl)
    self.assertEqual("bucket", loc.bucket)
    self.assertEqual("12345", loc.key)
    self.assertEqual("accesskey", loc.accesskey)
    self.assertEqual("pass/withslash", loc.secretkey)
    self.assertEqual(uri, loc.get_uri())
    # Malformed variants: misspelled scheme asserts; empty, secret-less
    # or doubled-scheme URIs raise BadStoreUri.
    bad_uri = 's://'
    self.assertRaises(AssertionError, loc.parse_uri, bad_uri)
    bad_uri = 's3://'
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    bad_uri = 's3://accesskey@example.com:8080/images/1'
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    bad_uri = 's3://user:pass@http://example.com:8080/images/1'
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
def test_rbd_store_location(self):
    """
    Test the specific StoreLocation for the RBD store
    """
    # Short form: image name only; fsid/pool/snapshot stay unset.
    uri = 'rbd://imagename'
    loc = glance.store.rbd.StoreLocation({})
    loc.parse_uri(uri)
    self.assertEqual('imagename', loc.image)
    self.assertIsNone(loc.fsid)
    self.assertIsNone(loc.pool)
    self.assertIsNone(loc.snapshot)
    # Same, from a unicode literal (this file targets Python 2).
    uri = u'rbd://imagename'
    loc = glance.store.rbd.StoreLocation({})
    loc.parse_uri(uri)
    self.assertEqual('imagename', loc.image)
    self.assertIsNone(loc.fsid)
    self.assertIsNone(loc.pool)
    self.assertIsNone(loc.snapshot)
    # Long form: fsid/pool/image/snapshot.
    uri = 'rbd://fsid/pool/image/snap'
    loc = glance.store.rbd.StoreLocation({})
    loc.parse_uri(uri)
    self.assertEqual('image', loc.image)
    self.assertEqual('fsid', loc.fsid)
    self.assertEqual('pool', loc.pool)
    self.assertEqual('snap', loc.snapshot)
    uri = u'rbd://fsid/pool/image/snap'
    loc = glance.store.rbd.StoreLocation({})
    loc.parse_uri(uri)
    self.assertEqual('image', loc.image)
    self.assertEqual('fsid', loc.fsid)
    self.assertEqual('pool', loc.pool)
    self.assertEqual('snap', loc.snapshot)
    # Percent-encoded components are decoded ('%2f' -> '/').
    uri = 'rbd://%2f/%2f/%2f/%2f'
    loc = glance.store.rbd.StoreLocation({})
    loc.parse_uri(uri)
    self.assertEqual('/', loc.image)
    self.assertEqual('/', loc.fsid)
    self.assertEqual('/', loc.pool)
    self.assertEqual('/', loc.snapshot)
    uri = u'rbd://%2f/%2f/%2f/%2f'
    loc = glance.store.rbd.StoreLocation({})
    loc.parse_uri(uri)
    self.assertEqual('/', loc.image)
    self.assertEqual('/', loc.fsid)
    self.assertEqual('/', loc.pool)
    self.assertEqual('/', loc.snapshot)
    # Malformed variants: wrong scheme, wrong component count, trailing
    # slashes, and non-ASCII input all raise BadStoreUri.
    bad_uri = 'rbd:/image'
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    bad_uri = 'rbd://image/extra'
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    bad_uri = 'rbd://image/'
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    bad_uri = 'http://image'
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    bad_uri = 'http://fsid/pool/image/snap'
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    bad_uri = 'rbd://fsid/pool/image/'
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    bad_uri = 'rbd://fsid/pool/image/snap/'
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    bad_uri = 'http://///'
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    # unichr is a Python-2-only builtin (chr in Python 3).
    bad_uri = 'rbd://' + unichr(300)
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
def test_sheepdog_store_location(self):
    """Parse a sheepdog:// location and reject malformed variants."""
    loc = glance.store.sheepdog.StoreLocation({})
    loc.parse_uri('sheepdog://244e75f1-9c69-4167-9db7-1aa7d1973f6c')
    self.assertEqual('244e75f1-9c69-4167-9db7-1aa7d1973f6c', loc.image)
    # Missing slash, wrong scheme, and junk input must all be rejected.
    for malformed in ('sheepdog:/244e75f1-9c69-4167-9db7-1aa7d1973f6c',
                      'http://244e75f1-9c69-4167-9db7-1aa7d1973f6c',
                      'image; name'):
        self.assertRaises(exception.BadStoreUri, loc.parse_uri, malformed)
def test_vmware_store_location(self):
    """
    Test the specific StoreLocation for the VMware store
    """
    ds_url_prefix = glance.store.vmware_datastore.DS_URL_PREFIX
    image_dir = glance.store.vmware_datastore.DEFAULT_STORE_IMAGE_DIR
    # Well-formed vsphere URI: host, datastore path, and dc/ds query args.
    uri = ('vsphere://127.0.0.1%s%s/29038321?dcPath=my-dc&dsName=my-ds' %
           (ds_url_prefix, image_dir))
    loc = glance.store.vmware_datastore.StoreLocation({})
    loc.parse_uri(uri)
    self.assertEqual("vsphere", loc.scheme)
    self.assertEqual("127.0.0.1", loc.server_host)
    self.assertEqual("%s%s/29038321" %
                     (ds_url_prefix, image_dir), loc.path)
    self.assertEqual("dcPath=my-dc&dsName=my-ds", loc.query)
    self.assertEqual(uri, loc.get_uri())
    # Malformed variants: empty/misspelled scheme, wrong scheme, missing
    # slash, and paths outside the configured image dir all raise.
    bad_uri = 'vphere://'
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    bad_uri = ('vspheer://127.0.0.1%s%s/29038321?dcPath=my-dc&dsName=my-ds'
               % (ds_url_prefix, image_dir))
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    bad_uri = ('http://127.0.0.1%s%s/29038321?dcPath=my-dc&dsName=my-ds'
               % (ds_url_prefix, image_dir))
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    bad_uri = ('vsphere:/127.0.0.1%s%s/29038321?dcPath=my-dc&dsName=my-ds'
               % (ds_url_prefix, image_dir))
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    bad_uri = ('vsphere://127.0.0.1%s%s/29038321?dcPath=my-dc&dsName=my-ds'
               % (ds_url_prefix, "/folder_not_in_configuration"))
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
    bad_uri = ('vsphere://127.0.0.1%s%s/29038321?dcPath=my-dc&dsName=my-ds'
               % ("/wrong_folder_path", image_dir))
    self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
def test_cinder_store_good_location(self):
    """A cinder:// URI carrying a UUID parses into the volume id."""
    volume_uri = 'cinder://12345678-9012-3455-6789-012345678901'
    store_location = glance.store.cinder.StoreLocation({})
    store_location.parse_uri(volume_uri)
    self.assertEqual('12345678-9012-3455-6789-012345678901',
                     store_location.volume_id)
def test_cinder_store_bad_location(self):
    """A cinder:// URI whose volume id is not a UUID must be rejected."""
    non_uuid_uri = 'cinder://volume-id-is-a-uuid'
    store_location = glance.store.cinder.StoreLocation({})
    self.assertRaises(exception.BadStoreUri,
                      store_location.parse_uri,
                      non_uuid_uri)
def test_get_store_from_scheme(self):
    """
    Test that the backend returned by glance.store.get_backend_class
    is correct or raises an appropriate error.
    """
    # Renamed loop variables so nothing shadows the word 'store'.
    scheme_to_store_cls = {
        'swift': glance.store.swift.SingleTenantStore,
        'swift+http': glance.store.swift.SingleTenantStore,
        'swift+https': glance.store.swift.SingleTenantStore,
        's3': glance.store.s3.Store,
        's3+http': glance.store.s3.Store,
        's3+https': glance.store.s3.Store,
        'file': glance.store.filesystem.Store,
        'filesystem': glance.store.filesystem.Store,
        'http': glance.store.http.Store,
        'https': glance.store.http.Store,
        'rbd': glance.store.rbd.Store,
        'sheepdog': glance.store.sheepdog.Store,
        'cinder': glance.store.cinder.Store,
        'vsphere': glance.store.vmware_datastore.Store}
    ctx = context.RequestContext()
    for scheme, expected_cls in scheme_to_store_cls.items():
        store_obj = glance.store.get_store_from_scheme(ctx, scheme)
        self.assertEqual(store_obj.__class__, expected_cls)
    for bogus_scheme in ['fil', 'swift+h', 'unknown']:
        self.assertRaises(exception.UnknownScheme,
                          glance.store.get_store_from_scheme,
                          ctx,
                          bogus_scheme)
def test_add_location_for_image_without_size(self):
class FakeImageProxy():
size = None
context = None
store_api = mock.Mock()
def fake_get_size_from_backend(context, uri):
def fake_get_size_from_backend(uri, context=None):
return 1
self.stubs.Set(glance.store, 'get_size_from_backend',
self.stubs.Set(glance_store, 'get_size_from_backend',
fake_get_size_from_backend)
with mock.patch('glance.location._check_image_location'):
loc1 = {'url': 'file:///fake1.img.tar.gz', 'metadata': {}}
loc2 = {'url': 'file:///fake2.img.tar.gz', 'metadata': {}}

File diff suppressed because it is too large Load Diff

View File

@ -1,394 +0,0 @@
# Copyright 2014 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the VMware Datastore backend store"""
import hashlib
import uuid
import mock
import six
from glance.common import exception
from glance.openstack.common import units
from glance.store.location import get_location_from_uri
import glance.store.vmware_datastore as vm_store
from glance.store.vmware_datastore import Store
from glance.tests.unit import base
from glance.tests.unit import utils as unit_utils
from glance.tests import utils
# Image id shared by the retrieval/deletion tests below.
FAKE_UUID = str(uuid.uuid4())

# 5 KiB: payload size used by the add() tests.
FIVE_KB = 5 * units.Ki

# Test configuration; TestStore.setUp copies these values directly onto
# the Store instance rather than loading them through oslo.config.
VMWARE_DATASTORE_CONF = {
    'verbose': True,
    'debug': True,
    'known_stores': ['glance.store.vmware_datastore.Store'],
    'default_store': 'vsphere',
    'vmware_server_host': '127.0.0.1',
    'vmware_server_username': 'username',
    'vmware_server_password': 'password',
    'vmware_datacenter_path': 'dc1',
    'vmware_datastore_name': 'ds1',
    'vmware_store_image_dir': '/openstack_glance',
    'vmware_api_insecure': 'True',
    'vmware_api_retry_count': 10
}
def format_location(host_ip, folder_name,
                    image_id, datacenter_path, datastore_name):
    """Build the vsphere:// store URI for an image from its parts.

    Note: dsName precedes dcPath in the query string, so callers compare
    URIs with a query-sorting helper rather than string equality.
    """
    template = "%s://%s/folder%s/%s?dsName=%s&dcPath=%s"
    fields = ('vsphere', host_ip, folder_name,
              image_id, datastore_name, datacenter_path)
    return template % fields
class FakeHTTPConnection(object):
    """Minimal stand-in for httplib.HTTPConnection used by these tests.

    Only records the desired response status; request() and close() are
    deliberate no-ops.
    """

    def __init__(self, status=200, *args, **kwargs):
        # Fix: removed a dead `pass` statement that followed this
        # assignment. Extra positional/keyword args are accepted and
        # ignored so the class is call-compatible with HTTPConnection.
        self.status = status

    def getresponse(self):
        # utils.FakeHTTPResponse supplies a canned body and this status.
        return utils.FakeHTTPResponse(status=self.status)

    def request(self, *_args, **_kwargs):
        pass

    def close(self):
        pass
class TestStore(base.StoreClearingUnitTest):
@mock.patch('oslo.vmware.api.VMwareAPISession', autospec=True)
def setUp(self, mock_session):
    """Establish a clean test environment"""
    self.config(default_store='file')
    # NOTE(flaper87): Each store should test
    # this in their test suite.
    self.config(known_stores=VMWARE_DATASTORE_CONF['known_stores'])
    super(TestStore, self).setUp()
    # Tiny chunk size so get() yields many small chunks for the tests.
    Store.READ_CHUNKSIZE = 2
    self.store = Store()

    # Stub object graph standing in for an oslo.vmware session:
    # session.vim.client.options.transport.cookiejar. Presumably the
    # store walks this chain when talking to vSphere; the cookie-header
    # builder itself is mocked out below — confirm against the driver.
    class FakeSession:
        def __init__(self):
            self.vim = FakeVim()

    class FakeVim:
        def __init__(self):
            self.client = FakeClient()

    class FakeClient:
        def __init__(self):
            self.options = FakeOptions()

    class FakeOptions:
        def __init__(self):
            self.transport = FakeTransport()

    class FakeTransport:
        def __init__(self):
            self.cookiejar = FakeCookieJar()

    class FakeCookieJar:
        pass

    # Mirror the test configuration straight onto the store instance
    # instead of going through oslo.config.
    self.store.scheme = VMWARE_DATASTORE_CONF['default_store']
    self.store.server_host = (
        VMWARE_DATASTORE_CONF['vmware_server_host'])
    self.store.datacenter_path = (
        VMWARE_DATASTORE_CONF['vmware_datacenter_path'])
    self.store.datastore_name = (
        VMWARE_DATASTORE_CONF['vmware_datastore_name'])
    self.store.api_insecure = (
        VMWARE_DATASTORE_CONF['vmware_api_insecure'])
    self.store.api_retry_count = (
        VMWARE_DATASTORE_CONF['vmware_api_retry_count'])
    self.store._session = FakeSession()
    self.store._session.invoke_api = mock.Mock()
    self.store._session.wait_for_task = mock.Mock()
    self.store.store_image_dir = (
        VMWARE_DATASTORE_CONF['vmware_store_image_dir'])
    Store._build_vim_cookie_header = mock.Mock()
    self.addCleanup(self.stubs.UnsetAll)
def test_get(self):
    """Test a "normal" retrieval of an image in chunks"""
    # READ_CHUNKSIZE is 2 (see setUp), so the 31-byte canned response
    # from utils.FakeHTTPResponse comes back as two-char chunks.
    expected_image_size = 31
    expected_returns = ['I ', 'am', ' a', ' t', 'ea', 'po', 't,', ' s',
                        'ho', 'rt', ' a', 'nd', ' s', 'to', 'ut', '\n']
    loc = get_location_from_uri(
        "vsphere://127.0.0.1/folder/openstack_glance/%s"
        "?dsName=ds1&dcPath=dc1" % FAKE_UUID)
    with mock.patch('httplib.HTTPConnection') as HttpConn:
        HttpConn.return_value = FakeHTTPConnection()
        (image_file, image_size) = self.store.get(loc)
        self.assertEqual(image_size, expected_image_size)
        chunks = [c for c in image_file]
        self.assertEqual(chunks, expected_returns)
def test_get_non_existing(self):
    """
    Test that trying to retrieve an image that doesn't exist
    raises an error
    """
    loc = get_location_from_uri("vsphere://127.0.0.1/folder/openstack_glan"
                                "ce/%s?dsName=ds1&dcPath=dc1" % FAKE_UUID)
    # A 404 from the datastore maps to exception.NotFound.
    with mock.patch('httplib.HTTPConnection') as HttpConn:
        HttpConn.return_value = FakeHTTPConnection(status=404)
        self.assertRaises(exception.NotFound, self.store.get, loc)
@mock.patch.object(vm_store._Reader, 'size')
def test_add(self, fake_size):
    """Test that we can add an image via the VMware backend"""
    expected_image_id = str(uuid.uuid4())
    expected_size = FIVE_KB
    expected_contents = "*" * expected_size
    hash_code = hashlib.md5(expected_contents)
    expected_checksum = hash_code.hexdigest()
    # _Reader.size is patched as a property; make its getter report the
    # full payload size.
    fake_size.__get__ = mock.Mock(return_value=expected_size)
    with mock.patch('hashlib.md5') as md5:
        # Hand the store the same md5 object so checksums line up.
        md5.return_value = hash_code
        expected_location = format_location(
            VMWARE_DATASTORE_CONF['vmware_server_host'],
            VMWARE_DATASTORE_CONF['vmware_store_image_dir'],
            expected_image_id,
            VMWARE_DATASTORE_CONF['vmware_datacenter_path'],
            VMWARE_DATASTORE_CONF['vmware_datastore_name'])
        image = six.StringIO(expected_contents)
        with mock.patch('httplib.HTTPConnection') as HttpConn:
            HttpConn.return_value = FakeHTTPConnection()
            location, size, checksum, _ = self.store.add(expected_image_id,
                                                         image,
                                                         expected_size)
        # Query-string ordering is not guaranteed; compare sorted forms.
        self.assertEqual(unit_utils.sort_url_by_qs_keys(expected_location),
                         unit_utils.sort_url_by_qs_keys(location))
        self.assertEqual(expected_size, size)
        self.assertEqual(expected_checksum, checksum)
@mock.patch.object(vm_store._Reader, 'size')
def test_add_size_zero(self, fake_size):
    """
    Test that when specifying size zero for the image to add,
    the actual size of the image is returned.
    """
    expected_image_id = str(uuid.uuid4())
    expected_size = FIVE_KB
    expected_contents = "*" * expected_size
    hash_code = hashlib.md5(expected_contents)
    expected_checksum = hash_code.hexdigest()
    # The patched _Reader.size property supplies the real payload size.
    fake_size.__get__ = mock.Mock(return_value=expected_size)
    with mock.patch('hashlib.md5') as md5:
        md5.return_value = hash_code
        expected_location = format_location(
            VMWARE_DATASTORE_CONF['vmware_server_host'],
            VMWARE_DATASTORE_CONF['vmware_store_image_dir'],
            expected_image_id,
            VMWARE_DATASTORE_CONF['vmware_datacenter_path'],
            VMWARE_DATASTORE_CONF['vmware_datastore_name'])
        image = six.StringIO(expected_contents)
        with mock.patch('httplib.HTTPConnection') as HttpConn:
            HttpConn.return_value = FakeHTTPConnection()
            # Size 0 means "unknown"; the store must discover the size.
            location, size, checksum, _ = self.store.add(expected_image_id,
                                                         image, 0)
        self.assertEqual(unit_utils.sort_url_by_qs_keys(expected_location),
                         unit_utils.sort_url_by_qs_keys(location))
        self.assertEqual(expected_size, size)
        self.assertEqual(expected_checksum, checksum)
def test_delete(self):
    """Test we can delete an existing image in the VMware store"""
    loc = get_location_from_uri(
        "vsphere://127.0.0.1/folder/openstack_glance/%s?"
        "dsName=ds1&dcPath=dc1" % FAKE_UUID)
    with mock.patch('httplib.HTTPConnection') as HttpConn:
        HttpConn.return_value = FakeHTTPConnection()
        # delete() reads the service content from the session; mock it.
        Store._service_content = mock.Mock()
        self.store.delete(loc)
    # After deletion the datastore answers 404, so get() raises NotFound.
    with mock.patch('httplib.HTTPConnection') as HttpConn:
        HttpConn.return_value = FakeHTTPConnection(status=404)
        self.assertRaises(exception.NotFound, self.store.get, loc)
def test_get_size(self):
    """Test we can get the size of an existing image in the VMware store"""
    loc = get_location_from_uri(
        "vsphere://127.0.0.1/folder/openstack_glance/%s"
        "?dsName=ds1&dcPath=dc1" % FAKE_UUID)
    with mock.patch('httplib.HTTPConnection') as HttpConn:
        HttpConn.return_value = FakeHTTPConnection()
        image_size = self.store.get_size(loc)
        # 31 is the length of utils.FakeHTTPResponse's canned body.
        self.assertEqual(image_size, 31)
def test_get_size_non_existing(self):
    """
    Test that trying to retrieve an image size that doesn't exist
    raises an error
    """
    loc = get_location_from_uri("vsphere://127.0.0.1/folder/openstack_glan"
                                "ce/%s?dsName=ds1&dcPath=dc1" % FAKE_UUID)
    # A 404 from the datastore maps to exception.NotFound.
    with mock.patch('httplib.HTTPConnection') as HttpConn:
        HttpConn.return_value = FakeHTTPConnection(status=404)
        self.assertRaises(exception.NotFound, self.store.get_size, loc)
def test_reader_full(self):
    # Reading everything: _Reader tracks both the running md5 checksum
    # and the number of bytes consumed.
    content = 'XXX'
    image = six.StringIO(content)
    expected_checksum = hashlib.md5(content).hexdigest()
    reader = vm_store._Reader(image)
    ret = reader.read()
    self.assertEqual(content, ret)
    self.assertEqual(expected_checksum, reader.checksum.hexdigest())
    self.assertEqual(len(content), reader.size)
def test_reader_partial(self):
    # Partial read: checksum and size reflect only the bytes consumed.
    content = 'XXX'
    image = six.StringIO(content)
    expected_checksum = hashlib.md5('X').hexdigest()
    reader = vm_store._Reader(image)
    ret = reader.read(1)
    self.assertEqual('X', ret)
    self.assertEqual(expected_checksum, reader.checksum.hexdigest())
    self.assertEqual(1, reader.size)
def test_rewind(self):
    # rewind() restarts the underlying stream but, per these asserts,
    # does not reset the accumulated checksum or size counters.
    content = 'XXX'
    image = six.StringIO(content)
    expected_checksum = hashlib.md5(content).hexdigest()
    reader = vm_store._Reader(image)
    reader.read(1)
    ret = reader.read()
    self.assertEqual('XX', ret)
    self.assertEqual(expected_checksum, reader.checksum.hexdigest())
    self.assertEqual(len(content), reader.size)
    reader.rewind()
    ret = reader.read()
    self.assertEqual(content, ret)
    self.assertEqual(expected_checksum, reader.checksum.hexdigest())
    self.assertEqual(len(content), reader.size)
def test_chunkreader_image_fits_in_blocksize(self):
    """
    Test that the image file reader returns the expected chunk of data
    when the block size is larger than the image.
    """
    content = 'XXX'
    image = six.StringIO(content)
    expected_checksum = hashlib.md5(content).hexdigest()
    reader = vm_store._ChunkReader(image)
    ret = reader.read()
    # HTTP chunked-transfer framing: '<hex-len>\r\n<data>\r\n', followed
    # by the zero-length terminator chunk.
    expected_chunk = '%x\r\n%s\r\n' % (len(content), content)
    last_chunk = '0\r\n\r\n'
    self.assertEqual('%s%s' % (expected_chunk, last_chunk), ret)
    self.assertEqual(image.len, reader.size)
    self.assertEqual(expected_checksum, reader.checksum.hexdigest())
    self.assertTrue(reader.closed)
    # A read past EOF returns '' and leaves the counters untouched.
    ret = reader.read()
    self.assertEqual(image.len, reader.size)
    self.assertEqual(expected_checksum, reader.checksum.hexdigest())
    self.assertTrue(reader.closed)
    self.assertEqual('', ret)
def test_chunkreader_image_larger_blocksize(self):
    """
    Test that the image file reader returns the expected chunks when
    the block size specified is smaller than the image.
    """
    content = 'XXX'
    image = six.StringIO(content)
    expected_checksum = hashlib.md5(content).hexdigest()
    last_chunk = '0\r\n\r\n'
    # blocksize=1 -> three one-byte chunks plus the terminator.
    reader = vm_store._ChunkReader(image, blocksize=1)
    ret = reader.read()
    expected_chunk = '1\r\nX\r\n'
    self.assertEqual('%s%s%s%s' % (expected_chunk, expected_chunk,
                                   expected_chunk, last_chunk), ret)
    self.assertEqual(expected_checksum, reader.checksum.hexdigest())
    self.assertEqual(image.len, reader.size)
    self.assertTrue(reader.closed)
def test_chunkreader_size(self):
    """Test that the image reader takes into account the specified size."""
    content = 'XXX'
    image = six.StringIO(content)
    expected_checksum = hashlib.md5(content).hexdigest()
    reader = vm_store._ChunkReader(image, blocksize=1)
    # size=3 yields exactly the chunk-length header '1\r\n'; size=1 then
    # returns the single data byte — reads may split mid-frame.
    ret = reader.read(size=3)
    self.assertEqual('1\r\n', ret)
    ret = reader.read(size=1)
    self.assertEqual('X', ret)
    # Draining the rest finalizes checksum/size and closes the reader.
    ret = reader.read()
    self.assertEqual(expected_checksum, reader.checksum.hexdigest())
    self.assertEqual(image.len, reader.size)
    self.assertTrue(reader.closed)
def test_sanity_check_api_retry_count(self):
    """_sanity_check must reject vmware_api_retry_count values <= 0."""
    for bad_count in (-1, 0):
        vm_store.CONF.vmware_api_retry_count = bad_count
        self.assertRaises(exception.BadStoreConfiguration,
                          self.store._sanity_check)
    # A positive count must pass the sanity check.
    vm_store.CONF.vmware_api_retry_count = 1
    try:
        self.store._sanity_check()
    except exception.BadStoreConfiguration:
        self.fail()
def test_sanity_check_task_poll_interval(self):
    """_sanity_check must reject vmware_task_poll_interval values <= 0."""
    for bad_interval in (-1, 0):
        vm_store.CONF.vmware_task_poll_interval = bad_interval
        self.assertRaises(exception.BadStoreConfiguration,
                          self.store._sanity_check)
    # A positive interval must pass the sanity check.
    vm_store.CONF.vmware_task_poll_interval = 1
    try:
        self.store._sanity_check()
    except exception.BadStoreConfiguration:
        self.fail()
def test_retry_count(self):
    # On repeated 401 responses the store should recreate the session
    # and retry up to vmware_api_retry_count times; the '+ 1' accounts
    # for the initial attempt before any retry.
    expected_image_id = str(uuid.uuid4())
    expected_size = FIVE_KB
    expected_contents = "*" * expected_size
    image = six.StringIO(expected_contents)
    self.store._create_session = mock.Mock()
    with mock.patch('httplib.HTTPConnection') as HttpConn:
        HttpConn.return_value = FakeHTTPConnection(status=401)
        try:
            location, size, checksum, _ = self.store.add(expected_image_id,
                                                         image,
                                                         expected_size)
        except exception.NotAuthenticated:
            pass
        self.assertEqual(VMWARE_DATASTORE_CONF['vmware_api_retry_count'] + 1,
                         self.store._create_session.call_count)

View File

@ -16,6 +16,7 @@
import urllib
import urlparse
import glance_store as store
from oslo.config import cfg
from glance.common import exception
@ -23,7 +24,6 @@ from glance.common import wsgi
import glance.context
import glance.db.simple.api as simple_db
import glance.openstack.common.log as logging
import glance.store
CONF = cfg.CONF
@ -78,7 +78,7 @@ def get_fake_request(path='', method='POST', is_admin=False, user=USER1,
return req
def fake_get_size_from_backend(context, uri):
def fake_get_size_from_backend(uri, context=None):
return 1
@ -151,8 +151,8 @@ class FakeStoreAPI(object):
def create_stores(self):
pass
def set_acls(self, context, uri, public=False,
read_tenants=None, write_tenants=None):
def set_acls(self, uri, public=False, read_tenants=None,
write_tenants=None, context=None):
if read_tenants is None:
read_tenants = []
if write_tenants is None:
@ -164,19 +164,20 @@ class FakeStoreAPI(object):
'write': write_tenants,
}
def get_from_backend(self, context, location):
def get_from_backend(self, location, context=None):
try:
scheme = location[:location.find('/') - 1]
if scheme == 'unknown':
raise exception.UnknownScheme(scheme=scheme)
raise store.UnknownScheme(scheme=scheme)
return self.data[location]
except KeyError:
raise exception.NotFound()
raise store.NotFound()
def get_size_from_backend(self, context, location):
return self.get_from_backend(context, location)[1]
def get_size_from_backend(self, location, context=None):
return self.get_from_backend(location, context=context)[1]
def add_to_backend(self, context, scheme, image_id, data, size):
def add_to_backend(self, conf, image_id, data, size,
scheme=None, context=None):
store_max_size = 7
current_store_size = 2
for location in self.data.keys():
@ -198,7 +199,7 @@ class FakeStoreAPI(object):
return (image_id, size, checksum, self.store_metadata)
def check_location_metadata(self, val, key=''):
glance.store.check_location_metadata(val)
store.check_location_metadata(val)
class FakePolicyEnforcer(object):

View File

@ -20,6 +20,7 @@ import datetime
import hashlib
import uuid
import glance_store as store
import mock
from oslo.config import cfg
import routes
@ -39,8 +40,6 @@ from glance.openstack.common import jsonutils
from glance.openstack.common import timeutils
import glance.registry.client.v1.api as registry
import glance.store.filesystem
from glance.store import http
from glance.tests.unit import base
import glance.tests.unit.utils as unit_test_utils
from glance.tests import utils as test_utils
@ -91,7 +90,7 @@ class TestGlanceAPI(base.IsolatedUnitTest):
'metadata': {}, 'status': 'active'}],
'properties': {}}]
self.context = glance.context.RequestContext(is_admin=True)
glance.api.v1.images.validate_location = mock.Mock()
store.validate_location = mock.Mock()
db_api.get_engine()
self.destroy_fixtures()
self.create_fixtures()
@ -126,7 +125,9 @@ class TestGlanceAPI(base.IsolatedUnitTest):
for k, v in six.iteritems(fixture_headers):
req.headers[k] = v
with mock.patch.object(http.Store, 'get_size') as mocked_size:
http = store.get_store_from_scheme('http')
with mock.patch.object(http, 'get_size') as mocked_size:
mocked_size.return_value = 0
res = req.get_response(self.api)
self.assertEqual(res.status_int, 201)
@ -246,7 +247,8 @@ class TestGlanceAPI(base.IsolatedUnitTest):
for k, v in six.iteritems(fixture_headers):
req.headers[k] = v
with mock.patch.object(http.Store, 'get_size') as mocked_size:
http = store.get_store_from_scheme('http')
with mock.patch.object(http, 'get_size') as mocked_size:
mocked_size.return_value = 0
res = req.get_response(self.api)
self.assertEqual(res.status_int, 201)
@ -284,7 +286,9 @@ class TestGlanceAPI(base.IsolatedUnitTest):
for k, v in six.iteritems(fixture_headers):
req.headers[k] = v
with mock.patch.object(http.Store, 'get_size') as mocked_size:
http = store.get_store_from_scheme('http')
with mock.patch.object(http, 'get_size') as mocked_size:
mocked_size.return_value = 0
res = req.get_response(self.api)
self.assertEqual(res.status_int, 201)
@ -341,7 +345,9 @@ class TestGlanceAPI(base.IsolatedUnitTest):
for k, v in six.iteritems(fixture_headers):
req.headers[k] = v
with mock.patch.object(http.Store, 'get_size') as mocked_size:
http = store.get_store_from_scheme('http')
with mock.patch.object(http, 'get_size') as mocked_size:
mocked_size.return_value = 0
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
@ -384,7 +390,7 @@ class TestGlanceAPI(base.IsolatedUnitTest):
def test_create_with_location_bad_store_uri(self):
fixture_headers = {
'x-image-meta-store': 'swift',
'x-image-meta-store': 'file',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://',
'x-image-meta-disk-format': 'qcow2',
@ -495,7 +501,9 @@ class TestGlanceAPI(base.IsolatedUnitTest):
req.method = 'PUT'
req.headers['x-image-meta-location'] = 'http://localhost:0/images/123'
with mock.patch.object(http.Store, 'get_size') as mocked_size:
http = store.get_store_from_scheme('http')
with mock.patch.object(http, 'get_size') as mocked_size:
mocked_size.return_value = 0
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
@ -953,7 +961,10 @@ class TestGlanceAPI(base.IsolatedUnitTest):
req = webob.Request.blank("/images")
req.headers['Content-Type'] = 'application/octet-stream'
req.method = 'POST'
with mock.patch.object(http.Store, 'get_size') as size:
http = store.get_store_from_scheme('http')
with mock.patch.object(http, 'get_size') as size:
size.return_value = 2
for k, v in six.iteritems(fixture_headers):
@ -966,8 +977,8 @@ class TestGlanceAPI(base.IsolatedUnitTest):
"""Tests creates an image from location and conflict image size"""
mock_validate_location = mock.Mock()
glance.api.v1.images.validate_location = mock_validate_location
mock_validate_location.side_effect = exception.BadStoreUri()
store.validate_location = mock_validate_location
mock_validate_location.side_effect = store.BadStoreUri()
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
@ -1240,26 +1251,29 @@ class TestGlanceAPI(base.IsolatedUnitTest):
self.assertEqual(res.status_int, 200)
self.assertEqual("deleted", res.headers['x-image-meta-status'])
@mock.patch.object(glance.store.filesystem.Store, 'delete')
def test_image_status_when_delete_fails(self, mock_fsstore_delete):
def test_image_status_when_delete_fails(self):
"""
Tests that the image status set to active if deletion of image fails.
"""
mock_fsstore_delete.side_effect = exception.Forbidden()
# trigger the v1 delete api
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 403)
self.assertTrue('Forbidden to delete image' in res.body)
fs = store.get_store_from_scheme('file')
# check image metadata is still there with active state
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
self.assertEqual("active", res.headers['x-image-meta-status'])
with mock.patch.object(fs, 'delete') as mock_fsstore_delete:
mock_fsstore_delete.side_effect = exception.Forbidden()
# trigger the v1 delete api
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 403)
self.assertTrue('Forbidden to delete image' in res.body)
# check image metadata is still there with active state
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
self.assertEqual("active", res.headers['x-image-meta-status'])
def test_delete_pending_delete_image(self):
"""
@ -1542,7 +1556,7 @@ class TestGlanceAPI(base.IsolatedUnitTest):
# We expect 500 since an exception occured during upload.
self.assertEqual(500, res.status_int)
@mock.patch('glance.store.store_add_to_backend')
@mock.patch('glance_store.store_add_to_backend')
def test_upload_safe_kill(self, mock_store_add_to_backend):
def mock_store_add_to_backend_w_exception(*args, **kwargs):
@ -1562,7 +1576,7 @@ class TestGlanceAPI(base.IsolatedUnitTest):
self.assertEqual(1, mock_store_add_to_backend.call_count)
@mock.patch('glance.store.store_add_to_backend')
@mock.patch('glance_store.store_add_to_backend')
def test_upload_safe_kill_deleted(self, mock_store_add_to_backend):
test_router_api = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(test_router_api,
@ -2296,7 +2310,9 @@ class TestGlanceAPI(base.IsolatedUnitTest):
req.headers[k] = v
req.method = 'POST'
with mock.patch.object(http.Store, 'get_size') as size:
http = store.get_store_from_scheme('http')
with mock.patch.object(http, 'get_size') as size:
size.return_value = 0
res = req.get_response(self.api)
self.assertEqual(res.status_int, 201)
@ -2616,15 +2632,6 @@ class TestGlanceAPI(base.IsolatedUnitTest):
res = req.get_response(self.api)
self.assertEqual(res.status_int, 403)
@mock.patch.object(glance.store.filesystem.Store, 'delete')
def test_delete_image_in_use(self, mock_filesystem_delete):
mock_filesystem_delete.side_effect = exception.InUseByStore()
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 409)
def test_head_details(self):
req = webob.Request.blank('/images/detail')
req.method = 'HEAD'

View File

@ -34,7 +34,6 @@ from glance.openstack.common import jsonutils
from glance.openstack.common import timeutils
from glance.registry.api import v1 as rserver
import glance.store.filesystem
from glance.tests.unit import base
from glance.tests import utils as test_utils

View File

@ -17,6 +17,7 @@ from contextlib import contextmanager
import mock
from mock import patch
import glance_store
import webob.exc
from glance.api.v1 import upload_utils
@ -210,12 +211,12 @@ class TestUploadUtils(base.StoreClearingUnitTest):
def test_upload_data_to_store_storage_full(self):
self._test_upload_data_to_store_exception_with_notify(
exception.StorageFull,
glance_store.StorageFull,
webob.exc.HTTPRequestEntityTooLarge)
def test_upload_data_to_store_storage_write_denied(self):
self._test_upload_data_to_store_exception_with_notify(
exception.StorageWriteDenied,
glance_store.StorageWriteDenied,
webob.exc.HTTPServiceUnavailable)
def test_upload_data_to_store_size_limit_exceeded(self):

View File

@ -16,6 +16,7 @@
import mock
import uuid
import glance_store
import six
import webob
@ -228,7 +229,7 @@ class TestImagesController(base.StoreClearingUnitTest):
def test_upload_storage_full(self):
request = unit_test_utils.get_fake_request()
image = FakeImage()
image.set_data = Raise(exception.StorageFull)
image.set_data = Raise(glance_store.StorageFull)
self.image_repo.result = image
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.upload,
@ -268,7 +269,7 @@ class TestImagesController(base.StoreClearingUnitTest):
def test_upload_storage_write_denied(self):
request = unit_test_utils.get_fake_request(user=unit_test_utils.USER3)
image = FakeImage()
image.set_data = Raise(exception.StorageWriteDenied)
image.set_data = Raise(glance_store.StorageWriteDenied)
self.image_repo.result = image
self.assertRaises(webob.exc.HTTPServiceUnavailable,
self.controller.upload,
@ -328,7 +329,7 @@ class TestImagesController(base.StoreClearingUnitTest):
def test_restore_image_when_upload_failed(self):
request = unit_test_utils.get_fake_request()
image = FakeImage('fake')
image.set_data = Raise(exception.StorageWriteDenied)
image.set_data = Raise(glance_store.StorageWriteDenied)
self.image_repo.result = image
self.assertRaises(webob.exc.HTTPServiceUnavailable,
self.controller.upload,

View File

@ -15,6 +15,7 @@
import datetime
import glance_store
from oslo.config import cfg
import webob
@ -101,7 +102,7 @@ class TestImageMembersController(test_utils.BaseTestCase):
self.policy,
self.notifier,
self.store)
glance.store.create_stores()
glance_store.create_stores()
def _create_images(self):
self.db.reset()

View File

@ -14,8 +14,10 @@
# under the License.
import datetime
import os
import uuid
import glance_store as store
from oslo.config import cfg
import six
import testtools
@ -25,7 +27,6 @@ import glance.api.v2.images
from glance.common import exception
from glance.openstack.common import jsonutils
import glance.schema
import glance.store
from glance.tests.unit import base
import glance.tests.unit.utils as unit_test_utils
import glance.tests.utils as test_utils
@ -126,7 +127,7 @@ class TestImagesController(base.IsolatedUnitTest):
self.notifier,
self.store)
self.controller.gateway.store_utils = self.store_utils
glance.store.create_stores()
store.create_stores()
def _create_images(self):
self.db.reset()
@ -1270,7 +1271,7 @@ class TestImagesController(base.IsolatedUnitTest):
another_request, created_image.image_id, changes)
def test_update_replace_locations(self):
self.stubs.Set(glance.store, 'get_size_from_backend',
self.stubs.Set(store, 'get_size_from_backend',
unit_test_utils.fake_get_size_from_backend)
request = unit_test_utils.get_fake_request()
changes = [{'op': 'replace', 'path': ['locations'], 'value': []}]
@ -1537,7 +1538,7 @@ class TestImagesController(base.IsolatedUnitTest):
as long as the image has fewer than the limited number of image
locations after the transaction.
"""
self.stubs.Set(glance.store, 'get_size_from_backend',
self.stubs.Set(store, 'get_size_from_backend',
unit_test_utils.fake_get_size_from_backend)
self.config(show_multiple_locations=True)
request = unit_test_utils.get_fake_request()
@ -1599,7 +1600,7 @@ class TestImagesController(base.IsolatedUnitTest):
self.controller.update, request, UUID1, changes)
def test_update_remove_location(self):
self.stubs.Set(glance.store, 'get_size_from_backend',
self.stubs.Set(store, 'get_size_from_backend',
unit_test_utils.fake_get_size_from_backend)
request = unit_test_utils.get_fake_request()
@ -1729,7 +1730,9 @@ class TestImagesController(base.IsolatedUnitTest):
Ensure status of queued image is updated (LP bug #1048851)
to 'deleted' when delayed_delete isenabled
"""
self.config(delayed_delete=True)
scrubber_dir = os.path.join(self.test_dir, 'scrubber')
self.config(delayed_delete=True, scrubber_datadir=scrubber_dir)
request = unit_test_utils.get_fake_request(is_admin=True)
image = self.db.image_create(request.context, {'status': 'queued'})
image_id = image['id']
@ -1756,7 +1759,8 @@ class TestImagesController(base.IsolatedUnitTest):
self.assertNotIn('%s/%s' % (BASE_URI, UUID1), self.store.data)
def test_delayed_delete(self):
self.config(delayed_delete=True)
scrubber_dir = os.path.join(self.test_dir, 'scrubber')
self.config(delayed_delete=True, scrubber_datadir=scrubber_dir)
request = unit_test_utils.get_fake_request()
self.assertIn('%s/%s' % (BASE_URI, UUID1), self.store.data)

View File

@ -32,7 +32,6 @@ from glance.openstack.common import jsonutils
from glance.openstack.common import timeutils
from glance.registry.api import v2 as rserver
import glance.store.filesystem
from glance.tests.unit import base
from glance.tests import utils as test_utils

View File

@ -51,3 +51,6 @@ oslo.messaging>=1.4.0.0a3
retrying>=1.2.2 # Apache-2.0
osprofiler>=0.3.0
# Glance Store
glance_store>=0.1.1