Per-datastore volume support
Implements blueprint: per-datastore-volume-support. Introduces volume support on a per-datastore basis using config values in the datastore config groups. DocImpact: New config values device_path and volume_support have been added to each datastore's config group, rather than to the DEFAULT conf section. Change-Id: I871cbed1f825d719b189f71a3ff2c748fb8abdc0
This commit is contained in:
parent
5f98abb9df
commit
2d12d7a2d0
|
@ -188,14 +188,27 @@ pydev_debug = disabled
|
|||
# Format (single port or port range): A, B-C
|
||||
# where C greater than B
|
||||
tcp_ports = 3306
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
|
||||
[redis]
|
||||
# Format (single port or port range): A, B-C
|
||||
# where C greater than B
|
||||
tcp_ports = 6379
|
||||
# redis uses local storage
|
||||
volume_support = False
|
||||
# default device_path = None
|
||||
|
||||
[cassandra]
|
||||
tcp_ports = 7000, 7001, 9042, 9160
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
|
||||
[couchbase]
|
||||
tcp_ports = 8091, 8092, 4369, 11209-11211, 21100-21199
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
|
||||
[mongodb]
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
|
|
|
@ -193,12 +193,25 @@ root_on_create = False
|
|||
# Format (single port or port range): A, B-C
|
||||
# where C greater than B
|
||||
tcp_ports = 3306
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
|
||||
[redis]
|
||||
tcp_ports = 6379
|
||||
#redis uses local storage
|
||||
volume_support = False
|
||||
# default device_path = None
|
||||
|
||||
[cassandra]
|
||||
tcp_ports = 7000, 7001, 9042, 9160
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
|
||||
[couchbase]
|
||||
tcp_ports = 8091, 8092, 4369, 11209-11211, 21100-21199
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
|
||||
[mongodb]
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
|
|
|
@ -137,6 +137,27 @@ control_exchange = trove
|
|||
|
||||
paste_config_file=api-paste.ini.test
|
||||
|
||||
[mysql]
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
|
||||
[redis]
|
||||
# redis uses local storage
|
||||
volume_support = False
|
||||
# default device_path = None
|
||||
|
||||
[cassandra]
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
|
||||
[couchbase]
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
|
||||
[mongodb]
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
|
||||
[composite:trove]
|
||||
use = call:trove.common.wsgi:versioned_urlmap
|
||||
/: versions
|
||||
|
|
|
@ -188,8 +188,7 @@ instance = {
|
|||
"properties": {
|
||||
"instance": {
|
||||
"type": "object",
|
||||
"required": ["name", "flavorRef",
|
||||
"volume" if CONF.trove_volume_support else None],
|
||||
"required": ["name", "flavorRef"],
|
||||
"additionalProperties": True,
|
||||
"properties": {
|
||||
"name": non_empty_string,
|
||||
|
|
|
@ -313,6 +313,10 @@ mysql_opts = [
|
|||
default='trove.guestagent.strategies.backup.mysql_impl'),
|
||||
cfg.StrOpt('restore_namespace',
|
||||
default='trove.guestagent.strategies.restore.mysql_impl'),
|
||||
cfg.BoolOpt('volume_support',
|
||||
default=True,
|
||||
help='Whether to provision a cinder volume for datadir.'),
|
||||
cfg.StrOpt('device_path', default='/dev/vdb'),
|
||||
]
|
||||
|
||||
# Percona
|
||||
|
@ -344,6 +348,10 @@ percona_opts = [
|
|||
default='trove.guestagent.strategies.backup.mysql_impl'),
|
||||
cfg.StrOpt('restore_namespace',
|
||||
default='trove.guestagent.strategies.restore.mysql_impl'),
|
||||
cfg.BoolOpt('volume_support',
|
||||
default=True,
|
||||
help='Whether to provision a cinder volume for datadir.'),
|
||||
cfg.StrOpt('device_path', default='/dev/vdb'),
|
||||
]
|
||||
|
||||
# Redis
|
||||
|
@ -366,6 +374,10 @@ redis_opts = [
|
|||
"volumes if volume support is enabled."),
|
||||
cfg.IntOpt('usage_timeout', default=450,
|
||||
help='Timeout to wait for a guest to become active.'),
|
||||
cfg.BoolOpt('volume_support',
|
||||
default=False,
|
||||
help='Whether to provision a cinder volume for datadir.'),
|
||||
cfg.StrOpt('device_path', default=None),
|
||||
]
|
||||
|
||||
# Cassandra
|
||||
|
@ -388,6 +400,10 @@ cassandra_opts = [
|
|||
"volumes if volume support is enabled."),
|
||||
cfg.IntOpt('usage_timeout', default=600,
|
||||
help='Timeout to wait for a guest to become active.'),
|
||||
cfg.BoolOpt('volume_support',
|
||||
default=True,
|
||||
help='Whether to provision a cinder volume for datadir.'),
|
||||
cfg.StrOpt('device_path', default='/dev/vdb'),
|
||||
]
|
||||
|
||||
# Couchbase
|
||||
|
@ -420,7 +436,11 @@ couchbase_opts = [
|
|||
cfg.StrOpt('backup_namespace',
|
||||
default='trove.guestagent.strategies.backup.couchbase_impl'),
|
||||
cfg.StrOpt('restore_namespace',
|
||||
default='trove.guestagent.strategies.restore.couchbase_impl')
|
||||
default='trove.guestagent.strategies.restore.couchbase_impl'),
|
||||
cfg.BoolOpt('volume_support',
|
||||
default=True,
|
||||
help='Whether to provision a cinder volume for datadir.'),
|
||||
cfg.StrOpt('device_path', default='/dev/vdb'),
|
||||
]
|
||||
|
||||
# MongoDB
|
||||
|
@ -443,6 +463,10 @@ mongodb_opts = [
|
|||
"volumes if volume support is enabled."),
|
||||
cfg.IntOpt('usage_timeout', default=450,
|
||||
help='Timeout to wait for a guest to become active.'),
|
||||
cfg.BoolOpt('volume_support',
|
||||
default=True,
|
||||
help='Whether to provision a cinder volume for datadir.'),
|
||||
cfg.StrOpt('device_path', default='/dev/vdb'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
|
|
@ -334,6 +334,14 @@ class SimpleInstance(object):
|
|||
def datastore(self):
|
||||
return self.ds
|
||||
|
||||
@property
|
||||
def volume_support(self):
|
||||
return CONF.get(self.datastore_version.manager).volume_support
|
||||
|
||||
@property
|
||||
def device_path(self):
|
||||
return CONF.get(self.datastore_version.manager).device_path
|
||||
|
||||
@property
|
||||
def root_password(self):
|
||||
return self.root_pass
|
||||
|
@ -510,7 +518,7 @@ class BaseInstance(SimpleInstance):
|
|||
task_api.API(self.context).delete_instance(self.id)
|
||||
|
||||
deltas = {'instances': -1}
|
||||
if CONF.trove_volume_support:
|
||||
if self.volume_support:
|
||||
deltas['volumes'] = -self.volume_size
|
||||
return run_with_quotas(self.tenant_id,
|
||||
deltas,
|
||||
|
@ -616,15 +624,17 @@ class Instance(BuiltInstance):
|
|||
raise exception.FlavorNotFound(uuid=flavor_id)
|
||||
|
||||
deltas = {'instances': 1}
|
||||
if CONF.trove_volume_support:
|
||||
volume_support = CONF.get(datastore_version.manager).volume_support
|
||||
if volume_support:
|
||||
validate_volume_size(volume_size)
|
||||
deltas['volumes'] = volume_size
|
||||
else:
|
||||
if volume_size is not None:
|
||||
raise exception.VolumeNotSupported()
|
||||
ephemeral_support = CONF.device_path
|
||||
if ephemeral_support and flavor.ephemeral == 0:
|
||||
raise exception.LocalStorageNotSpecified(flavor=flavor_id)
|
||||
ephemeral_support = CONF.get(datastore_version.manager).device_path
|
||||
if ephemeral_support:
|
||||
if flavor.ephemeral == 0:
|
||||
raise exception.LocalStorageNotSpecified(flavor=flavor_id)
|
||||
|
||||
if backup_id is not None:
|
||||
backup_info = Backup.get_by_id(context, backup_id)
|
||||
|
@ -726,18 +736,21 @@ class Instance(BuiltInstance):
|
|||
old_flavor = client.flavors.get(self.flavor_id)
|
||||
new_flavor_size = new_flavor.ram
|
||||
old_flavor_size = old_flavor.ram
|
||||
if CONF.trove_volume_support:
|
||||
if self.volume_support:
|
||||
if new_flavor.ephemeral != 0:
|
||||
raise exception.LocalStorageNotSupported()
|
||||
if new_flavor_size == old_flavor_size:
|
||||
raise exception.CannotResizeToSameSize()
|
||||
elif CONF.device_path is not None:
|
||||
elif self.device_path is not None:
|
||||
# ephemeral support enabled
|
||||
if new_flavor.ephemeral == 0:
|
||||
raise exception.LocalStorageNotSpecified(flavor=new_flavor_id)
|
||||
if (new_flavor_size == old_flavor_size and
|
||||
new_flavor.ephemeral == new_flavor.ephemeral):
|
||||
raise exception.CannotResizeToSameSize()
|
||||
elif new_flavor_size == old_flavor_size:
|
||||
# uses local storage
|
||||
raise exception.CannotResizeToSameSize()
|
||||
|
||||
# Set the task to RESIZING and begin the async call before returning.
|
||||
self.update_db(task_status=InstanceTasks.RESIZING)
|
||||
|
@ -749,9 +762,6 @@ class Instance(BuiltInstance):
|
|||
def _resize_resources():
|
||||
self.validate_can_perform_action()
|
||||
LOG.info("Resizing volume of instance %s..." % self.id)
|
||||
if not self.volume_size:
|
||||
raise exception.BadRequest(_("Instance %s has no volume.")
|
||||
% self.id)
|
||||
old_size = self.volume_size
|
||||
if int(new_size) <= old_size:
|
||||
raise exception.BadRequest(_("The new volume 'size' must be "
|
||||
|
@ -761,6 +771,9 @@ class Instance(BuiltInstance):
|
|||
self.update_db(task_status=InstanceTasks.RESIZING)
|
||||
task_api.API(self.context).resize_volume(new_size, self.id)
|
||||
|
||||
if not self.volume_size:
|
||||
raise exception.BadRequest(_("Instance %s has no volume.")
|
||||
% self.id)
|
||||
new_size_l = long(new_size)
|
||||
validate_volume_size(new_size_l)
|
||||
return run_with_quotas(self.tenant_id,
|
||||
|
|
|
@ -39,10 +39,6 @@ class InstanceController(wsgi.Controller):
|
|||
|
||||
"""Controller for instance functionality."""
|
||||
schemas = apischema.instance.copy()
|
||||
if not CONF.trove_volume_support:
|
||||
# see instance.models.create for further validation around this
|
||||
LOG.info("Removing volume attributes from schema")
|
||||
schemas['create']['properties']['instance']['required'].pop()
|
||||
|
||||
@classmethod
|
||||
def get_action_schema(cls, body, action_schema):
|
||||
|
|
|
@ -39,7 +39,7 @@ class InstanceView(object):
|
|||
"datastore": {"type": self.instance.datastore.name,
|
||||
"version": self.instance.datastore_version.name},
|
||||
}
|
||||
if CONF.trove_volume_support:
|
||||
if self.instance.volume_support:
|
||||
instance_dict['volume'] = {'size': self.instance.volume_size}
|
||||
|
||||
if self.instance.hostname:
|
||||
|
@ -88,7 +88,7 @@ class InstanceDetailView(InstanceView):
|
|||
if (isinstance(self.instance, models.DetailInstance) and
|
||||
self.instance.volume_used):
|
||||
used = self.instance.volume_used
|
||||
if CONF.trove_volume_support:
|
||||
if self.instance.volume_support:
|
||||
result['instance']['volume']['used'] = used
|
||||
else:
|
||||
# either ephemeral or root partition
|
||||
|
|
|
@ -312,9 +312,8 @@ QUOTAS = QuotaEngine()
|
|||
|
||||
''' Define all kind of resources here '''
|
||||
resources = [Resource(Resource.INSTANCES, 'max_instances_per_user'),
|
||||
Resource(Resource.BACKUPS, 'max_backups_per_user')]
|
||||
if CONF.trove_volume_support:
|
||||
resources.append(Resource(Resource.VOLUMES, 'max_volumes_per_user'))
|
||||
Resource(Resource.BACKUPS, 'max_backups_per_user'),
|
||||
Resource(Resource.VOLUMES, 'max_volumes_per_user')]
|
||||
|
||||
QUOTAS.register_resources(resources)
|
||||
|
||||
|
|
|
@ -118,7 +118,7 @@ class NotifyMixin(object):
|
|||
'user_id': self.context.user,
|
||||
}
|
||||
|
||||
if CONF.trove_volume_support:
|
||||
if CONF.get(self.datastore_version.manager).volume_support:
|
||||
payload.update({
|
||||
'volume_size': self.volume_size,
|
||||
'nova_volume_id': self.volume_id
|
||||
|
@ -365,7 +365,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
|
|||
err = inst_models.InstanceTasks.BUILDING_ERROR_SERVER
|
||||
self._log_and_raise(e, msg, err)
|
||||
|
||||
device_path = CONF.device_path
|
||||
device_path = self.device_path
|
||||
mount_point = CONF.get(datastore_manager).mount_point
|
||||
volume_info = {'device_path': device_path, 'mount_point': mount_point}
|
||||
LOG.debug("end _create_server_volume for id: %s" % self.id)
|
||||
|
@ -395,7 +395,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
|
|||
ifaces, ports = self._build_heat_nics(nics)
|
||||
template_obj = template.load_heat_template(datastore_manager)
|
||||
heat_template_unicode = template_obj.render(
|
||||
volume_support=CONF.trove_volume_support,
|
||||
volume_support=self.volume_support,
|
||||
ifaces=ifaces, ports=ports,
|
||||
tcp_rules=tcp_rules_mapping_list,
|
||||
udp_rules=udp_ports_mapping_list,
|
||||
|
@ -438,7 +438,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
|
|||
raise TroveError("Heat Resource Provisioning Failed.")
|
||||
instance_id = resource.physical_resource_id
|
||||
|
||||
if CONF.trove_volume_support:
|
||||
if self.volume_support:
|
||||
resource = client.resources.get(stack.id, 'DataVolume')
|
||||
if resource.resource_status != HEAT_RESOURCE_SUCCESSFUL_STATE:
|
||||
raise TroveError("Heat Resource Provisioning Failed.")
|
||||
|
@ -469,7 +469,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
|
|||
err = inst_models.InstanceTasks.BUILDING_ERROR_SERVER
|
||||
self._log_and_raise(e, msg, err)
|
||||
|
||||
device_path = CONF.device_path
|
||||
device_path = self.device_path
|
||||
mount_point = CONF.get(datastore_manager).mount_point
|
||||
volume_info = {'device_path': device_path, 'mount_point': mount_point}
|
||||
|
||||
|
@ -504,7 +504,9 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
|
|||
|
||||
def _build_volume_info(self, datastore_manager, volume_size=None):
|
||||
volume_info = None
|
||||
volume_support = CONF.trove_volume_support
|
||||
volume_support = self.volume_support
|
||||
device_path = self.device_path
|
||||
mount_point = CONF.get(datastore_manager).mount_point
|
||||
LOG.debug("trove volume support = %s" % volume_support)
|
||||
if volume_support:
|
||||
try:
|
||||
|
@ -515,13 +517,12 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
|
|||
err = inst_models.InstanceTasks.BUILDING_ERROR_VOLUME
|
||||
self._log_and_raise(e, msg, err)
|
||||
else:
|
||||
LOG.debug("device_path = %s" % CONF.device_path)
|
||||
LOG.debug("mount_point = %s" %
|
||||
CONF.get(datastore_manager).mount_point)
|
||||
LOG.debug("device_path = %s" % device_path)
|
||||
LOG.debug("mount_point = %s" % mount_point)
|
||||
volume_info = {
|
||||
'block_device': None,
|
||||
'device_path': CONF.device_path,
|
||||
'mount_point': CONF.get(datastore_manager).mount_point,
|
||||
'device_path': device_path,
|
||||
'mount_point': mount_point,
|
||||
'volumes': None,
|
||||
}
|
||||
return volume_info
|
||||
|
@ -570,7 +571,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
|
|||
LOG.debug("block_device = %s" % block_device)
|
||||
LOG.debug("volume = %s" % created_volumes)
|
||||
|
||||
device_path = CONF.device_path
|
||||
device_path = self.device_path
|
||||
mount_point = CONF.get(datastore_manager).mount_point
|
||||
LOG.debug("device_path = %s" % device_path)
|
||||
LOG.debug("mount_point = %s" % mount_point)
|
||||
|
@ -1021,6 +1022,9 @@ class ResizeVolumeAction(object):
|
|||
self.instance.datastore_version.manager).mount_point
|
||||
return mount_point
|
||||
|
||||
def get_device_path(self):
|
||||
return self.instance.device_path
|
||||
|
||||
def _fail(self, orig_func):
|
||||
LOG.exception(_("%(func)s encountered an error when attempting to "
|
||||
"resize the volume for instance %(id)s. Setting service "
|
||||
|
@ -1066,7 +1070,8 @@ class ResizeVolumeAction(object):
|
|||
LOG.debug("Unmounting the volume on instance %(id)s" % {
|
||||
'id': self.instance.id})
|
||||
mount_point = self.get_mount_point()
|
||||
self.instance.guest.unmount_volume(device_path=CONF.device_path,
|
||||
device_path = self.get_device_path()
|
||||
self.instance.guest.unmount_volume(device_path=device_path,
|
||||
mount_point=mount_point)
|
||||
LOG.debug("Successfully unmounted the volume %(vol_id)s for "
|
||||
"instance %(id)s" % {'vol_id': self.instance.volume_id,
|
||||
|
@ -1094,11 +1099,12 @@ class ResizeVolumeAction(object):
|
|||
|
||||
@try_recover
|
||||
def _attach_volume(self):
|
||||
device_path = self.get_device_path()
|
||||
LOG.debug("Attach volume %(vol_id)s to instance %(id)s at "
|
||||
"%(dev)s" % {'vol_id': self.instance.volume_id,
|
||||
'id': self.instance.id, 'dev': CONF.device_path})
|
||||
'id': self.instance.id, 'dev': device_path})
|
||||
self.instance.nova_client.volumes.create_server_volume(
|
||||
self.instance.server.id, self.instance.volume_id, CONF.device_path)
|
||||
self.instance.server.id, self.instance.volume_id, device_path)
|
||||
|
||||
def volume_in_use():
|
||||
volume = self.instance.volume_client.volumes.get(
|
||||
|
@ -1117,7 +1123,8 @@ class ResizeVolumeAction(object):
|
|||
LOG.debug("Resizing the filesystem for instance %(id)s" % {
|
||||
'id': self.instance.id})
|
||||
mount_point = self.get_mount_point()
|
||||
self.instance.guest.resize_fs(device_path=CONF.device_path,
|
||||
device_path = self.get_device_path()
|
||||
self.instance.guest.resize_fs(device_path=device_path,
|
||||
mount_point=mount_point)
|
||||
LOG.debug("Successfully resized volume %(vol_id)s filesystem for "
|
||||
"instance %(id)s" % {'vol_id': self.instance.volume_id,
|
||||
|
@ -1128,7 +1135,8 @@ class ResizeVolumeAction(object):
|
|||
LOG.debug("Mount the volume on instance %(id)s" % {
|
||||
'id': self.instance.id})
|
||||
mount_point = self.get_mount_point()
|
||||
self.instance.guest.mount_volume(device_path=CONF.device_path,
|
||||
device_path = self.get_device_path()
|
||||
self.instance.guest.mount_volume(device_path=device_path,
|
||||
mount_point=mount_point)
|
||||
LOG.debug("Successfully mounted the volume %(vol_id)s on instance "
|
||||
"%(id)s" % {'vol_id': self.instance.volume_id,
|
||||
|
|
|
@ -238,9 +238,8 @@ class CreateInstanceQuotaTest(unittest.TestCase):
|
|||
self.test_info.dbaas_datastore = CONFIG.dbaas_datastore
|
||||
|
||||
def tearDown(self):
|
||||
quota_dict = {'instances': CONFIG.trove_max_instances_per_user}
|
||||
if VOLUME_SUPPORT:
|
||||
quota_dict['volumes'] = CONFIG.trove_max_volumes_per_user
|
||||
quota_dict = {'instances': CONFIG.trove_max_instances_per_user,
|
||||
'volumes': CONFIG.trove_max_volumes_per_user}
|
||||
dbaas_admin.quota.update(self.test_info.user.tenant_id,
|
||||
quota_dict)
|
||||
|
||||
|
@ -410,6 +409,17 @@ class CreateInstanceFail(object):
|
|||
volume, databases)
|
||||
assert_equal(501, dbaas.last_http_code)
|
||||
|
||||
def test_create_failure_with_volume_size_and_disabled_for_datastore(self):
|
||||
instance_name = "instance-failure-volume-size_and_volume_disabled"
|
||||
databases = []
|
||||
datastore = 'redis'
|
||||
assert_equal(CONFIG.get(datastore, 'redis')['volume_support'], False)
|
||||
volume = {'size': 2}
|
||||
assert_raises(exceptions.HTTPNotImplemented, dbaas.instances.create,
|
||||
instance_name, instance_info.dbaas_flavor_href,
|
||||
volume, databases, datastore=datastore)
|
||||
assert_equal(501, dbaas.last_http_code)
|
||||
|
||||
@test(enabled=EPHEMERAL_SUPPORT)
|
||||
def test_create_failure_with_no_ephemeral_flavor(self):
|
||||
instance_name = "instance-failure-with-no-ephemeral-flavor"
|
||||
|
|
|
@ -27,7 +27,6 @@ from trove.tests.util import create_dbaas_client
|
|||
from troveclient.compat import exceptions
|
||||
from datetime import datetime
|
||||
from trove.tests.util.users import Users
|
||||
from trove.tests.config import CONFIG
|
||||
|
||||
GROUP = "dbaas.api.limits"
|
||||
DEFAULT_RATE = 200
|
||||
|
@ -93,8 +92,7 @@ class Limits(object):
|
|||
assert_equal(abs_limits.verb, "ABSOLUTE")
|
||||
assert_equal(int(abs_limits.max_instances), DEFAULT_MAX_INSTANCES)
|
||||
assert_equal(int(abs_limits.max_backups), DEFAULT_MAX_BACKUPS)
|
||||
if CONFIG.trove_volume_support:
|
||||
assert_equal(int(abs_limits.max_volumes), DEFAULT_MAX_VOLUMES)
|
||||
assert_equal(int(abs_limits.max_volumes), DEFAULT_MAX_VOLUMES)
|
||||
|
||||
for k in d:
|
||||
assert_equal(d[k].verb, k)
|
||||
|
@ -116,8 +114,7 @@ class Limits(object):
|
|||
|
||||
assert_equal(int(abs_limits.max_instances), DEFAULT_MAX_INSTANCES)
|
||||
assert_equal(int(abs_limits.max_backups), DEFAULT_MAX_BACKUPS)
|
||||
if CONFIG.trove_volume_support:
|
||||
assert_equal(int(abs_limits.max_volumes), DEFAULT_MAX_VOLUMES)
|
||||
assert_equal(int(abs_limits.max_volumes), DEFAULT_MAX_VOLUMES)
|
||||
assert_equal(get.verb, "GET")
|
||||
assert_equal(get.unit, "MINUTE")
|
||||
assert_true(int(get.remaining) <= DEFAULT_RATE - 5)
|
||||
|
@ -146,9 +143,8 @@ class Limits(object):
|
|||
DEFAULT_MAX_INSTANCES)
|
||||
assert_equal(int(abs_limits.max_backups),
|
||||
DEFAULT_MAX_BACKUPS)
|
||||
if CONFIG.trove_volume_support:
|
||||
assert_equal(int(abs_limits.max_volumes),
|
||||
DEFAULT_MAX_VOLUMES)
|
||||
assert_equal(int(abs_limits.max_volumes),
|
||||
DEFAULT_MAX_VOLUMES)
|
||||
|
||||
except exceptions.OverLimit:
|
||||
encountered = True
|
||||
|
|
|
@ -81,6 +81,8 @@ def mgmt_instance_get():
|
|||
# a global.
|
||||
id = instance_info.id
|
||||
api_instance = mgmt.show(id)
|
||||
datastore = getattr(api_instance, 'datastore')
|
||||
datastore_type = datastore.get('type')
|
||||
|
||||
# Print out all fields for extra info if the test fails.
|
||||
for name in dir(api_instance):
|
||||
|
@ -102,7 +104,8 @@ def mgmt_instance_get():
|
|||
instance.has_field('tenant_id', basestring)
|
||||
instance.has_field('updated', basestring)
|
||||
# Can be None if no volume is given on this instance.
|
||||
if CONFIG.trove_volume_support:
|
||||
volume_support = CONFIG.get(datastore_type, 'mysql')['volume_support']
|
||||
if volume_support:
|
||||
instance.has_field('volume', dict, volume_check)
|
||||
else:
|
||||
instance.has_field('volume', None)
|
||||
|
@ -126,7 +129,7 @@ def mgmt_instance_get():
|
|||
server.has_element("status", basestring)
|
||||
server.has_element("tenant_id", basestring)
|
||||
|
||||
if (CONFIG.trove_volume_support and
|
||||
if (volume_support and
|
||||
CONFIG.trove_main_instance_has_volume):
|
||||
with CollectionCheck("volume", api_instance.volume) as volume:
|
||||
volume.has_element("attachments", list)
|
||||
|
@ -151,9 +154,11 @@ class WhenMgmtInstanceGetIsCalledButServerIsNotReady(object):
|
|||
# Fake volume will fail if the size is 13.
|
||||
# TODO(tim.simpson): This would be a lot nicer looking if we used a
|
||||
# traditional mock framework.
|
||||
body = None
|
||||
if CONFIG.trove_volume_support:
|
||||
body = {'size': 13}
|
||||
datastore = {'type': 'mysql', 'version': '5.5'}
|
||||
body = {'datastore': datastore}
|
||||
vol_support = CONFIG.get(datastore['type'], 'mysql')['volume_support']
|
||||
if vol_support:
|
||||
body.update({'size': 13})
|
||||
response = self.client.instances.create(
|
||||
'test_SERVER_ERROR',
|
||||
instance_info.dbaas_flavor_href,
|
||||
|
|
|
@ -116,8 +116,10 @@ class TestConfig(object):
|
|||
"key_buffer_size",
|
||||
"connect_timeout"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"volume_support": True,
|
||||
},
|
||||
"redis": {"volume_support": False},
|
||||
}
|
||||
self._frozen_values = FrozenDict(self._values)
|
||||
self._users = None
|
||||
|
|
|
@ -53,6 +53,7 @@ class InstanceDetailViewTest(TestCase):
|
|||
self.instance.updated = 'Now'
|
||||
self.instance.datastore_version = Mock()
|
||||
self.instance.datastore_version.name = 'mysql_test_version'
|
||||
self.instance.datastore_version.manager = 'mysql'
|
||||
self.instance.hostname = 'test.trove.com'
|
||||
self.ip = "1.2.3.4"
|
||||
self.instance.addresses = {"private": [{"addr": self.ip}]}
|
||||
|
|
|
@ -359,6 +359,7 @@ class ResizeVolumeTest(testtools.TestCase):
|
|||
class FakeGroup():
|
||||
def __init__(self):
|
||||
self.mount_point = 'var/lib/mysql'
|
||||
self.device_path = '/dev/vdb'
|
||||
taskmanager_models.CONF.get = Mock(return_value=FakeGroup())
|
||||
|
||||
def tearDown(self):
|
||||
|
|
Loading…
Reference in New Issue