Support to create public trove instance

- The users need to specify the network to create Trove instance, but
  trove-taskmanager will create port in that network for Nova instance
  creation. Using port gives Trove more capabilities to define how the
  database service is exposed.
- Deprecate ICMP protocol for the instance.
- Restrict 'nics' parameter for creating instance.
- Add 'access' parameter for creating instance.
- Add 'public_network_id' option in order to create floating IP for the
  instance.
- Do not create records for security groups, but Trove can still delete
  security groups of existing instances for backward compatibility.
- Delete unreasonable Host, Account, Storage API.

Story: 2006500
Task: 36468
Task: 36466
Change-Id: I80827e1ad5e6b130cbf94c2bb7a909c44d5cf1e5
This commit is contained in:
Lingxian Kong 2019-09-05 23:21:08 +12:00
parent 3c09e6178a
commit c33fa67066
81 changed files with 857 additions and 2441 deletions

View File

@ -240,18 +240,18 @@ function configure_trove {
iniset $TROVE_CONF DEFAULT remote_neutron_client trove.common.single_tenant_remote.neutron_client_trove_admin
iniset $TROVE_CONF DEFAULT default_datastore $TROVE_DATASTORE_TYPE
iniset $TROVE_CONF cassandra tcp_ports 22,7000,7001,7199,9042,9160
iniset $TROVE_CONF couchbase tcp_ports 22,8091,8092,4369,11209-11211,21100-21199
iniset $TROVE_CONF couchdb tcp_ports 22,5984
iniset $TROVE_CONF db2 tcp_ports 22,50000
iniset $TROVE_CONF mariadb tcp_ports 22,3306,4444,4567,4568
iniset $TROVE_CONF mongodb tcp_ports 22,2500,27017,27019
iniset $TROVE_CONF mysql tcp_ports 22,3306
iniset $TROVE_CONF percona tcp_ports 22,3306
iniset $TROVE_CONF postgresql tcp_ports 22,5432
iniset $TROVE_CONF pxc tcp_ports 22,3306,4444,4567,4568
iniset $TROVE_CONF redis tcp_ports 22,6379,16379
iniset $TROVE_CONF vertica tcp_ports 22,5433,5434,5444,5450,4803
iniset $TROVE_CONF cassandra tcp_ports 7000,7001,7199,9042,9160
iniset $TROVE_CONF couchbase tcp_ports 8091,8092,4369,11209-11211,21100-21199
iniset $TROVE_CONF couchdb tcp_ports 5984
iniset $TROVE_CONF db2 tcp_ports 50000
iniset $TROVE_CONF mariadb tcp_ports 3306,4444,4567,4568
iniset $TROVE_CONF mongodb tcp_ports 2500,27017,27019
iniset $TROVE_CONF mysql tcp_ports 3306
iniset $TROVE_CONF percona tcp_ports 3306
iniset $TROVE_CONF postgresql tcp_ports 5432
iniset $TROVE_CONF pxc tcp_ports 3306,4444,4567,4568
iniset $TROVE_CONF redis tcp_ports 6379,16379
iniset $TROVE_CONF vertica tcp_ports 5433,5434,5444,5450,4803
# configure apache related files
if [[ "${TROVE_USE_MOD_WSGI}" == "TRUE" ]]; then
@ -457,10 +457,10 @@ function start_trove {
enable_apache_site trove-api
restart_apache_server
else
run_process tr-api "$TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF --debug"
run_process tr-api "$TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF"
fi
run_process tr-tmgr "$TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF --debug"
run_process tr-cond "$TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF --debug"
run_process tr-tmgr "$TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF"
run_process tr-cond "$TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF"
}
# stop_trove() - Stop running processes

View File

@ -18,6 +18,7 @@
"nova_client": null,
"shared_network": "b19b5da0-d2f6-11e9-9382-00224d6b7bc1",
"users": [
{

View File

@ -5,6 +5,7 @@ remote_nova_client = trove.tests.fakes.nova.fake_create_nova_client
remote_guest_client = trove.tests.fakes.guestagent.fake_create_guest_client
remote_swift_client = trove.tests.fakes.swift.fake_create_swift_client
remote_cinder_client = trove.tests.fakes.nova.fake_create_cinder_client
remote_neutron_client = trove.tests.fakes.neutron.fake_create_neutron_client
# Fake out the RPC implementation
transport_url = 'fake:/'
@ -17,20 +18,15 @@ trove_dns_support = True
dns_driver = trove.tests.fakes.dns.FakeDnsDriver
dns_instance_entry_factory = trove.tests.fakes.dns.FakeDnsInstanceEntryFactory
# This will remove some of the verbose logging when trying to diagnose tox issues
default_log_levels=routes.middleware=ERROR,trove.common.auth=WARN
log_file = trovetest.log
use_stderr = False
# Show debugging output in logs (sets DEBUG log level output)
debug = True
# Address to bind the API server
bind_host = 0.0.0.0
# Port the bind the API server to
bind_port = 8779
@ -49,7 +45,6 @@ nova_proxy_admin_user = admin
nova_proxy_admin_pass = 3de4922d8b6ac5a1aad9
nova_proxy_admin_tenant_id =
trove_auth_url = http://0.0.0.0/identity/v2.0
os_region_name = RegionOne
nova_compute_service_type = compute
nova_service_name = Compute Service
@ -105,6 +100,7 @@ control_exchange = trove
paste_config_file=api-paste.ini.test
[mysql]
root_on_create = False
volume_support = True
device_path = /dev/vdb

View File

@ -213,13 +213,10 @@ def import_tests():
from trove.tests.api import instances_mysql_down # noqa
from trove.tests.api import instances_resize # noqa
from trove.tests.api import limits # noqa
from trove.tests.api.mgmt import accounts # noqa
from trove.tests.api.mgmt import admin_required # noqa
from trove.tests.api.mgmt import hosts # noqa
from trove.tests.api.mgmt import instances as mgmt_instances # noqa
from trove.tests.api.mgmt import instances_actions as mgmt_actions # noqa
from trove.tests.api.mgmt import malformed_json # noqa
from trove.tests.api.mgmt import storage # noqa
from trove.tests.api import replication # noqa
from trove.tests.api import root # noqa
from trove.tests.api import root_on_create # noqa

View File

@ -38,7 +38,6 @@ console_scripts =
trove-status = trove.cmd.status:main
trove.api.extensions =
account = trove.extensions.routes.account:Account
mgmt = trove.extensions.routes.mgmt:Mgmt
mysql = trove.extensions.routes.mysql:Mysql

View File

@ -100,7 +100,7 @@ class Backup(object):
try:
db_info = DBBackup.create(name=name,
description=description,
tenant_id=context.tenant,
tenant_id=context.project_id,
state=BackupState.NEW,
instance_id=instance_id,
parent_id=parent_id or
@ -124,7 +124,7 @@ class Backup(object):
}
api.API(context).create_backup(backup_info, instance_id)
return db_info
return run_with_quotas(context.tenant,
return run_with_quotas(context.project_id,
{'backups': 1},
_create_resources)
@ -188,7 +188,7 @@ class Backup(object):
filters = [DBBackup.deleted == 0]
if not all_projects:
filters.append(DBBackup.tenant_id == context.tenant)
filters.append(DBBackup.tenant_id == context.project_id)
if instance_id:
filters.append(DBBackup.instance_id == instance_id)
@ -215,7 +215,7 @@ class Backup(object):
deleted=False)
else:
query = query.filter_by(instance_id=instance_id,
tenant_id=context.tenant,
tenant_id=context.project_id,
deleted=False)
return cls._paginate(context, query)
@ -278,7 +278,7 @@ class Backup(object):
cls.verify_swift_auth_token(context)
api.API(context).delete_backup(backup_id)
return run_with_quotas(context.tenant,
return run_with_quotas(context.project_id,
{'backups': -1},
_delete_resources)
@ -288,9 +288,9 @@ class Backup(object):
client = create_swift_client(context)
client.get_account()
except ClientException:
raise exception.SwiftAuthError(tenant_id=context.tenant)
raise exception.SwiftAuthError(tenant_id=context.project_id)
except exception.NoServiceEndpoint:
raise exception.SwiftNotFound(tenant_id=context.tenant)
raise exception.SwiftNotFound(tenant_id=context.project_id)
except ConnectionError:
raise exception.SwiftConnectionError()
@ -365,4 +365,4 @@ class DBBackup(DatabaseModelBase):
if e.http_status == 404:
return False
else:
raise exception.SwiftAuthError(tenant_id=context.tenant)
raise exception.SwiftAuthError(tenant_id=context.project_id)

View File

@ -143,8 +143,10 @@ class ClusterController(wsgi.Controller):
# for all tenants.
# * As far as I can tell this is the only call which actually uses the
# passed-in 'tenant_id' for anything.
if not context.is_admin and context.tenant != tenant_id:
raise exception.TroveOperationAuthError(tenant_id=context.tenant)
if not context.is_admin and context.project_id != tenant_id:
raise exception.TroveOperationAuthError(
tenant_id=context.project_id
)
# The rule checks that the currently authenticated tenant can perform
# the 'cluster-list' action.

View File

@ -123,8 +123,13 @@ volume = {
# JSON schema for the 'nics' parameter of instance create. Deliberately
# restricted: at most one network may be given, and each entry may only
# carry 'net-id' (a UUID) — any other key is rejected via
# additionalProperties=False, since Trove now creates the port itself.
nics = {
    "type": "array",
    "maxItems": 1,
    "items": {
        "type": "object",
        "additionalProperties": False,
        "properties": {
            # UUID of the Neutron network to attach the instance to.
            "net-id": uuid
        }
    }
}
@ -396,7 +401,23 @@ instance = {
"nics": nics,
"modules": module_list,
"region_name": non_empty_string,
"locality": non_empty_string
"locality": non_empty_string,
"access": {
"type": "object",
"properties": {
"is_public": {"type": "boolean"},
"allowed_cidrs": {
"type": "array",
"uniqueItems": True,
"items": {
"type": "string",
"pattern": "^([0-9]{1,3}\.){3}[0-9]{1,3}"
"(\/([0-9]|[1-2][0-9]|3[0-2]))?"
"$"
}
}
}
}
}
}
}

View File

@ -549,7 +549,8 @@ mysql_group = cfg.OptGroup(
help="Oslo option group designed for MySQL datastore")
mysql_opts = [
cfg.BoolOpt('icmp', default=False,
help='Whether to permit ICMP.'),
help='Whether to permit ICMP.',
deprecated_for_removal=True),
cfg.ListOpt('tcp_ports', default=["3306"], item_type=ListOfPortsType,
help='List of TCP ports and/or port ranges to open '
'in the security group (only applicable '
@ -633,7 +634,8 @@ percona_group = cfg.OptGroup(
help="Oslo option group designed for Percona datastore")
percona_opts = [
cfg.BoolOpt('icmp', default=False,
help='Whether to permit ICMP.'),
help='Whether to permit ICMP.',
deprecated_for_removal=True),
cfg.ListOpt('tcp_ports', default=["3306"], item_type=ListOfPortsType,
help='List of TCP ports and/or port ranges to open '
'in the security group (only applicable '
@ -721,7 +723,8 @@ pxc_group = cfg.OptGroup(
help="Oslo option group designed for Percona XtraDB Cluster datastore")
pxc_opts = [
cfg.BoolOpt('icmp', default=False,
help='Whether to permit ICMP.'),
help='Whether to permit ICMP.',
deprecated_for_removal=True),
cfg.ListOpt('tcp_ports', default=["3306", "4444", "4567", "4568"],
item_type=ListOfPortsType,
help='List of TCP ports and/or port ranges to open '
@ -815,7 +818,8 @@ redis_group = cfg.OptGroup(
help="Oslo option group designed for Redis datastore")
redis_opts = [
cfg.BoolOpt('icmp', default=False,
help='Whether to permit ICMP.'),
help='Whether to permit ICMP.',
deprecated_for_removal=True),
cfg.ListOpt('tcp_ports', default=["6379", "16379"],
item_type=ListOfPortsType,
help='List of TCP ports and/or port ranges to open '
@ -893,7 +897,8 @@ cassandra_group = cfg.OptGroup(
help="Oslo option group designed for Cassandra datastore")
cassandra_opts = [
cfg.BoolOpt('icmp', default=False,
help='Whether to permit ICMP.'),
help='Whether to permit ICMP.',
deprecated_for_removal=True),
cfg.ListOpt('tcp_ports', default=["7000", "7001", "7199", "9042", "9160"],
item_type=ListOfPortsType,
help='List of TCP ports and/or port ranges to open '
@ -996,7 +1001,8 @@ couchbase_group = cfg.OptGroup(
help="Oslo option group designed for Couchbase datastore")
couchbase_opts = [
cfg.BoolOpt('icmp', default=False,
help='Whether to permit ICMP.'),
help='Whether to permit ICMP.',
deprecated_for_removal=True),
cfg.ListOpt('tcp_ports', item_type=ListOfPortsType,
default=["8091", "8092", "4369", "11209-11211",
"21100-21199"],
@ -1060,7 +1066,8 @@ mongodb_group = cfg.OptGroup(
help="Oslo option group designed for MongoDB datastore")
mongodb_opts = [
cfg.BoolOpt('icmp', default=False,
help='Whether to permit ICMP.'),
help='Whether to permit ICMP.',
deprecated_for_removal=True),
cfg.ListOpt('tcp_ports', default=["2500", "27017", "27019"],
item_type=ListOfPortsType,
help='List of TCP ports and/or port ranges to open '
@ -1158,7 +1165,8 @@ postgresql_group = cfg.OptGroup(
help="Oslo option group for the PostgreSQL datastore.")
postgresql_opts = [
cfg.BoolOpt('icmp', default=False,
help='Whether to permit ICMP.'),
help='Whether to permit ICMP.',
deprecated_for_removal=True),
cfg.ListOpt('tcp_ports', default=["5432"], item_type=ListOfPortsType,
help='List of TCP ports and/or port ranges to open '
'in the security group (only applicable '
@ -1233,7 +1241,8 @@ couchdb_group = cfg.OptGroup(
help="Oslo option group designed for CouchDB datastore")
couchdb_opts = [
cfg.BoolOpt('icmp', default=False,
help='Whether to permit ICMP.'),
help='Whether to permit ICMP.',
deprecated_for_removal=True),
cfg.ListOpt('tcp_ports',
default=["5984"], item_type=ListOfPortsType,
help='List of TCP ports and/or port ranges to open '
@ -1295,9 +1304,10 @@ vertica_group = cfg.OptGroup(
help="Oslo option group designed for Vertica datastore")
vertica_opts = [
cfg.BoolOpt('icmp', default=False,
help='Whether to permit ICMP.'),
help='Whether to permit ICMP.',
deprecated_for_removal=True),
cfg.ListOpt('tcp_ports', item_type=ListOfPortsType,
default=["5433", "5434", "22", "5444", "5450", "4803"],
default=["5433", "5434", "5444", "5450", "4803"],
help='List of TCP ports and/or port ranges to open '
'in the security group (only applicable '
'if trove_security_groups_support is True).'),
@ -1365,7 +1375,8 @@ db2_group = cfg.OptGroup(
help="Oslo option group designed for DB2 datastore")
db2_opts = [
cfg.BoolOpt('icmp', default=False,
help='Whether to permit ICMP.'),
help='Whether to permit ICMP.',
deprecated_for_removal=True),
cfg.ListOpt('tcp_ports',
default=["50000"], item_type=ListOfPortsType,
help='List of TCP ports and/or port ranges to open '
@ -1425,7 +1436,8 @@ mariadb_group = cfg.OptGroup(
help="Oslo option group designed for MariaDB datastore")
mariadb_opts = [
cfg.BoolOpt('icmp', default=False,
help='Whether to permit ICMP.'),
help='Whether to permit ICMP.',
deprecated_for_removal=True),
cfg.ListOpt('tcp_ports', default=["3306", "4444", "4567", "4568"],
item_type=ListOfPortsType,
help='List of TCP ports and/or port ranges to open '
@ -1545,6 +1557,21 @@ rpcapi_cap_opts = [
help='Set Openstack Release compatibility for conductor services'),
]
network_group = cfg.OptGroup(
'network',
title='Networking options',
help="Options related to the trove instance networking."
)
network_opts = [
cfg.StrOpt(
'public_network_id',
default=None,
help='ID of the Neutron public network to create floating IP for the '
'public trove instance. If not given, Trove will try to query '
'all the public networks and use the first one in the list.'
)
]
CONF = cfg.CONF
CONF.register_opts(path_opts)
@ -1565,6 +1592,7 @@ CONF.register_group(couchdb_group)
CONF.register_group(vertica_group)
CONF.register_group(db2_group)
CONF.register_group(mariadb_group)
CONF.register_group(network_group)
CONF.register_opts(mysql_opts, mysql_group)
CONF.register_opts(percona_opts, percona_group)
@ -1578,6 +1606,7 @@ CONF.register_opts(couchdb_opts, couchdb_group)
CONF.register_opts(vertica_opts, vertica_group)
CONF.register_opts(db2_opts, db2_group)
CONF.register_opts(mariadb_opts, mariadb_group)
CONF.register_opts(network_opts, network_group)
CONF.register_opts(rpcapi_cap_opts, upgrade_levels)

View File

@ -405,33 +405,16 @@ class BackupUpdateError(TroveError):
message = _("Unable to update Backup table in database.")
class SecurityGroupCreationError(TroveError):
message = _("Failed to create Security Group.")
class SecurityGroupDeletionError(TroveError):
message = _("Failed to delete Security Group.")
class SecurityGroupRuleCreationError(TroveError):
message = _("Failed to create Security Group Rule.")
class SecurityGroupRuleDeletionError(TroveError):
message = _("Failed to delete Security Group Rule.")
class MalformedSecurityGroupRuleError(TroveError):
message = _("Error creating security group rules."
" Malformed port(s). Port must be an integer."
" FromPort = %(from)s greater than ToPort = %(to)s.")
class BackupNotCompleteError(TroveError):
message = _("Unable to create instance because backup %(backup_id)s is "
@ -609,6 +592,10 @@ class NetworkNotFound(TroveError):
message = _("Network Resource %(uuid)s cannot be found.")
class PublicNetworkNotFound(TroveError):
message = _("Public network cannot be found.")
class ClusterVolumeSizeRequired(TroveError):
message = _("A volume size is required for each instance in the cluster.")

View File

@ -37,7 +37,7 @@ def glance_client(context, region_name=None):
if CONF.glance_url:
endpoint_url = '%(url)s%(tenant)s' % {
'url': normalize_url(CONF.glance_url),
'tenant': context.tenant}
'tenant': context.project_id}
else:
endpoint_url = get_endpoint(
context.service_catalog, service_type=CONF.glance_service_type,

View File

@ -204,7 +204,7 @@ class RateLimitingMiddleware(wsgi.TroveMiddleware):
tenant_id = None
if context:
tenant_id = context.tenant
tenant_id = context.project_id
delay, error = self._limiter.check_for_delay(verb, url, tenant_id)

View File

@ -11,11 +11,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netaddr
from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
from trove.common import remote
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
MGMT_NETWORKS = None
@ -47,3 +51,99 @@ def reset_management_networks():
global MGMT_NETWORKS
MGMT_NETWORKS = None
def create_port(client, name, description, network_id, security_groups,
                is_public=False):
    """Create a Neutron port on the given network.

    When ``is_public`` is True, a floating IP on the public network is
    also allocated and associated with the new port.

    :param client: neutron client.
    :param name: name for the new port.
    :param description: description for the new port.
    :param network_id: ID of the network to create the port in.
    :param security_groups: list of security group IDs for the port.
    :param is_public: whether to attach a floating IP to the port.
    :returns: ID of the created port.
    :raises exception.PublicNetworkNotFound: if ``is_public`` is set but
        no public network can be determined.
    """
    request = {
        "port": {
            "name": name,
            "description": description,
            "network_id": network_id,
            "security_groups": security_groups
        }
    }
    created = client.create_port(body=request)
    port_id = created['port']['id']

    if is_public:
        ext_net_id = get_public_network(client)
        if not ext_net_id:
            raise exception.PublicNetworkNotFound()

        client.create_floatingip({
            "floatingip": {
                'floating_network_id': ext_net_id,
                'port_id': port_id,
            }
        })

    return port_id
def delete_port(client, id):
    """Delete a Neutron port and any floating IPs associated with it.

    Floating IP deletion is best-effort: failures are logged and the port
    is deleted regardless (preserving the original behavior).

    :param client: neutron client.
    :param id: ID of the port to delete. (The name shadows the builtin
        ``id`` but is kept for backward compatibility with keyword
        callers.)
    """
    ret = client.list_floatingips(port_id=id)
    # Iterating directly handles the empty case; no length check needed.
    # .get() also tolerates a response without a 'floatingips' key.
    for fip in ret.get('floatingips', []):
        try:
            client.delete_floatingip(fip['id'])
        except Exception as e:
            LOG.error(
                'Failed to delete floating IP for port %s, error: %s',
                id, str(e)
            )
    client.delete_port(id)
def get_public_network(client):
    """Return the ID of the public (external) network, or None.

    Prefers the ``[network] public_network_id`` config option; when it is
    unset, queries Neutron for external networks and returns the ID of
    the first one found.
    """
    configured = CONF.network.public_network_id
    if configured:
        return configured

    result = client.list_networks(**{'router:external': True})
    networks = result.get('networks', [])
    if not networks:
        return None
    return networks[0].get('id')
def create_security_group(client, name, instance_id):
    """Create a Neutron security group for a trove instance.

    :param client: neutron client.
    :param name: name for the new security group.
    :param instance_id: ID of the trove instance, recorded in the
        security group description.
    :returns: ID of the created security group.
    """
    description = 'Security group for trove instance %s' % instance_id
    request = {
        'security_group': {
            'name': name,
            'description': description
        }
    }
    response = client.create_security_group(body=request)
    return response['security_group']['id']
def create_security_group_rule(client, sg_id, protocol, ports, remote_ips):
    """Create ingress security group rules for the given ports and CIDRs.

    One rule is created per (remote_ip, port-or-range) combination.

    :param client: neutron client.
    :param sg_id: ID of the security group the rules are added to.
    :param protocol: IP protocol for the rules, e.g. 'tcp'.
    :param ports: collection of port entries; each entry is indexed with
        [0]/[-1] to obtain the range bounds, so a single-port entry must
        yield the same value at both positions.
        NOTE(review): presumably entries come from ListOfPortsType as
        sequences of endpoints -- confirm they are not plain strings like
        "3306", for which [0]/[-1] would be single characters.
    :param remote_ips: CIDRs allowed to reach the ports.
    """
    for remote_ip in remote_ips:
        # Derive IPv4 vs IPv6 from the CIDR itself.
        ip = netaddr.IPNetwork(remote_ip)
        ethertype = 'IPv4' if ip.version == 4 else 'IPv6'

        # set() drops duplicate port entries so duplicate rules are not
        # created for the same range.
        for port_or_range in set(ports):
            from_, to_ = port_or_range[0], port_or_range[-1]
            body = {
                "security_group_rule": {
                    "direction": "ingress",
                    "ethertype": ethertype,
                    "protocol": protocol,
                    "security_group_id": sg_id,
                    "port_range_min": int(from_),
                    "port_range_max": int(to_),
                    "remote_ip_prefix": remote_ip
                }
            }
            client.create_security_group_rule(body)

View File

@ -364,7 +364,7 @@ class DBaaSAPINotification(object):
'server_type': 'api',
'client_ip': request.remote_addr,
'server_ip': request.host,
'tenant_id': context.tenant,
'tenant_id': context.project_id,
})
elif 'request_id' not in kwargs:
raise TroveError(_("Notification %s must include 'request'"

View File

@ -69,7 +69,7 @@ def __authorize(context, rule, target=None):
:raises: :class:`PolicyNotAuthorized` if verification fails.
"""
target = target or {'tenant': context.tenant}
target = target or {'tenant': context.project_id}
return get_enforcer().authorize(
rule, target, context.to_dict(), do_raise=True,
exc=trove_exceptions.PolicyNotAuthorized, action=rule)

View File

@ -123,7 +123,7 @@ def cinder_client(context, region_name=None):
if CONF.cinder_url:
url = '%(cinder_url)s%(tenant)s' % {
'cinder_url': normalize_url(CONF.cinder_url),
'tenant': context.tenant}
'tenant': context.project_id}
else:
url = get_endpoint(context.service_catalog,
service_type=CONF.cinder_service_type,
@ -131,7 +131,7 @@ def cinder_client(context, region_name=None):
endpoint_type=CONF.cinder_endpoint_type)
client = CinderClient.Client(context.user, context.auth_token,
project_id=context.tenant,
project_id=context.project_id,
auth_url=CONF.trove_auth_url,
insecure=CONF.cinder_api_insecure)
client.client.auth_token = context.auth_token
@ -143,7 +143,7 @@ def swift_client(context, region_name=None):
if CONF.swift_url:
# swift_url has a different format so doesn't need to be normalized
url = '%(swift_url)s%(tenant)s' % {'swift_url': CONF.swift_url,
'tenant': context.tenant}
'tenant': context.project_id}
else:
url = get_endpoint(context.service_catalog,
service_type=CONF.swift_service_type,
@ -152,7 +152,7 @@ def swift_client(context, region_name=None):
client = Connection(preauthurl=url,
preauthtoken=context.auth_token,
tenant_name=context.tenant,
tenant_name=context.project_id,
snet=CONF.backup_use_snet,
insecure=CONF.swift_api_insecure)
return client

View File

@ -27,16 +27,21 @@ LOG = logging.getLogger(__name__)
class ServerGroup(object):
@classmethod
def load(cls, context, compute_id):
def load(cls, context, instance_id):
client = create_nova_client(context)
server_group = None
expected_name = "locality_%s" % instance_id
try:
for sg in client.server_groups.list():
if compute_id in sg.members:
if sg.name == expected_name:
server_group = sg
except Exception:
LOG.exception("Could not load server group for compute %s",
compute_id)
LOG.exception("Could not load server group for instance %s",
instance_id)
if not server_group:
LOG.info('No server group found for instance %s', instance_id)
return server_group
@classmethod
@ -58,9 +63,9 @@ class ServerGroup(object):
# it has no members
if server_group:
if force or len(server_group.members) <= 1:
LOG.info("Deleting server group %s", server_group.id)
client = create_nova_client(context)
client.server_groups.delete(server_group.id)
LOG.debug("Deleted server group %s.", server_group.id)
else:
LOG.debug("Skipping delete of server group %(id)s "
"(members: %(members)s).",

View File

@ -87,7 +87,7 @@ class CassandraCluster(models.Cluster):
# Updating Cluster Task.
db_info = models.DBCluster.create(
name=name, tenant_id=context.tenant,
name=name, tenant_id=context.project_id,
datastore_version_id=datastore_version.id,
task_status=ClusterTasks.BUILDING_INITIAL,
configuration_id=configuration)
@ -126,7 +126,7 @@ class CassandraCluster(models.Cluster):
num_new_instances = len(instances)
deltas = {'instances': num_new_instances, 'volumes': req_volume_size}
models.assert_homogeneous_cluster(instances)
check_quotas(context.tenant, deltas)
check_quotas(context.project_id, deltas)
# Checking networks are same for the cluster
models.validate_instance_nics(context, instances)

View File

@ -70,7 +70,7 @@ class GaleraCommonCluster(cluster_models.Cluster):
deltas = {'instances': num_instances, 'volumes': req_volume_size}
# quota check
check_quotas(context.tenant, deltas)
check_quotas(context.project_id, deltas)
# Checking networks are same for the cluster
cluster_models.validate_instance_nics(context, instances)
@ -122,7 +122,7 @@ class GaleraCommonCluster(cluster_models.Cluster):
datastore_version)
# Updating Cluster Task
db_info = cluster_models.DBCluster.create(
name=name, tenant_id=context.tenant,
name=name, tenant_id=context.project_id,
datastore_version_id=datastore_version.id,
task_status=ClusterTasks.BUILDING_INITIAL,
configuration_id=configuration)

View File

@ -107,7 +107,7 @@ class MongoDbCluster(models.Cluster):
all_instances, mongo_conf.volume_support)
deltas = {'instances': delta_instances, 'volumes': req_volume_size}
check_quotas(context.tenant, deltas)
check_quotas(context.project_id, deltas)
# Checking networks are same for the cluster
models.validate_instance_nics(context, instances)
@ -121,7 +121,7 @@ class MongoDbCluster(models.Cluster):
for instance in instances]
db_info = models.DBCluster.create(
name=name, tenant_id=context.tenant,
name=name, tenant_id=context.project_id,
datastore_version_id=datastore_version.id,
task_status=ClusterTasks.BUILDING_INITIAL)
@ -297,7 +297,7 @@ class MongoDbCluster(models.Cluster):
volume_size = a_member.volume_size
if volume_size:
deltas['volumes'] = volume_size * num_members_per_shard
check_quotas(self.context.tenant, deltas)
check_quotas(self.context.project_id, deltas)
new_replica_set_name = "rs" + str(num_unique_shards + 1)
new_shard_id = utils.generate_uuid()
dsv_manager = (datastore_models.DatastoreVersion.
@ -622,7 +622,7 @@ class MongoDbCluster(models.Cluster):
deltas = {'instances': len(instances),
'volumes': sum([instance['volume_size']
for instance in instances])}
check_quotas(context.tenant, deltas)
check_quotas(context.project_id, deltas)
@staticmethod
def _check_instances(context, instances, datastore_version,

View File

@ -74,7 +74,7 @@ class RedisCluster(models.Cluster):
# Check quotas
quota_request = {'instances': num_instances,
'volumes': total_volume_allocation}
check_quotas(context.tenant, quota_request)
check_quotas(context.project_id, quota_request)
# Creating member instances
return [inst_models.Instance.create(context,
@ -112,7 +112,7 @@ class RedisCluster(models.Cluster):
# Updating Cluster Task
db_info = models.DBCluster.create(
name=name, tenant_id=context.tenant,
name=name, tenant_id=context.project_id,
datastore_version_id=datastore_version.id,
task_status=ClusterTasks.BUILDING_INITIAL)

View File

@ -96,7 +96,7 @@ class VerticaCluster(models.Cluster):
deltas = {'instances': num_instances, 'volumes': req_volume_size}
check_quotas(context.tenant, deltas)
check_quotas(context.project_id, deltas)
flavor_id = instances[0]['flavor_id']
volume_size = instances[0].get('volume_size', None)
@ -149,7 +149,7 @@ class VerticaCluster(models.Cluster):
num_instances=vertica_conf.cluster_member_count)
db_info = models.DBCluster.create(
name=name, tenant_id=context.tenant,
name=name, tenant_id=context.project_id,
datastore_version_id=datastore_version.id,
task_status=ClusterTasks.BUILDING_INITIAL)

View File

@ -36,7 +36,7 @@ def trove_client(context, region_name=None):
if CONF.trove_url:
url = '%(url)s%(tenant)s' % {
'url': normalize_url(CONF.trove_url),
'tenant': context.tenant}
'tenant': context.project_id}
else:
url = get_endpoint(context.service_catalog,
service_type=CONF.trove_service_type,
@ -44,7 +44,7 @@ def trove_client(context, region_name=None):
endpoint_type=CONF.trove_endpoint_type)
client = TroveClient.Client(context.user, context.auth_token,
project_id=context.tenant,
project_id=context.project_id,
auth_url=CONF.trove_auth_url)
client.client.auth_token = context.auth_token
client.client.management_url = url

View File

@ -185,16 +185,19 @@ class MethodInspector(object):
def build_polling_task(retriever, condition=lambda value: value,
sleep_time=1, time_out=0):
"""Run a function in a loop with backoff on error.
The condition function runs based on the retriever function result.
"""
def poll_and_check():
obj = retriever()
if condition(obj):
raise loopingcall.LoopingCallDone(retvalue=obj)
return loopingcall.BackOffLoopingCall(
f=poll_and_check).start(initial_delay=False,
starting_interval=sleep_time,
max_interval=30, timeout=time_out)
call = loopingcall.BackOffLoopingCall(f=poll_and_check)
return call.start(initial_delay=False, starting_interval=sleep_time,
max_interval=30, timeout=time_out)
def wait_for_task(polling_task):

View File

@ -23,7 +23,7 @@ def create_links(resource_path, request, id):
link_info = {
'host': request.host,
'version': request.url_version,
'tenant_id': context.tenant,
'tenant_id': context.project_id,
'resource_path': resource_path,
'id': id,
}

View File

@ -48,11 +48,11 @@ class Configurations(object):
if db_info.count() == 0:
LOG.debug("No configurations found for admin user")
else:
db_info = DBConfiguration.find_all(tenant_id=context.tenant,
db_info = DBConfiguration.find_all(tenant_id=context.project_id,
deleted=False)
if db_info.count() == 0:
LOG.debug("No configurations found for tenant %s",
context.tenant)
context.project_id)
limit = utils.pagination_limit(context.limit,
Configurations.DEFAULT_LIMIT)
@ -133,7 +133,7 @@ class Configuration(object):
return DBConfiguration.find_by(id=id, deleted=False)
else:
return DBConfiguration.find_by(id=id,
tenant_id=context.tenant,
tenant_id=context.project_id,
deleted=False)
except ModelNotFoundError:
msg = _("Configuration group with ID %s could not be found.") % id

View File

@ -66,7 +66,7 @@ class ConfigurationsController(wsgi.Controller):
configuration_items = models.Configuration.load_items(context, id)
configuration.instance_count = instances_models.DBInstance.find_all(
tenant_id=context.tenant,
tenant_id=context.project_id,
configuration_id=configuration.id,
deleted=False).count()
@ -154,7 +154,7 @@ class ConfigurationsController(wsgi.Controller):
context, request=req)
with StartNotification(context, configuration_id=id):
instances = instances_models.DBInstance.find_all(
tenant_id=context.tenant,
tenant_id=context.project_id,
configuration_id=id,
deleted=False).all()
if instances:
@ -221,7 +221,7 @@ class ConfigurationsController(wsgi.Controller):
LOG.debug("Re-applying configuration group '%s' to all instances.",
configuration_id)
single_instances = instances_models.DBInstance.find_all(
tenant_id=context.tenant,
tenant_id=context.project_id,
configuration_id=configuration_id,
cluster_id=None,
deleted=False).all()

View File

@ -104,13 +104,13 @@ class DatabaseModelBase(models.ModelBase):
{"s_name": cls.__name__})
if ((context and not context.is_admin and hasattr(model, 'tenant_id')
and model.tenant_id != context.tenant)):
and model.tenant_id != context.project_id)):
log_fmt = ("Tenant %(s_tenant)s tried to access "
"%(s_name)s, owned by %(s_owner)s.")
exc_fmt = _("Tenant %(s_tenant)s tried to access "
"%(s_name)s, owned by %(s_owner)s.")
msg_content = {
"s_tenant": context.tenant,
"s_tenant": context.project_id,
"s_name": cls.__name__,
"s_owner": model.tenant_id}
LOG.error(log_fmt, msg_content)

View File

@ -1,58 +0,0 @@
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from trove.instance.models import DBInstance
LOG = logging.getLogger(__name__)
class Account(object):
    """All trove instance ids owned by one account (tenant)."""

    def __init__(self, id, instance_ids):
        self.id = id
        self.instance_ids = instance_ids

    @staticmethod
    def load(context, id):
        """Build an Account from the non-deleted instances of tenant `id`."""
        db_infos = DBInstance.find_all(tenant_id=id, deleted=False)
        owned_ids = [db_info.id for db_info in db_infos]
        return Account(id, owned_ids)
class AccountsSummary(object):
    """Per-tenant instance counts across all non-deleted instances."""

    def __init__(self, accounts):
        self.accounts = accounts

    @classmethod
    def load(cls):
        # TODO(pdmars): This should probably be changed to a more generic
        # database filter query if one is added; this suffices for now.
        db_infos = DBInstance.find_all(deleted=False)
        owners = [db_info.tenant_id for db_info in db_infos]
        unique_owners = set(owners)
        LOG.debug("All tenants with instances: %s", unique_owners)
        accounts = [
            {'id': tenant_id, 'num_instances': owners.count(tenant_id)}
            for tenant_id in unique_owners
        ]
        return cls(accounts)

View File

@ -1,48 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import trove.common.apischema as apischema
from trove.common.auth import admin_context
from trove.common import wsgi
from trove.extensions.account import models
from trove.extensions.account import views
LOG = logging.getLogger(__name__)
class AccountController(wsgi.Controller):
    """Controller for account functionality."""
    schemas = apischema.account

    @admin_context
    def show(self, req, tenant_id, id):
        """Return an account and the instances associated with it."""
        LOG.info("req : '%s'\n\n", req)
        LOG.info("Showing account information for '%(account)s' "
                 "to '%(tenant)s'", {'account': id, 'tenant': tenant_id})
        context = req.environ[wsgi.CONTEXT_KEY]
        loaded = models.Account.load(context, id)
        body = views.AccountView(loaded).data()
        return wsgi.Result(body, 200)

    @admin_context
    def index(self, req, tenant_id):
        """Return a list of all accounts with non-deleted instances."""
        LOG.info("req : '%s'\n\n", req)
        LOG.info("Showing all accounts with instances for '%s'", tenant_id)
        summary = models.AccountsSummary.load()
        body = views.AccountsView(summary).data()
        return wsgi.Result(body, 200)

View File

@ -1,37 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class AccountsView(object):
    """Serializes an AccountsSummary for API responses."""

    def __init__(self, accounts_summary):
        self.accounts_summary = accounts_summary

    def data(self):
        summary = self.accounts_summary
        return {'accounts': summary.accounts}
class AccountView(object):
    """Serializes a single Account for API responses."""

    def __init__(self, account):
        self.account = account

    def data(self):
        account = self.account
        return {
            'account': {
                'id': account.id,
                'instance_ids': account.instance_ids,
            }
        }

View File

@ -1,60 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from trove.common import exception
from trove.common.i18n import _
from trove.common import wsgi
from trove.extensions.mgmt.host import models
LOG = logging.getLogger(__name__)
class HostInstanceController(wsgi.Controller):
    """Controller for all instances on specific hosts."""

    def action(self, req, body, tenant_id, host_id):
        """Dispatch a POSTed action against every instance on a host."""
        LOG.info("Committing an ACTION against host %(host_id)s for "
                 "tenant '%(tenant_id)s'\n"
                 "req : '%(req)s'\n\n", {"req": req, "host_id": host_id,
                                         "tenant_id": tenant_id})
        if not body:
            raise exception.BadRequest(_("Invalid request body."))
        context = req.environ[wsgi.CONTEXT_KEY]
        host = models.DetailedHost.load(context, host_id)
        # Map of supported action names to handlers; exactly one of them
        # must appear in the request body.
        _actions = {'update': self._action_update}
        selected_action = None
        for key in body:
            if key not in _actions:
                msg = _("Invalid host action: %s") % key
                raise exception.BadRequest(msg)
            if selected_action is not None:
                msg = _("Only one action can be specified per request.")
                raise exception.BadRequest(msg)
            selected_action = _actions[key]
        if not selected_action:
            raise exception.BadRequest(_("Invalid request body."))
        return selected_action(context, host, body)

    def _action_update(self, context, host, body):
        """Trigger a guest-agent update on all instances of *host*."""
        LOG.debug("Updating all instances for host: %s", host.name)
        host.update_all(context)
        return wsgi.Result(None, 202)

View File

@ -1,102 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Model classes that extend the instances functionality for MySQL instances.
"""
from novaclient import exceptions as nova_exceptions
from oslo_log import log as logging
from trove.common import exception
from trove.common.i18n import _
from trove.common.remote import create_guest_client
from trove.common.remote import create_nova_client
from trove.instance.models import DBInstance
from trove.instance.models import InstanceServiceStatus
from trove.instance.models import SimpleInstance
LOG = logging.getLogger(__name__)
class SimpleHost(object):
    """Lightweight view of a compute host: its name and instance count."""

    def __init__(self, name, instance_count):
        self.name = name
        self.instance_count = instance_count

    @staticmethod
    def load_all(context):
        """Return a SimpleHost for every host reported by Nova."""
        client = create_nova_client(context)
        # Lazy %-style arguments: the message is only formatted when debug
        # logging is actually enabled (the old "..." + str(x) form always
        # paid the formatting cost).
        LOG.debug("Client.rdhosts=%s", client.rdhosts)
        rdhosts = client.rdhosts.list()
        LOG.debug("RDHOSTS=%s", rdhosts)
        for rdhost in rdhosts:
            LOG.debug("rdhost=%s", rdhost)
        return [SimpleHost(rdhost.name, rdhost.instanceCount)
                for rdhost in rdhosts]
class DetailedHost(object):
    """Full view of one compute host, including the instances running on it.

    Wraps the ``rdhosts`` host_info object returned by the Nova client and
    enriches each instance entry in-place with its Trove database record,
    if one exists.
    """
    def __init__(self, host_info):
        # Capacity figures reported by Nova for this host.
        self.name = host_info.name
        self.percent_used = host_info.percentUsed
        self.total_ram = host_info.totalRAM
        self.used_ram = host_info.usedRAM
        self.instances = host_info.instances
        for instance in self.instances:
            # Rename Nova's 'uuid' key to 'server_id' so it is not confused
            # with the Trove instance id filled in below.
            instance['server_id'] = instance['uuid']
            del instance['uuid']
            try:
                # Look up the Trove record for this server and copy its id,
                # tenant and current service status into the entry.
                db_info = DBInstance.find_by(
                    compute_instance_id=instance['server_id'])
                instance['id'] = db_info.id
                instance['tenant_id'] = db_info.tenant_id
                status = InstanceServiceStatus.find_by(
                    instance_id=db_info.id)
                instance_info = SimpleInstance(None, db_info, status)
                instance['status'] = instance_info.status
            except exception.TroveError as re:
                # Orphaned server: no Trove record for this compute id.
                # Keep the entry but mark it with a null Trove id.
                LOG.error(re)
                LOG.error("Compute Instance ID found with no associated RD "
                          "instance: %s.", instance['server_id'])
                instance['id'] = None
    def update_all(self, context):
        """Update the guest agent on every instance on this host.

        All instances are attempted before failing; if any updates failed,
        an UpdateGuestError listing the failed instance ids is raised.
        """
        num_i = len(self.instances)
        LOG.debug("Host %(name)s has %(num)s instances to update.",
                  {'name': self.name, 'num': num_i})
        failed_instances = []
        for instance in self.instances:
            client = create_guest_client(context, instance['id'])
            try:
                client.update_guest()
            except exception.TroveError as re:
                LOG.error(re)
                LOG.error("Unable to update instance: %s.", instance['id'])
                failed_instances.append(instance['id'])
        if len(failed_instances) > 0:
            msg = _("Failed to update instances: %s.") % failed_instances
            raise exception.UpdateGuestError(msg)
    @staticmethod
    def load(context, name):
        """Load a DetailedHost by host name via the Nova client.

        Raises exception.NotFound when Nova does not know the host.
        """
        client = create_nova_client(context)
        try:
            return DetailedHost(client.rdhosts.get(name))
        except nova_exceptions.NotFound:
            raise exception.NotFound(uuid=name)

View File

@ -1,47 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from trove.common.auth import admin_context
from trove.common import wsgi
from trove.extensions.mgmt.host import models
from trove.extensions.mgmt.host import views
from trove.instance.service import InstanceController
LOG = logging.getLogger(__name__)
class HostController(InstanceController):
    """Controller for instance functionality."""

    @admin_context
    def index(self, req, tenant_id, detailed=False):
        """Return all hosts."""
        LOG.info("req : '%s'\n\n", req)
        LOG.info("Indexing a host for tenant '%s'", tenant_id)
        context = req.environ[wsgi.CONTEXT_KEY]
        host_list = models.SimpleHost.load_all(context)
        body = views.HostsView(host_list).data()
        return wsgi.Result(body, 200)

    @admin_context
    def show(self, req, tenant_id, id):
        """Return a single host."""
        LOG.info("req : '%s'\n\n", req)
        LOG.info("Showing a host for tenant '%s'", tenant_id)
        LOG.info("id : '%s'\n\n", id)
        context = req.environ[wsgi.CONTEXT_KEY]
        detailed_host = models.DetailedHost.load(context, id)
        body = views.HostDetailedView(detailed_host).data()
        return wsgi.Result(body, 200)

View File

@ -1,51 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class HostView(object):
    """Serializes one host's summary fields for API responses."""

    def __init__(self, host):
        self.host = host

    def data(self):
        host = self.host
        return {
            'instanceCount': host.instance_count,
            'name': host.name
        }
class HostDetailedView(object):
    """Serializes a detailed host, including its instance list."""

    def __init__(self, host):
        self.host = host

    def data(self):
        host = self.host
        detail = {
            'instances': host.instances,
            'name': host.name,
            'percentUsed': host.percent_used,
            'totalRAM': host.total_ram,
            'usedRAM': host.used_ram,
        }
        return {'host': detail}
class HostsView(object):
    """Serializes a collection of hosts."""

    def __init__(self, hosts):
        self.hosts = hosts

    def data(self):
        return {'hosts': [HostView(host).data() for host in self.hosts]}

View File

@ -1,50 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Model classes that extend the instances functionality for volumes.
"""
from oslo_log import log as logging
from trove.common.remote import create_cinder_client
LOG = logging.getLogger(__name__)
class StorageDevice(object):
    """Flattened view of one Cinder storage backend's capacity report."""

    def __init__(self, storage_info):
        capacity = storage_info.capacity
        provision = storage_info.provision
        self.name = storage_info.name
        self.type = storage_info.type
        self.total_space = capacity['total']
        self.total_avail = capacity['available']
        self.prov_total = provision['total']
        self.prov_avail = provision['available']
        self.prov_percent = provision['percent']
        self.used = storage_info.used
class StorageDevices(object):
    """Loader for all storage devices known to Cinder."""

    @staticmethod
    def load(context, region_name):
        """Return a StorageDevice for every backend reported by Cinder."""
        client = create_cinder_client(context, region_name)
        rdstorages = client.rdstorage.list()
        for rdstorage in rdstorages:
            # Lazy %-style argument: only formatted when debug logging is
            # enabled (the old "..." + str(x) form always formatted).
            LOG.debug("rdstorage=%s", rdstorage)
        return [StorageDevice(storage_info)
                for storage_info in rdstorages]

View File

@ -1,39 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from trove.common.auth import admin_context
from trove.common import cfg
from trove.common import wsgi
from trove.extensions.mgmt.volume import models
from trove.extensions.mgmt.volume import views
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class StorageController(wsgi.Controller):
    """Controller for storage device functionality."""

    @admin_context
    def index(self, req, tenant_id):
        """Return all storage devices."""
        LOG.info("req : '%s'\n\n", req)
        LOG.info("Indexing storage info for tenant '%s'", tenant_id)
        context = req.environ[wsgi.CONTEXT_KEY]
        devices = models.StorageDevices.load(context, CONF.os_region_name)
        body = views.StoragesView(devices).data()
        return wsgi.Result(body, 200)

View File

@ -1,40 +0,0 @@
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class StorageView(object):
    """Serializes a single StorageDevice for API responses."""

    def __init__(self, storage):
        self.storage = storage

    def data(self):
        device = self.storage
        capacity = {'total': device.total_space,
                    'available': device.total_avail}
        provision = {'total': device.prov_total,
                     'available': device.prov_avail,
                     'percent': device.prov_percent}
        return {'name': device.name,
                'type': device.type,
                'capacity': capacity,
                'provision': provision,
                'used': device.used}
class StoragesView(object):
    """Serializes a collection of StorageDevice objects."""

    def __init__(self, storages):
        self.storages = storages

    def data(self):
        return {'devices': [StorageView(item).data()
                            for item in self.storages]}

View File

@ -1,44 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.common import extensions
from trove.extensions.account import service
class Account(extensions.ExtensionDescriptor):
    """Extension descriptor exposing the account management API."""

    def get_name(self):
        return "Account"

    def get_description(self):
        return "Account information with instances"

    def get_alias(self):
        return "Account"

    def get_namespace(self):
        return "http://TBD"

    def get_updated(self):
        return "2012-06-07T13:25:27-06:00"

    def get_resources(self):
        # A single collection resource rooted at the tenant's mgmt path.
        controller = service.AccountController()
        accounts = extensions.ResourceExtension(
            '{tenant_id}/mgmt/accounts', controller)
        return [accounts]

View File

@ -17,12 +17,9 @@ from trove.common import extensions
from trove.extensions.mgmt.clusters.service import MgmtClusterController
from trove.extensions.mgmt.configuration import service as conf_service
from trove.extensions.mgmt.datastores.service import DatastoreVersionController
from trove.extensions.mgmt.host.instance import service as hostservice
from trove.extensions.mgmt.host.service import HostController
from trove.extensions.mgmt.instances.service import MgmtInstanceController
from trove.extensions.mgmt.quota.service import QuotaController
from trove.extensions.mgmt.upgrade.service import UpgradeController
from trove.extensions.mgmt.volume.service import StorageController
class Mgmt(extensions.ExtensionDescriptor):
@ -61,32 +58,12 @@ class Mgmt(extensions.ExtensionDescriptor):
member_actions={'action': 'POST'})
resources.append(clusters)
hosts = extensions.ResourceExtension(
'{tenant_id}/mgmt/hosts',
HostController(),
member_actions={})
resources.append(hosts)
quota = extensions.ResourceExtension(
'{tenant_id}/mgmt/quotas',
QuotaController(),
member_actions={})
resources.append(quota)
storage = extensions.ResourceExtension(
'{tenant_id}/mgmt/storage',
StorageController(),
member_actions={})
resources.append(storage)
host_instances = extensions.ResourceExtension(
'instances',
hostservice.HostInstanceController(),
parent={'member_name': 'host',
'collection_name': '{tenant_id}/mgmt/hosts'},
collection_actions={'action': 'POST'})
resources.append(host_instances)
upgrade = extensions.ResourceExtension(
'{tenant_id}/mgmt/instances/{instance_id}/upgrade',
UpgradeController(),

View File

@ -46,47 +46,8 @@ class SecurityGroup(DatabaseModelBase):
@property
def instance_id(self):
return SecurityGroupInstanceAssociation\
.get_instance_id_by_security_group_id(self.id)
@classmethod
def create_sec_group(cls, name, description, context, region_name):
try:
remote_sec_group = RemoteSecurityGroup.create(
name, description, context, region_name)
if not remote_sec_group:
raise exception.SecurityGroupCreationError(
_("Failed to create Security Group."))
else:
return cls.create(
id=remote_sec_group.data()['id'],
name=name,
description=description,
user=context.user,
tenant_id=context.tenant)
except exception.SecurityGroupCreationError:
LOG.exception("Failed to create remote security group.")
raise
@classmethod
def create_for_instance(cls, instance_id, context, region_name):
# Create a new security group
name = "%s_%s" % (CONF.trove_security_group_name_prefix, instance_id)
description = _("Security Group for %s") % instance_id
sec_group = cls.create_sec_group(name, description, context,
region_name)
# Currently this locked down by default, since we don't create any
# default security group rules for the security group.
# Create security group instance association
SecurityGroupInstanceAssociation.create(
security_group_id=sec_group["id"],
instance_id=instance_id)
return sec_group
return SecurityGroupInstanceAssociation.\
get_instance_id_by_security_group_id(self.id)
@classmethod
def get_security_group_by_id_or_instance_id(cls, id, tenant_id):
@ -127,11 +88,8 @@ class SecurityGroup(DatabaseModelBase):
if sec_group:
sec_group.delete(context, region_name)
association.delete()
except (exception.ModelNotFoundError,
exception.TroveError):
LOG.info('Security Group with id: %(id)s '
'already had been deleted',
{'id': instance_id})
except (exception.ModelNotFoundError, exception.TroveError):
pass
class SecurityGroupRule(DatabaseModelBase):
@ -140,36 +98,6 @@ class SecurityGroupRule(DatabaseModelBase):
'updated', 'deleted', 'deleted_at']
_table_name = 'security_group_rules'
@classmethod
def create_sec_group_rule(cls, sec_group, protocol, from_port,
to_port, cidr, context, region_name):
try:
remote_rule_id = RemoteSecurityGroup.add_rule(
sec_group_id=sec_group['id'],
protocol=protocol,
from_port=from_port,
to_port=to_port,
cidr=cidr,
context=context,
region_name=region_name)
if not remote_rule_id:
raise exception.SecurityGroupRuleCreationError(
"Failed to create Remote Security Group Rule")
else:
# Create db record
return cls.create(
id=remote_rule_id,
protocol=protocol,
from_port=from_port,
to_port=to_port,
cidr=cidr,
group_id=sec_group['id'])
except exception.SecurityGroupRuleCreationError:
LOG.exception("Failed to create remote security group rule.")
raise
def get_security_group(self, tenant_id):
return SecurityGroup.find_by(id=self.group_id,
tenant_id=tenant_id,
@ -226,30 +154,12 @@ class RemoteSecurityGroup(NetworkRemoteModelBase):
else:
self._data_object = security_group
@classmethod
def create(cls, name, description, context, region_name):
"""Creates a new Security Group."""
driver = cls.get_driver(context, region_name)
sec_group = driver.create_security_group(
name=name, description=description)
return RemoteSecurityGroup(security_group=sec_group)
@classmethod
def delete(cls, sec_group_id, context, region_name):
"""Deletes a Security Group."""
driver = cls.get_driver(context, region_name)
driver.delete_security_group(sec_group_id)
@classmethod
def add_rule(cls, sec_group_id, protocol, from_port,
to_port, cidr, context, region_name):
"""Adds a new rule to an existing security group."""
driver = cls.get_driver(context, region_name)
sec_group_rule = driver.add_security_group_rule(
sec_group_id, protocol, from_port, to_port, cidr)
return sec_group_rule.id
@classmethod
def delete_rule(cls, sec_group_rule_id, context, region_name):
"""Deletes a rule from an existing security group."""

View File

@ -292,7 +292,7 @@ class DebianPackagerMixin(BasePackagerMixin):
if selections:
with NamedTemporaryFile(delete=False) as f:
fname = f.name
f.write(selections)
f.write(encodeutils.safe_encode(selections))
try:
utils.execute("debconf-set-selections", fname,
run_as_root=True, root_helper="sudo")

View File

@ -565,7 +565,7 @@ def load_instance_with_info(cls, context, id, cluster_id=None):
{'instance_id': id, 'service_status': service_status.status})
instance = cls(context, db_info, service_status)
load_guest_info(instance, context, id)
load_server_group_info(instance, context, db_info.compute_instance_id)
load_server_group_info(instance, context)
return instance
@ -581,8 +581,9 @@ def load_guest_info(instance, context, id):
return instance
def load_server_group_info(instance, context, compute_id):
server_group = srv_grp.ServerGroup.load(context, compute_id)
def load_server_group_info(instance, context):
instance_id = instance.slave_of_id if instance.slave_of_id else instance.id
server_group = srv_grp.ServerGroup.load(context, instance_id)
if server_group:
instance.locality = srv_grp.ServerGroup.get_locality(server_group)
@ -675,8 +676,9 @@ class BaseInstance(SimpleInstance):
task_status=InstanceTasks.NONE)
self.set_servicestatus_deleted()
self.set_instance_fault_deleted()
# Delete associated security group
if CONF.trove_security_groups_support:
# Delete associated security group for backward compatibility
SecurityGroup.delete_for_instance(self.db_info.id, self.context,
self.db_info.region_id)
@ -736,8 +738,8 @@ class BaseInstance(SimpleInstance):
def server_group(self):
# The server group could be empty, so we need a flag to cache it
if not self._server_group_loaded:
self._server_group = srv_grp.ServerGroup.load(
self.context, self.db_info.compute_instance_id)
self._server_group = srv_grp.ServerGroup.load(self.context,
self.id)
self._server_group_loaded = True
return self._server_group
@ -868,7 +870,7 @@ class Instance(BuiltInstance):
availability_zone=None, nics=None,
configuration_id=None, slave_of_id=None, cluster_config=None,
replica_count=None, volume_type=None, modules=None,
locality=None, region_name=None):
locality=None, region_name=None, access=None):
region_name = region_name or CONF.os_region_name
@ -1052,7 +1054,8 @@ class Instance(BuiltInstance):
root_password = None
for instance_index in range(0, instance_count):
db_info = DBInstance.create(
name=name, flavor_id=flavor_id, tenant_id=context.tenant,
name=name, flavor_id=flavor_id,
tenant_id=context.project_id,
volume_size=volume_size,
datastore_version_id=datastore_version.id,
task_status=InstanceTasks.BUILDING,
@ -1062,7 +1065,7 @@ class Instance(BuiltInstance):
region_id=region_name)
LOG.debug("Tenant %(tenant)s created new Trove instance "
"%(db)s in region %(region)s.",
{'tenant': context.tenant, 'db': db_info.id,
{'tenant': context.project_id, 'db': db_info.id,
'region': region_name})
instance_id = db_info.id
@ -1109,13 +1112,14 @@ class Instance(BuiltInstance):
volume_size, backup_id, availability_zone, root_password,
nics, overrides, slave_of_id, cluster_config,
volume_type=volume_type, modules=module_list,
locality=locality)
locality=locality, access=access)
return SimpleInstance(context, db_info, service_status,
root_password, locality=locality)
with StartNotification(context, **call_args):
return run_with_quotas(context.tenant, deltas, _create_resources)
return run_with_quotas(context.project_id, deltas,
_create_resources)
@classmethod
def add_instance_modules(cls, context, instance_id, modules):
@ -1507,7 +1511,7 @@ class Instances(object):
raise TypeError(_("Argument context not defined."))
client = create_nova_client(context)
servers = client.servers.list()
query_opts = {'tenant_id': context.tenant,
query_opts = {'tenant_id': context.project_id,
'deleted': False}
if not include_clustered:
query_opts['cluster_id'] = None
@ -1731,7 +1735,7 @@ def module_instance_count(context, module_id, include_clustered=False):
if not include_clustered:
filters.append(DBInstance.cluster_id.is_(None))
if not context.is_admin:
filters.append(DBInstance.tenant_id == context.tenant)
filters.append(DBInstance.tenant_id == context.project_id)
query = query.group_by(module_models.DBInstanceModule.md5)
query = query.add_columns(*columns)
query = query.filter(*filters)

View File

@ -249,8 +249,9 @@ class InstanceController(wsgi.Controller):
server = models.load_instance_with_info(models.DetailInstance,
context, id)
self.authorize_instance_action(context, 'show', server)
return wsgi.Result(views.InstanceDetailView(server,
req=req).data(), 200)
return wsgi.Result(
views.InstanceDetailView(server, req=req).data(), 200
)
def delete(self, req, tenant_id, id):
"""Delete a single instance."""
@ -340,7 +341,7 @@ class InstanceController(wsgi.Controller):
backup_id = None
availability_zone = body['instance'].get('availability_zone')
nics = body['instance'].get('nics')
nics = body['instance'].get('nics', [])
slave_of_id = body['instance'].get('replica_of',
# also check for older name
@ -360,7 +361,9 @@ class InstanceController(wsgi.Controller):
'Cannot specify locality when adding replicas to existing '
'master.')
raise exception.BadRequest(message=dupe_locality_msg)
region_name = body['instance'].get('region_name', CONF.os_region_name)
access = body['instance'].get('access', None)
instance = models.Instance.create(context, name, flavor_id,
image_id, databases, users,
@ -372,7 +375,8 @@ class InstanceController(wsgi.Controller):
volume_type=volume_type,
modules=modules,
locality=locality,
region_name=region_name)
region_name=region_name,
access=access)
view = views.InstanceDetailView(instance, req=req)
return wsgi.Result(view.data(), 200)

View File

@ -119,7 +119,7 @@ class InstanceTasks(object):
'guestagent timeout.',
is_error=True)
BUILDING_ERROR_PORT = InstanceTask(0x5c, 'BUILDING',
'Build error: Management port.',
'Build error: Port.',
is_error=True)
# Dissuade further additions at run-time.

View File

@ -65,10 +65,12 @@ class Modules(object):
# plus the 'all' tenant ones
query_opts['visible'] = True
db_info = DBModule.query().filter_by(**query_opts)
db_info = db_info.filter(or_(DBModule.tenant_id == context.tenant,
DBModule.tenant_id.is_(None)))
db_info = db_info.filter(
or_(DBModule.tenant_id == context.project_id,
DBModule.tenant_id.is_(None))
)
if db_info.count() == 0:
LOG.debug("No modules found for tenant %s", context.tenant)
LOG.debug("No modules found for tenant %s", context.project_id)
modules = db_info.all()
return modules
@ -83,12 +85,12 @@ class Modules(object):
query_opts = {'deleted': False,
'auto_apply': True}
db_info = DBModule.query().filter_by(**query_opts)
db_info = Modules.add_tenant_filter(db_info, context.tenant)
db_info = Modules.add_tenant_filter(db_info, context.project_id)
db_info = Modules.add_datastore_filter(db_info, datastore_id)
db_info = Modules.add_ds_version_filter(db_info, datastore_version_id)
if db_info.count() == 0:
LOG.debug("No auto-apply modules found for tenant %s",
context.tenant)
context.project_id)
modules = db_info.all()
return modules
@ -123,7 +125,8 @@ class Modules(object):
query_opts = {'deleted': False}
db_info = DBModule.query().filter_by(**query_opts)
if not context.is_admin:
db_info = Modules.add_tenant_filter(db_info, context.tenant)
db_info = Modules.add_tenant_filter(db_info,
context.project_id)
db_info = db_info.filter(DBModule.id.in_(module_ids))
modules = db_info.all()
return modules
@ -285,7 +288,7 @@ class Module(object):
module = DBModule.find_by(id=module_id, deleted=False)
else:
module = DBModule.find_by(
id=module_id, tenant_id=context.tenant, visible=True,
id=module_id, tenant_id=context.project_id, visible=True,
deleted=False)
except exception.ModelNotFoundError:
# See if we have the module in the 'all' tenant section

View File

@ -28,24 +28,10 @@ class NetworkDriver(object):
Returns security group with given group_id
"""
@abc.abstractmethod
def create_security_group(self, name, description):
"""
Creates the security group with given name and description
"""
@abc.abstractmethod
def delete_security_group(self, sec_group_id):
"""Deletes the security group by given ID."""
@abc.abstractmethod
def add_security_group_rule(self, sec_group_id, protocol,
from_port, to_port, cidr):
"""
Adds the rule identified by the security group ID,
transport protocol, port range: from -> to, CIDR.
"""
@abc.abstractmethod
def delete_security_group_rule(self, sec_group_rule_id):
"""Deletes the rule by given ID."""

View File

@ -12,8 +12,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutronclient.common import exceptions as neutron_exceptions
from oslo_log import log as logging
@ -21,23 +19,8 @@ from trove.common import exception
from trove.common import remote
from trove.network import base
LOG = logging.getLogger(__name__)
CONST = {'IPv4': "IPv4",
'IPv6': "IPv6",
'INGRESS': "ingress",
'EGRESS': "egress",
'PROTO_NAME_TCP': 'tcp',
'PROTO_NAME_ICMP': 'icmp',
'PROTO_NAME_ICMP_V6': 'icmpv6',
'PROTO_NAME_UDP': 'udp'}
class NovaNetworkStruct(object):
def __init__(self, **properties):
self.__dict__.update(properties)
class NeutronDriver(base.NetworkDriver):
@ -54,18 +37,6 @@ class NeutronDriver(base.NetworkDriver):
LOG.exception('Failed to get remote security group')
raise exception.TroveError(str(e))
def create_security_group(self, name, description):
try:
sec_group_body = {"security_group": {"name": name,
"description": description}}
sec_group = self.client.create_security_group(body=sec_group_body)
return self._convert_to_nova_security_group_format(
sec_group.get('security_group', sec_group))
except neutron_exceptions.NeutronClientException as e:
LOG.exception('Failed to create remote security group')
raise exception.SecurityGroupCreationError(str(e))
def delete_security_group(self, sec_group_id):
try:
self.client.delete_security_group(security_group=sec_group_id)
@ -73,34 +44,6 @@ class NeutronDriver(base.NetworkDriver):
LOG.exception('Failed to delete remote security group')
raise exception.SecurityGroupDeletionError(str(e))
def add_security_group_rule(self, sec_group_id, protocol,
from_port, to_port, cidr,
direction=CONST['INGRESS'],
ethertype=CONST['IPv4']):
"""Add a rule to the Neutron security group sec_group_id.

Returns the created rule converted to the nova-style format.
A Neutron 409 (rule already exists, per the inline comment) is
logged specially; other Neutron client errors result in
SecurityGroupRuleCreationError.
"""
try:
secgroup_rule_body = {"security_group_rule":
{"security_group_id": sec_group_id,
"protocol": protocol,
"port_range_min": from_port,
"port_range_max": to_port,
"remote_ip_prefix": cidr,
"direction": direction, # ingress | egress
"ethertype": ethertype, # IPv4 | IPv6
}}
secgroup_rule = self.client.create_security_group_rule(
secgroup_rule_body)
# Neutron may nest the rule under 'security_group_rule'.
return self._convert_to_nova_security_group_rule_format(
secgroup_rule.get('security_group_rule', secgroup_rule))
except neutron_exceptions.NeutronClientException as e:
# ignore error if rule already exists
if e.status_code == 409:
LOG.exception("Security group rule already exists")
else:
LOG.exception('Failed to add rule to remote security '
'group')
raise exception.SecurityGroupRuleCreationError(str(e))
def delete_security_group_rule(self, sec_group_rule_id):
try:
self.client.delete_security_group_rule(
@ -109,37 +52,3 @@ class NeutronDriver(base.NetworkDriver):
except neutron_exceptions.NeutronClientException as e:
LOG.exception('Failed to delete rule to remote security group')
raise exception.SecurityGroupRuleDeletionError(str(e))
def _convert_to_nova_security_group_format(self, security_group):
    """Map a Neutron security group dict to a nova-style object.

    Only ingress rules are carried over; each rule is converted via
    _convert_to_nova_security_group_rule_format.
    """
    ingress_rules = [
        self._convert_to_nova_security_group_rule_format(rule)
        for rule in security_group.get('security_group_rules', [])
        if rule['direction'] == 'ingress'
    ]
    return NovaNetworkStruct(
        id=security_group['id'],
        description=security_group['description'],
        name=security_group['name'],
        project_id=security_group['tenant_id'],
        rules=ingress_rules,
    )
def _convert_to_nova_security_group_rule_format(self, rule):
    """Map a Neutron security group rule dict to a nova-style object.

    Neutron leaves port_range_min/max unset for "all ports" rules,
    while the nova format always reports an explicit range, so a
    missing range is expanded to 1-65535 for TCP/UDP and to -1/-1
    for other protocols.
    """
    protocol = rule['protocol']
    from_port = rule.get('port_range_min')
    to_port = rule.get('port_range_max')
    if protocol and from_port is None and to_port is None:
        if protocol.upper() in ['TCP', 'UDP']:
            from_port, to_port = 1, 65535
        else:
            from_port, to_port = -1, -1
    return NovaNetworkStruct(
        id=rule['id'],
        parent_group_id=rule['security_group_id'],
        protocol=protocol,
        from_port=from_port,
        to_port=to_port,
        group_id=rule['remote_group_id'],
        cidr=rule.get('remote_ip_prefix'),
    )

View File

@ -21,7 +21,6 @@ from trove.common import exception
from trove.common import remote
from trove.network import base
LOG = logging.getLogger(__name__)
@ -41,15 +40,6 @@ class NovaNetwork(base.NetworkDriver):
LOG.exception('Failed to get remote security group')
raise exception.TroveError(str(e))
def create_security_group(self, name, description):
    """Create a nova security group with the given name/description.

    Returns the nova client's security group object.
    :raises SecurityGroupCreationError: on any nova client error.
    """
    try:
        return self.client.security_groups.create(
            name=name, description=description)
    except nova_exceptions.ClientException as e:
        LOG.exception('Failed to create remote security group')
        raise exception.SecurityGroupCreationError(str(e))
def delete_security_group(self, sec_group_id):
try:
self.client.security_groups.delete(sec_group_id)
@ -57,21 +47,6 @@ class NovaNetwork(base.NetworkDriver):
LOG.exception('Failed to delete remote security group')
raise exception.SecurityGroupDeletionError(str(e))
def add_security_group_rule(self, sec_group_id, protocol,
                            from_port, to_port, cidr):
    """Add a rule to the nova security group sec_group_id.

    Returns the created rule object.
    :raises SecurityGroupRuleCreationError: on any nova client error.
    """
    try:
        return self.client.security_group_rules.create(
            parent_group_id=sec_group_id,
            ip_protocol=protocol,
            from_port=from_port,
            to_port=to_port,
            cidr=cidr)
    except nova_exceptions.ClientException as e:
        LOG.exception('Failed to add rule to remote security group')
        raise exception.SecurityGroupRuleCreationError(str(e))
def delete_security_group_rule(self, sec_group_rule_id):
try:
self.client.security_group_rules.delete(sec_group_rule_id)

View File

@ -193,7 +193,7 @@ class API(object):
availability_zone=None, root_password=None,
nics=None, overrides=None, slave_of_id=None,
cluster_config=None, volume_type=None,
modules=None, locality=None):
modules=None, locality=None, access=None):
LOG.debug("Making async call to create instance %s ", instance_id)
version = self.API_BASE_VERSION
@ -214,7 +214,7 @@ class API(object):
slave_of_id=slave_of_id,
cluster_config=cluster_config,
volume_type=volume_type,
modules=modules, locality=locality)
modules=modules, locality=locality, access=access)
def create_cluster(self, cluster_id):
LOG.debug("Making async call to create cluster %s ", cluster_id)

View File

@ -330,7 +330,8 @@ class Manager(periodic_task.PeriodicTasks):
master_instance_tasks = BuiltInstanceTasks.load(context, slave_of_id)
server_group = master_instance_tasks.server_group
scheduler_hints = srv_grp.ServerGroup.convert_to_hint(server_group)
LOG.debug("Using scheduler hints for locality: %s", scheduler_hints)
LOG.info("Using scheduler hints %s for creating instance %s",
scheduler_hints, instance_id)
try:
for replica_index in range(0, len(ids)):
@ -371,7 +372,8 @@ class Manager(periodic_task.PeriodicTasks):
image_id, databases, users, datastore_manager,
packages, volume_size, backup_id, availability_zone,
root_password, nics, overrides, slave_of_id,
cluster_config, volume_type, modules, locality):
cluster_config, volume_type, modules, locality,
access=None):
if slave_of_id:
self._create_replication_slave(context, instance_id, name,
flavor, image_id, databases, users,
@ -384,17 +386,24 @@ class Manager(periodic_task.PeriodicTasks):
if type(instance_id) in [list]:
raise AttributeError(_(
"Cannot create multiple non-replica instances."))
instance_tasks = FreshInstanceTasks.load(context, instance_id)
scheduler_hints = srv_grp.ServerGroup.build_scheduler_hint(
context, locality, instance_id)
instance_tasks.create_instance(flavor, image_id, databases, users,
datastore_manager, packages,
volume_size, backup_id,
availability_zone, root_password,
nics, overrides, cluster_config,
None, volume_type, modules,
scheduler_hints)
context, locality, instance_id
)
LOG.info("Using scheduler hints %s for creating instance %s",
scheduler_hints, instance_id)
instance_tasks = FreshInstanceTasks.load(context, instance_id)
instance_tasks.create_instance(
flavor, image_id, databases, users,
datastore_manager, packages,
volume_size, backup_id,
availability_zone, root_password,
nics, overrides, cluster_config,
None, volume_type, modules,
scheduler_hints, access=access
)
timeout = (CONF.restore_usage_timeout if backup_id
else CONF.usage_timeout)
instance_tasks.wait_for_instance(timeout, flavor)
@ -403,7 +412,8 @@ class Manager(periodic_task.PeriodicTasks):
image_id, databases, users, datastore_manager,
packages, volume_size, backup_id, availability_zone,
root_password, nics, overrides, slave_of_id,
cluster_config, volume_type, modules, locality):
cluster_config, volume_type, modules, locality,
access=None):
with EndNotification(context,
instance_id=(instance_id[0]
if isinstance(instance_id, list)
@ -414,7 +424,7 @@ class Manager(periodic_task.PeriodicTasks):
backup_id, availability_zone,
root_password, nics, overrides, slave_of_id,
cluster_config, volume_type, modules,
locality)
locality, access=access)
def upgrade(self, context, instance_id, datastore_version_id):
instance_tasks = models.BuiltInstanceTasks.load(context, instance_id)

View File

@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os.path
import time
import traceback
@ -39,13 +40,13 @@ from trove.common.exception import BackupCreationError
from trove.common.exception import GuestError
from trove.common.exception import GuestTimeout
from trove.common.exception import InvalidModelError
from trove.common.exception import MalformedSecurityGroupRuleError
from trove.common.exception import PollTimeOut
from trove.common.exception import TroveError
from trove.common.exception import VolumeCreationFailure
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.common.instance import ServiceStatuses
from trove.common import neutron
from trove.common.notification import (
DBaaSInstanceRestart,
DBaaSInstanceUpgrade,
@ -59,7 +60,6 @@ import trove.common.remote as remote
from trove.common.remote import create_cinder_client
from trove.common.remote import create_dns_client
from trove.common.remote import create_guest_client
from trove.common.remote import create_neutron_client
from trove.common import server_group as srv_grp
from trove.common.strategies.cluster import strategy
from trove.common import template
@ -67,8 +67,6 @@ from trove.common import timeutils
from trove.common import utils
from trove.common.utils import try_recover
from trove.extensions.mysql import models as mysql_models
from trove.extensions.security_group.models import SecurityGroup
from trove.extensions.security_group.models import SecurityGroupRule
from trove.instance import models as inst_models
from trove.instance.models import BuiltInstance
from trove.instance.models import DBInstance
@ -425,15 +423,12 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
# If volume has "available" status, delete it manually.
try:
if self.volume_id:
volume_client = create_cinder_client(self.context)
volume = volume_client.volumes.get(self.volume_id)
volume = self.volume_client.volumes.get(self.volume_id)
if volume.status == "available":
LOG.info("Deleting volume %(v)s for instance: %(i)s.",
{'v': self.volume_id, 'i': self.id})
volume.delete()
except Exception:
LOG.exception("Error deleting volume of instance %(id)s.",
{'id': self.db_info.id})
except Exception as e:
LOG.warning("Failed to delete volume for instance %s, error: %s",
self.id, six.text_type(e))
LOG.debug("End _delete_resource for instance %s", self.id)
@ -466,83 +461,116 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
self.id, error_message, error_details,
skip_delta=CONF.usage_sleep_time + 1)
def _create_management_port(self, network, default_sgs=[]):
"""Create port in the management network."""
security_groups = default_sgs
if len(CONF.management_security_groups) > 0:
security_groups = CONF.management_security_groups
def _create_port(self, network, security_groups, is_mgmt=False,
is_public=False):
name = 'trove-%s' % self.id
type = 'Management' if is_mgmt else 'User'
description = '%s port for trove instance %s' % (type, self.id)
try:
neutron_client = create_neutron_client(self.context)
body = {
'port': {
'name': 'trove-%s' % self.id,
'description': ('Management port for Trove instance %s'
% self.id),
'network_id': network,
'admin_state_up': True,
'security_groups': security_groups
}
}
port = neutron_client.create_port(body)
return port['port']['id']
port_id = neutron.create_port(
self.neutron_client, name,
description, network,
security_groups,
is_public=is_public
)
except Exception:
error = "Failed to create management port."
error = ("Failed to create %s port for instance %s"
% (type, self.id))
LOG.exception(error)
self.update_db(
task_status=inst_models.InstanceTasks.BUILDING_ERROR_PORT
)
raise TroveError(message=error)
return port_id
def _prepare_networks_for_instance(self, datastore_manager, nics,
access=None):
"""Prepare the networks for the trove instance.

Creates the instance security group (when enabled), a port in the
management network (when configured), and a port in the user
defined network, returning the resulting nics list for nova.
The params are all passed from trove-taskmanager.
Exception is raised if any error happens.

:param datastore_manager: datastore name, used by _create_secgroup
to look up which ports to open.
:param nics: network definitions from the caller; the management
network entry (if any) is always the last one.
:param access: optional dict; 'allowed_cidrs' feeds the security
group rules and 'is_public' requests a floating IP on the
user port.
"""
LOG.info("Preparing networks for the instance %s", self.id)
security_group = None
# Work on a copy so the caller's nics list is left untouched.
networks = copy.deepcopy(nics)
access = access or {}
if CONF.trove_security_groups_support:
security_group = self._create_secgroup(
datastore_manager,
access.get('allowed_cidrs', [])
)
LOG.info(
"Security group %s created for instance %s",
security_group, self.id
)
# Create management port
if CONF.management_networks:
# Operator-configured management security groups override the
# instance security group on the management port.
port_sgs = [security_group] if security_group else []
if len(CONF.management_security_groups) > 0:
port_sgs = CONF.management_security_groups
# The management network is always the last one
networks.pop(-1)
port_id = self._create_port(
CONF.management_networks[-1],
port_sgs,
is_mgmt=True
)
LOG.info("Management port %s created for instance: %s", port_id,
self.id)
networks.append({"port-id": port_id})
# Create port in the user defined network, associate floating IP if
# needed
if len(networks) > 1 or not CONF.management_networks:
# The user network is the first entry; replace its net-id with a
# pre-created port so Trove controls how the DB is exposed.
network = networks.pop(0).get("net-id")
port_sgs = [security_group] if security_group else []
port_id = self._create_port(
network,
port_sgs,
is_mgmt=False,
is_public=access.get('is_public', False)
)
LOG.info("User port %s created for instance %s", port_id,
self.id)
networks.insert(0, {"port-id": port_id})
LOG.info(
"Finished to prepare networks for the instance %s, networks: %s",
self.id, networks
)
return networks
def create_instance(self, flavor, image_id, databases, users,
datastore_manager, packages, volume_size,
backup_id, availability_zone, root_password, nics,
overrides, cluster_config, snapshot, volume_type,
modules, scheduler_hints):
# It is the caller's responsibility to ensure that
# FreshInstanceTasks.wait_for_instance is called after
# create_instance to ensure that the proper usage event gets sent
modules, scheduler_hints, access=None):
"""Create trove instance.
LOG.info("Creating instance %s.", self.id)
security_groups = None
if CONF.trove_security_groups_support:
try:
security_groups = self._create_secgroup(datastore_manager)
except Exception as e:
log_fmt = "Error creating security group for instance: %s"
exc_fmt = _("Error creating security group for instance: %s")
err = inst_models.InstanceTasks.BUILDING_ERROR_SEC_GROUP
self._log_and_raise(e, log_fmt, exc_fmt, self.id, err)
else:
LOG.info("Successfully created security group %s for "
"instance: %s", security_groups, self.id)
if CONF.management_networks:
# The management network is always the last one
nics.pop(-1)
port_id = self._create_management_port(
CONF.management_networks[-1],
security_groups
)
LOG.info("Management port %s created for instance: %s",
port_id, self.id)
nics.append({"port-id": port_id})
It is the caller's responsibility to ensure that
FreshInstanceTasks.wait_for_instance is called after
create_instance to ensure that the proper usage event gets sent
"""
LOG.info(
"Creating instance %s, nics: %s, access: %s",
self.id, nics, access
)
networks = self._prepare_networks_for_instance(
datastore_manager, nics, access=access
)
files = self.get_injected_files(datastore_manager)
cinder_volume_type = volume_type or CONF.cinder_volume_type
volume_info = self._create_server_volume(
flavor['id'],
image_id,
security_groups,
datastore_manager,
volume_size,
availability_zone,
nics,
files,
cinder_volume_type,
flavor['id'], image_id,
datastore_manager, volume_size,
availability_zone, networks,
files, cinder_volume_type,
scheduler_hints
)
@ -785,11 +813,9 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
'to_': str(to_)})
return final
def _create_server_volume(self, flavor_id, image_id,
security_groups, datastore_manager,
volume_size, availability_zone,
nics, files, volume_type,
scheduler_hints):
def _create_server_volume(self, flavor_id, image_id, datastore_manager,
volume_size, availability_zone, nics, files,
volume_type, scheduler_hints):
LOG.debug("Begin _create_server_volume for id: %s", self.id)
server = None
volume_info = self._build_volume_info(datastore_manager,
@ -797,11 +823,13 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
volume_type=volume_type)
block_device_mapping_v2 = volume_info['block_device']
try:
server = self._create_server(flavor_id, image_id, security_groups,
datastore_manager,
block_device_mapping_v2,
availability_zone, nics, files,
scheduler_hints)
server = self._create_server(
flavor_id, image_id,
datastore_manager,
block_device_mapping_v2,
availability_zone, nics, files,
scheduler_hints
)
server_id = server.id
# Save server ID.
self.update_db(compute_instance_id=server_id)
@ -863,7 +891,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
volume_client = create_cinder_client(self.context, self.region_name)
volume_desc = ("datastore volume for %s" % self.id)
volume_ref = volume_client.volumes.create(
volume_size, name="datastore-%s" % self.id,
volume_size, name="trove-%s" % self.id,
description=volume_desc,
volume_type=volume_type)
@ -932,10 +960,9 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
userdata = f.read()
return userdata
def _create_server(self, flavor_id, image_id, security_groups,
datastore_manager, block_device_mapping_v2,
availability_zone, nics, files={},
scheduler_hints=None):
def _create_server(self, flavor_id, image_id, datastore_manager,
block_device_mapping_v2, availability_zone,
nics, files={}, scheduler_hints=None):
userdata = self._prepare_userdata(datastore_manager)
name = self.hostname or self.name
bdmap_v2 = block_device_mapping_v2
@ -943,11 +970,12 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
key_name = CONF.nova_keypair
server = self.nova_client.servers.create(
name, image_id, flavor_id, files=files, userdata=userdata,
security_groups=security_groups, block_device_mapping_v2=bdmap_v2,
availability_zone=availability_zone, nics=nics,
name, image_id, flavor_id, key_name=key_name, nics=nics,
block_device_mapping_v2=bdmap_v2,
files=files, userdata=userdata,
availability_zone=availability_zone,
config_drive=config_drive, scheduler_hints=scheduler_hints,
key_name=key_name)
)
LOG.debug("Created new compute instance %(server_id)s "
"for database instance %(id)s",
{'server_id': server.id, 'id': self.id})
@ -1015,47 +1043,35 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
LOG.debug("%(gt)s: DNS not enabled for instance: %(id)s",
{'gt': greenthread.getcurrent(), 'id': self.id})
def _create_secgroup(self, datastore_manager):
security_group = SecurityGroup.create_for_instance(
self.id, self.context, self.region_name)
tcp_ports = CONF.get(datastore_manager).tcp_ports
udp_ports = CONF.get(datastore_manager).udp_ports
icmp = CONF.get(datastore_manager).icmp
self._create_rules(security_group, tcp_ports, 'tcp')
self._create_rules(security_group, udp_ports, 'udp')
if icmp:
self._create_rules(security_group, None, 'icmp')
return [security_group["name"]]
def _create_secgroup(self, datastore_manager, allowed_cidrs):
name = "%s-%s" % (CONF.trove_security_group_name_prefix, self.id)
def _create_rules(self, s_group, ports, protocol):
err = inst_models.InstanceTasks.BUILDING_ERROR_SEC_GROUP
err_msg = _("Failed to create security group rules for instance "
"%(instance_id)s: Invalid port format - "
"FromPort = %(from)s, ToPort = %(to)s")
try:
sg_id = neutron.create_security_group(
self.neutron_client, name, self.id
)
def set_error_and_raise(port_or_range):
from_port, to_port = port_or_range
self.update_db(task_status=err)
msg = err_msg % {'instance_id': self.id, 'from': from_port,
'to': to_port}
raise MalformedSecurityGroupRuleError(message=msg)
if not allowed_cidrs:
allowed_cidrs = [CONF.trove_security_group_rule_cidr]
tcp_ports = CONF.get(datastore_manager).tcp_ports
udp_ports = CONF.get(datastore_manager).udp_ports
cidr = CONF.trove_security_group_rule_cidr
neutron.create_security_group_rule(
self.neutron_client, sg_id, 'tcp', tcp_ports, allowed_cidrs
)
neutron.create_security_group_rule(
self.neutron_client, sg_id, 'udp', udp_ports, allowed_cidrs
)
except Exception:
message = ("Failed to create security group for instance %s"
% self.id)
LOG.exception(message)
self.update_db(
task_status=inst_models.InstanceTasks.BUILDING_ERROR_SEC_GROUP
)
raise TroveError(message=message)
if protocol == 'icmp':
SecurityGroupRule.create_sec_group_rule(
s_group, 'icmp', None, None,
cidr, self.context, self.region_name)
else:
for port_or_range in set(ports):
try:
from_, to_ = (None, None)
from_, to_ = port_or_range[0], port_or_range[-1]
SecurityGroupRule.create_sec_group_rule(
s_group, protocol, int(from_), int(to_),
cidr, self.context, self.region_name)
except (ValueError, TroveError):
set_error_and_raise([from_, to_])
return sg_id
class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
@ -1064,7 +1080,9 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
"""
def _delete_resources(self, deleted_at):
LOG.debug("Begin _delete_resources for instance %s", self.id)
LOG.info("Starting to delete resources for instance %s", self.id)
# Stop db
server_id = self.db_info.compute_instance_id
old_server = self.nova_client.servers.get(server_id)
try:
@ -1078,79 +1096,98 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
"any resources.", self.id)
self.guest.stop_db()
except Exception as e:
LOG.exception("Failed to stop the datastore before attempting "
"to delete instance id %s, error: %s", self.id,
six.text_type(e))
LOG.warning("Failed to stop the datastore before attempting "
"to delete instance id %s, error: %s", self.id,
six.text_type(e))
# Nova VM
try:
LOG.info("Deleting server for instance %s", self.id)
self.server.delete()
except Exception as e:
LOG.exception("Failed to delete compute server %s", self.server.id,
six.text_type(e))
LOG.warning("Failed to delete compute server %s", self.server.id,
six.text_type(e))
# Neutron ports
try:
neutron_client = create_neutron_client(self.context)
ret = neutron_client.list_ports(name='trove-%s' % self.id)
if ret.get("ports", []):
neutron_client.delete_port(ret["ports"][0]["id"])
ret = self.neutron_client.list_ports(name='trove-%s' % self.id)
ports = ret.get("ports", [])
for port in ports:
LOG.info("Deleting port %s for instance %s", port["id"],
self.id)
neutron.delete_port(self.neutron_client, port["id"])
except Exception as e:
LOG.error("Failed to delete management port of instance %s, "
"error: %s", self.id, six.text_type(e))
LOG.warning("Failed to delete ports for instance %s, "
"error: %s", self.id, six.text_type(e))
# Neutron security groups
try:
name = "%s-%s" % (CONF.trove_security_group_name_prefix, self.id)
ret = self.neutron_client.list_security_groups(name=name)
sgs = ret.get("security_groups", [])
for sg in sgs:
LOG.info("Deleting security group %s for instance %s",
sg["id"], self.id)
self.neutron_client.delete_security_group(sg["id"])
except Exception as e:
LOG.warning("Failed to delete security groups for instance %s, "
"error: %s", self.id, six.text_type(e))
# DNS resources, e.g. Designate
try:
dns_support = CONF.trove_dns_support
LOG.debug("trove dns support = %s", dns_support)
if dns_support:
dns_api = create_dns_client(self.context)
dns_api.delete_instance_entry(instance_id=self.id)
except Exception as e:
LOG.error("Failed to delete dns entry of instance %s, error: %s",
self.id, six.text_type(e))
LOG.warning("Failed to delete dns entry of instance %s, error: %s",
self.id, six.text_type(e))
# Nova server group
try:
srv_grp.ServerGroup.delete(self.context, self.server_group)
except Exception as e:
LOG.error("Failed to delete server group for %s, error: %s",
self.id, six.text_type(e))
LOG.warning("Failed to delete server group for %s, error: %s",
self.id, six.text_type(e))
def server_is_finished():
try:
server = self.nova_client.servers.get(server_id)
if not self.server_status_matches(['SHUTDOWN', 'ACTIVE'],
server=server):
LOG.error("Server %(server_id)s entered ERROR status "
"when deleting instance %(instance_id)s!",
{'server_id': server.id, 'instance_id': self.id})
LOG.warning("Server %(server_id)s entered ERROR status "
"when deleting instance %(instance_id)s!",
{'server_id': server.id,
'instance_id': self.id})
return False
except nova_exceptions.NotFound:
return True
try:
LOG.info("Waiting for server %s removal for instance %s",
server_id, self.id)
utils.poll_until(server_is_finished, sleep_time=2,
time_out=CONF.server_delete_time_out)
except PollTimeOut:
LOG.error("Failed to delete instance %(instance_id)s: "
"Timeout deleting compute server %(server_id)s",
{'instance_id': self.id, 'server_id': server_id})
LOG.warning("Failed to delete instance %(instance_id)s: "
"Timeout deleting compute server %(server_id)s",
{'instance_id': self.id, 'server_id': server_id})
# If volume has been resized it must be manually removed
try:
if self.volume_id:
volume_client = create_cinder_client(self.context,
self.region_name)
volume = volume_client.volumes.get(self.volume_id)
volume = self.volume_client.volumes.get(self.volume_id)
if volume.status == "available":
LOG.info("Deleting volume %(v)s for instance: %(i)s.",
{'v': self.volume_id, 'i': self.id})
volume.delete()
except Exception as e:
LOG.error("Failed to delete volume of instance %s, error: %s",
self.id, six.text_type(e))
LOG.warning("Failed to delete volume for instance %s, error: %s",
self.id, six.text_type(e))
TroveInstanceDelete(instance=self,
deleted_at=timeutils.isotime(deleted_at),
server=old_server).notify()
LOG.debug("End _delete_resources for instance %s", self.id)
LOG.info("Finished to delete resources for instance %s", self.id)
def server_status_matches(self, expected_status, server=None):
if not server:
@ -1205,7 +1242,7 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
self.id)
raise
return run_with_quotas(self.context.tenant, {'backups': 1},
return run_with_quotas(self.context.project_id, {'backups': 1},
_get_replication_snapshot)
def detach_replica(self, master, for_failover=False):

View File

@ -20,7 +20,6 @@ import unittest
import uuid
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_false
from proboscis.asserts import assert_is_not_none
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_raises
@ -39,7 +38,6 @@ from trove import tests
from trove.tests.config import CONFIG
from trove.tests.util.check import AttrCheck
from trove.tests.util import create_dbaas_client
from trove.tests.util import create_nova_client
from trove.tests.util import dns_checker
from trove.tests.util import iso_time
from trove.tests.util import test_config
@ -377,25 +375,60 @@ class CreateInstanceFail(object):
self.delete_async(result.id)
@test
def test_create_with_bad_nics(self):
instance_name = "instance-failure-with-bad-nics"
def test_create_with_invalid_net_id(self):
instance_name = "instance-failure-with-invalid-net"
if VOLUME_SUPPORT:
volume = {'size': CONFIG.get('trove_volume_size', 1)}
else:
volume = None
databases = []
bad_nic = [{"port-id": "UNKNOWN", "net-id": "1234",
"v4-fixed-ip": "1.2.3.4"}]
result = dbaas.instances.create(instance_name,
instance_info.dbaas_flavor_href,
volume, databases, nics=bad_nic)
bad_nic = [{"net-id": "1234"}]
poll_until(self.instance_in_error(result.id))
instance = dbaas.instances.get(result.id)
assert_equal("ERROR", instance.status)
assert_raises(
exceptions.BadRequest,
dbaas.instances.create,
instance_name, instance_info.dbaas_flavor_href,
volume, databases, nics=bad_nic
)
assert_equal(400, dbaas.last_http_code)
self.delete_async(result.id)
@test
def test_create_with_multiple_net_id(self):
    """Creating an instance with more than one network must 400."""
    instance_name = "instance_failure_with_multiple_net_id"
    volume = {'size': CONFIG.get('trove_volume_size', 1)}
    databases = []
    multi_nics = [{"net-id": str(uuid.uuid4())} for _ in range(2)]
    assert_raises(exceptions.BadRequest,
                  dbaas.instances.create,
                  instance_name,
                  instance_info.dbaas_flavor_href,
                  volume,
                  databases,
                  nics=multi_nics)
    assert_equal(400, dbaas.last_http_code)
@test
def test_create_with_port_id(self):
    """Passing a port in 'nics' is restricted and must return 400."""
    instance_name = "instance-failure-with-port-id"
    volume = ({'size': CONFIG.get('trove_volume_size', 1)}
              if VOLUME_SUPPORT else None)
    databases = []
    bad_nic = [{"port-id": "1234"}]
    assert_raises(exceptions.BadRequest,
                  dbaas.instances.create,
                  instance_name,
                  instance_info.dbaas_flavor_href,
                  volume,
                  databases,
                  nics=bad_nic)
    assert_equal(400, dbaas.last_http_code)
@test
def test_create_failure_with_empty_flavor(self):
instance_name = "instance-failure-with-empty-flavor"
databases = []
@ -442,18 +475,6 @@ class CreateInstanceFail(object):
nics=instance_info.nics)
assert_equal(501, dbaas.last_http_code)
def test_create_failure_with_volume_size_and_disabled_for_datastore(self):
    """Requesting a volume for a datastore without volume support fails."""
    instance_name = "instance-failure-volume-size_and_volume_disabled"
    databases = []
    datastore = 'redis'
    # The redis test config must have volume support off for this
    # scenario to be meaningful.
    assert_equal(CONFIG.get(datastore, 'redis')['volume_support'], False)
    volume = {'size': 2}
    assert_raises(exceptions.HTTPNotImplemented,
                  dbaas.instances.create,
                  instance_name,
                  instance_info.dbaas_flavor_href,
                  volume,
                  databases,
                  datastore=datastore,
                  nics=instance_info.nics)
    assert_equal(501, dbaas.last_http_code)
@test(enabled=EPHEMERAL_SUPPORT)
def test_create_failure_with_no_ephemeral_flavor(self):
instance_name = "instance-failure-with-no-ephemeral-flavor"
@ -788,91 +809,6 @@ class CreateInstanceFlavors(object):
self._create_with_flavor('custom')
@test(depends_on_classes=[InstanceSetup], groups=[GROUP_NEUTRON])
class CreateInstanceWithNeutron(unittest.TestCase):
@time_out(TIMEOUT_INSTANCE_CREATE)
def setUp(self):
if not CONFIG.values.get('neutron_enabled'):
raise SkipTest("neutron is not enabled, skipping")
user = test_config.users.find_user(
Requirements(is_admin=False, services=["nova", "trove"]))
self.nova_client = create_nova_client(user)
self.dbaas_client = create_dbaas_client(user)
self.result = None
self.instance_name = ("TEST_INSTANCE_CREATION_WITH_NICS"
+ str(uuid.uuid4()))
databases = []
self.default_cidr = CONFIG.values.get('shared_network_subnet', None)
if VOLUME_SUPPORT:
volume = {'size': CONFIG.get('trove_volume_size', 1)}
else:
volume = None
self.result = self.dbaas_client.instances.create(
self.instance_name,
instance_info.dbaas_flavor_href,
volume, databases,
nics=instance_info.nics)
self.instance_id = self.result.id
def verify_instance_is_active():
result = self.dbaas_client.instances.get(self.instance_id)
if result.status == "ACTIVE":
return True
else:
assert_equal("BUILD", result.status)
return False
poll_until(verify_instance_is_active)
def tearDown(self):
if self.result.id is not None:
self.dbaas_client.instances.delete(self.result.id)
while True:
try:
self.dbaas_client.instances.get(self.result.id)
except exceptions.NotFound:
return True
time.sleep(1)
def check_ip_within_network(self, ip, network):
octet_list = str(ip).split(".")
octets, mask = str(network).split("/")
octet_list_ = octets.split(".")
for i in range(int(mask) / 8):
if octet_list[i] != octet_list_[i]:
return False
return True
def test_ip_within_cidr(self):
nova_instance = None
for server in self.nova_client.servers.list():
if str(server.name) == self.instance_name:
nova_instance = server
break
if nova_instance is None:
fail("instance created with neutron enabled is not found in nova")
for address in nova_instance.addresses['private']:
ip = address['addr']
assert_true(self.check_ip_within_network(ip, self.default_cidr))
# black list filtered ip not visible via troveclient
trove_instance = self.dbaas_client.instances.get(self.result.id)
for ip in trove_instance.ip:
if str(ip).startswith('10.'):
assert_true(self.check_ip_within_network(ip, "10.0.0.0/24"))
assert_false(self.check_ip_within_network(ip, "10.0.1.0/24"))
@test(depends_on_classes=[CreateInstance],
groups=[GROUP,
GROUP_START,

View File

@ -1,207 +0,0 @@
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nose.plugins.skip import SkipTest
from proboscis import after_class
from proboscis import asserts
from proboscis import before_class
from proboscis.decorators import time_out
from proboscis import test
import six
from troveclient.compat import exceptions
from trove.common.utils import poll_until
from trove import tests
from trove.tests.api.instances import instance_info
from trove.tests.config import CONFIG
from trove.tests.util import create_dbaas_client
from trove.tests.util import test_config
from trove.tests.util.users import Requirements
GROUP = "dbaas.api.mgmt.accounts"
@test(groups=[tests.DBAAS_API, GROUP, tests.PRE_INSTANCES],
      depends_on_groups=["services.initialize"])
class AccountsBeforeInstanceCreation(object):
    """Mgmt accounts API checks run before any instance exists."""

    @before_class
    def setUp(self):
        admin_requirements = Requirements(is_admin=True)
        self.user = test_config.users.find_user(admin_requirements)
        self.client = create_dbaas_client(self.user)

    @test
    def test_invalid_account_fails(self):
        """Showing a bogus account must not resolve to our own tenant."""
        shown = self.client.accounts.show("badaccount")
        asserts.assert_not_equal(self.user.tenant_id, shown.id)
@test(groups=[tests.INSTANCES, GROUP], depends_on_groups=["dbaas.listing"])
class AccountsAfterInstanceCreation(object):
    """Mgmt accounts API checks run once an instance has been created."""
    @before_class
    def setUp(self):
        self.user = test_config.users.find_user(Requirements(is_admin=True))
        self.client = create_dbaas_client(self.user)
    @test
    def test_account_details_available(self):
        """The owner's account detail must reference the created instance."""
        if CONFIG.fake_mode:
            raise SkipTest("Skipping this as auth is faked anyway.")
        account_info = self.client.accounts.show(instance_info.user.tenant_id)
        # Now check the results.
        expected = instance_info.user.tenant_id
        if expected is None:
            expected = "None"
        print("account_id.id = '%s'" % account_info.id)
        print("expected = '%s'" % expected)
        asserts.assert_equal(expected, account_info.id)
        # Instances: there should at least be one instance
        asserts.assert_true(len(account_info.instance_ids) > 0)
        # The instance id should be one of the instances for the account
        asserts.assert_true(instance_info.id in account_info.instance_ids)
    @test
    def test_list_accounts(self):
        """The account index must list exactly one account: the owner's."""
        if CONFIG.fake_mode:
            raise SkipTest("Skipping this as auth is faked anyway.")
        accounts_info = self.client.accounts.index()
        asserts.assert_equal(1, len(accounts_info.accounts))
        account = accounts_info.accounts[0]
        asserts.assert_true(account['num_instances'] > 0)
        asserts.assert_equal(instance_info.user.tenant_id, account['id'])
@test(groups=[tests.POST_INSTANCES, GROUP],
      depends_on_groups=["dbaas.guest.shutdown"])
class AccountsAfterInstanceDeletion(object):
    """Mgmt accounts API checks run after the instance is deleted."""

    @before_class
    def setUp(self):
        requirements = Requirements(is_admin=True)
        self.user = test_config.users.find_user(requirements)
        self.client = create_dbaas_client(self.user)

    @test
    def test_instance_id_removed_from_account(self):
        """A deleted instance must no longer be listed for its account."""
        tenant_id = instance_info.user.tenant_id
        account_info = self.client.accounts.show(tenant_id)
        deleted_id = instance_info.id
        asserts.assert_true(deleted_id not in account_info.instance_ids)
@test(groups=["fake.dbaas.api.mgmt.allaccounts"],
      depends_on_groups=["services.initialize"])
class AllAccounts(object):
    """Exercises the accounts index across several non-admin users."""
    # Number of instances created for each user.
    max = 5
    def _delete_instances_for_users(self):
        # Keep issuing deletes until every instance for a user reports
        # NotFound, i.e. deletion has fully completed for all of them.
        for user in self.users:
            user_client = create_dbaas_client(user)
            while True:
                deleted_count = 0
                user_instances = user_client.instances.list()
                for instance in user_instances:
                    try:
                        instance.delete()
                    except exceptions.NotFound:
                        deleted_count += 1
                    except Exception:
                        print("Failed to delete instance")
                if deleted_count == len(user_instances):
                    break
    def _create_instances_for_users(self):
        # Create self.max small (flavor 1, 1 GB volume) instances per user.
        for user in self.users:
            user_client = create_dbaas_client(user)
            for index in range(self.max):
                name = "instance-%s-%03d" % (user.auth_user, index)
                user_client.instances.create(name, 1, {'size': 1}, [], [])
    @before_class
    def setUp(self):
        admin_req = Requirements(is_admin=True)
        self.admin_user = test_config.users.find_user(admin_req)
        self.admin_client = create_dbaas_client(self.admin_user)
        user_req = Requirements(is_admin=False)
        self.users = test_config.users.find_all_users_who_satisfy(user_req)
        self.user_tenant_ids = [user.tenant_id for user in self.users]
        self._create_instances_for_users()
    @test
    def test_list_accounts_with_multiple_users(self):
        """Every listed account belongs to a known user and reports
        exactly the number of instances created for it."""
        accounts_info = self.admin_client.accounts.index()
        for account in accounts_info.accounts:
            asserts.assert_true(account['id'] in self.user_tenant_ids)
            asserts.assert_equal(self.max, account['num_instances'])
    @after_class(always_run=True)
    @time_out(60)
    def tear_down(self):
        self._delete_instances_for_users()
@test(groups=["fake.%s.broken" % GROUP],
      depends_on_groups=["services.initialize"],
      runs_after_groups=["dbaas.guest.shutdown"])
class AccountWithBrokenInstance(object):
    """Checks the accounts API copes with an instance whose compute
    server failed to provision (status ERROR).
    """
    @before_class
    def setUpACCR(self):
        from trove.taskmanager.models import CONF
        # Disable DNS support so provisioning fails at the compute step.
        self.old_dns_support = CONF.trove_dns_support
        CONF.trove_dns_support = False
        self.user = test_config.users.find_user(Requirements(is_admin=True))
        self.client = create_dbaas_client(self.user)
        self.name = 'test_SERVER_ERROR'
        # Create an instance with a broken compute instance.
        volume = None
        if CONFIG.trove_volume_support:
            volume = {'size': 1}
        self.response = self.client.instances.create(
            self.name,
            instance_info.dbaas_flavor_href,
            volume,
            [])
        poll_until(lambda: self.client.instances.get(self.response.id),
                   lambda instance: instance.status == 'ERROR',
                   time_out=10)
        self.instance = self.client.instances.get(self.response.id)
        print("Status: %s" % self.instance.status)
        msg = "Instance did not drop to error after server prov failure."
        asserts.assert_equal(self.instance.status, "ERROR", msg)
    @test
    def no_compute_instance_no_problem(self):
        """Get account by ID shows even instances lacking computes."""
        if test_config.auth_strategy == "fake":
            raise SkipTest("Skipping this as auth is faked anyway.")
        account_info = self.client.accounts.show(self.user.tenant_id)
        # All we care about is that accounts.show doesn't 500 on us
        # for having a broken instance in the roster.
        asserts.assert_equal(len(account_info.instances), 1)
        instance = account_info.instances[0]
        asserts.assert_true(isinstance(instance['id'], six.string_types))
        asserts.assert_equal(len(instance['id']), 36)
        asserts.assert_equal(instance['name'], self.name)
        asserts.assert_equal(instance['status'], "ERROR")
        # Fix: only the 'asserts' namespace is imported by this module,
        # so the previously-unqualified assert_is_none was a NameError.
        asserts.assert_is_none(instance['host'])
    @after_class
    def tear_down(self):
        self.client.instances.delete(self.response.id)
    @after_class
    def restore_dns(self):
        from trove.taskmanager.models import CONF
        CONF.trove_dns_support = self.old_dns_support

View File

@ -39,21 +39,6 @@ class TestAdminRequired(object):
self.user = test_config.users.find_user(Requirements(is_admin=False))
self.dbaas = create_dbaas_client(self.user)
@test
def test_accounts_show(self):
"""A regular user may not view the details of any account."""
assert_raises(Unauthorized, self.dbaas.accounts.show, 0)
@test
def test_hosts_index(self):
"""A regular user may not view the list of hosts."""
assert_raises(Unauthorized, self.dbaas.hosts.index)
@test
def test_hosts_get(self):
"""A regular user may not view the details of any host."""
assert_raises(Unauthorized, self.dbaas.hosts.get, 0)
@test
def test_mgmt_show(self):
"""
@ -81,11 +66,6 @@ class TestAdminRequired(object):
"""A regular user may not perform an instance task status reset."""
assert_raises(Unauthorized, self.dbaas.management.reset_task_status, 0)
@test
def test_storage_index(self):
"""A regular user may not view the list of storage available."""
assert_raises(Unauthorized, self.dbaas.storage.index)
@test
def test_diagnostics_get(self):
"""A regular user may not view the diagnostics."""

View File

@ -1,214 +0,0 @@
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_true
from proboscis import before_class
from proboscis.check import Check
from proboscis import test
from troveclient.compat import exceptions
from trove.tests.api.instances import create_new_instance
from trove.tests.api.instances import CreateInstance
from trove.tests.config import CONFIG
from trove.tests import DBAAS_API
from trove.tests import INSTANCES
from trove.tests import PRE_INSTANCES
from trove.tests.util import create_dbaas_client
from trove.tests.util.users import Requirements
GROUP = "dbaas.api.mgmt.hosts"
def percent_boundary(used_ram, total_ram):
    """Return (lower, upper) bounds for the percentage of RAM in use.

    The computed percentage is widened by +/- 2 points to tolerate
    rounding differences between client and service.
    """
    percent = int((1.0 * used_ram / total_ram) * 100)
    return percent - 2, percent + 2
@test(groups=[DBAAS_API, GROUP, PRE_INSTANCES],
      depends_on_groups=["services.initialize"],
      enabled=create_new_instance())
class HostsBeforeInstanceCreation(object):
    """Mgmt hosts API checks run before any instance exists."""
    @before_class
    def setUp(self):
        self.user = CONFIG.users.find_user(Requirements(is_admin=True))
        self.client = create_dbaas_client(self.user)
        # Populated by test_empty_index_host_list for the follow-up test.
        self.host = None
    @test
    def test_empty_index_host_list(self):
        """The host index must return at least one host."""
        host_index_result = self.client.hosts.index()
        assert_not_equal(host_index_result, None,
                         "list hosts call should not be empty: %s" %
                         str(host_index_result))
        assert_true(len(host_index_result) > 0,
                    "list hosts length should be greater than zero: %r" %
                    host_index_result)
        self.host = host_index_result[0]
        assert_true(self.host is not None, "Expected to find a host.")
    @test(depends_on=[test_empty_index_host_list])
    def test_empty_index_host_list_single(self):
        """Host detail must report internally-consistent RAM figures."""
        # Escape the dots so the host name survives URL routing.
        self.host.name = self.host.name.replace(".", r"\.")
        result = self.client.hosts.get(self.host)
        assert_not_equal(result, None,
                         "Get host should not be empty for: %s" % self.host)
        with Check() as check:
            used_ram = int(result.usedRAM)
            total_ram = int(result.totalRAM)
            percent_used = int(result.percentUsed)
            lower, upper = percent_boundary(used_ram, total_ram)
            check.true(percent_used > lower,
                       "percentUsed %r is below the lower boundary %r"
                       % (percent_used, lower))
            check.true(percent_used < upper,
                       "percentUsed %r is above the upper boundary %r"
                       % (percent_used, upper))
            check.true(used_ram < total_ram,
                       "usedRAM %r should be less than totalRAM %r"
                       % (used_ram, total_ram))
            check.true(percent_used < 100,
                       "percentUsed should be less than 100 but was %r"
                       % percent_used)
            check.true(total_ram > 0,
                       "totalRAM should be greater than 0 but was %r"
                       % total_ram)
            check.true(used_ram < total_ram,
                       "usedRAM %r should be less than totalRAM %r"
                       % (used_ram, total_ram))
@test(groups=[INSTANCES, GROUP],
      depends_on=[CreateInstance],
      enabled=create_new_instance())
class HostsMgmtCommands(object):
    """Mgmt hosts API checks run after an instance has been created."""
    @before_class
    def setUp(self):
        self.user = CONFIG.users.find_user(Requirements(is_admin=True))
        self.client = create_dbaas_client(self.user)
        # Populated by test_index_host_list for the follow-up test.
        self.host = None
    @test
    def test_index_host_list(self):
        """Find a host that reports at least one instance."""
        result = self.client.hosts.index()
        assert_not_equal(len(result), 0,
                         "list hosts should not be empty: %s" % str(result))
        hosts = []
        # Find a host with an instanceCount > 0
        for host in result:
            msg = 'Host: %s, Count: %s' % (host.name, host.instanceCount)
            hosts.append(msg)
            if int(host.instanceCount) > 0:
                self.host = host
                break
        msg = "Unable to find a host with instances: %r" % hosts
        assert_not_equal(self.host, None, msg)
    @test(depends_on=[test_index_host_list])
    def test_index_host_list_single(self):
        """Host detail must list instances and report sane RAM figures."""
        # Escape the dots so the host name survives URL routing.
        self.host.name = self.host.name.replace(".", r"\.")
        result = self.client.hosts.get(self.host)
        assert_not_equal(result, None,
                         "list hosts should not be empty: %s" % str(result))
        assert_true(len(result.instances) > 0,
                    "instance list on the host should not be empty: %r"
                    % result.instances)
        with Check() as check:
            used_ram = int(result.usedRAM)
            total_ram = int(result.totalRAM)
            percent_used = int(result.percentUsed)
            lower, upper = percent_boundary(used_ram, total_ram)
            check.true(percent_used > lower,
                       "percentUsed %r is below the lower boundary %r"
                       % (percent_used, lower))
            check.true(percent_used < upper,
                       "percentUsed %r is above the upper boundary %r"
                       % (percent_used, upper))
            check.true(used_ram < total_ram,
                       "usedRAM %r should be less than totalRAM %r"
                       % (used_ram, total_ram))
            check.true(percent_used < 100,
                       "percentUsed should be less than 100 but was %r"
                       % percent_used)
            check.true(total_ram > 0,
                       "totalRAM should be greater than 0 but was %r"
                       % total_ram)
            check.true(used_ram < total_ram,
                       "usedRAM %r should be less than totalRAM %r"
                       % (used_ram, total_ram))
            # Check all active instances and validate all the fields exist
            active_instance = None
            for instance in result.instances:
                print("instance: %s" % instance)
                if instance['status'] != 'ACTIVE':
                    continue
                active_instance = instance
                check.is_not_none(instance['id'])
                check.is_not_none(instance['name'])
                check.is_not_none(instance['status'])
                check.is_not_none(instance['server_id'])
                check.is_not_none(instance['tenant_id'])
            check.true(active_instance is not None, "No active instances")
    def _get_ids(self):
        """Get all the ids of instances that are ACTIVE."""
        ids = []
        results = self.client.hosts.index()
        for host in results:
            result = self.client.hosts.get(host)
            for instance in result.instances:
                if instance['status'] == 'ACTIVE':
                    ids.append(instance['id'])
        return ids
    @test
    def test_update_hosts(self):
        """Updating all hosts should change each guest's reported version
        (verified only in fake mode)."""
        ids = self._get_ids()
        assert_not_equal(ids, [], "No active instances found")
        before_versions = {}
        for _id in ids:
            diagnostics = self.client.diagnostics.get(_id)
            before_versions[_id] = diagnostics.version
        hosts = self.client.hosts.index()
        for host in hosts:
            self.client.hosts.update_all(host.name)
        after_versions = {}
        for _id in ids:
            diagnostics = self.client.diagnostics.get(_id)
            after_versions[_id] = diagnostics.version
        assert_not_equal(before_versions, {},
                         "No versions found before update")
        assert_not_equal(after_versions, {},
                         "No versions found after update")
        if CONFIG.fake_mode:
            for _id in after_versions:
                assert_not_equal(before_versions[_id], after_versions[_id])
    @test
    def test_host_not_found(self):
        """A bogus host name must yield NotFound."""
        hostname = "host@$%3dne"
        assert_raises(exceptions.NotFound, self.client.hosts.get, hostname)

View File

@ -159,11 +159,19 @@ class WhenMgmtInstanceGetIsCalledButServerIsNotReady(object):
vol_support = CONFIG.get(datastore['type'], 'mysql')['volume_support']
if vol_support:
body.update({'size': 13})
shared_network = CONFIG.get('shared_network', None)
if shared_network:
nics = [{'net-id': shared_network}]
response = self.client.instances.create(
'test_SERVER_ERROR',
instance_info.dbaas_flavor_href,
body,
[])
[], [],
nics=nics
)
poll_until(lambda: self.client.instances.get(response.id),
lambda instance: instance.status == 'ERROR',
time_out=10)

View File

@ -43,6 +43,10 @@ class MalformedJson(object):
volume = None
if VOLUME_SUPPORT:
volume = {"size": 1}
shared_network = CONFIG.get('shared_network', None)
if shared_network:
nics = [{'net-id': shared_network}]
self.instance = self.dbaas.instances.create(
name="qe_instance",
flavor_id=instance_info.dbaas_flavor_href,
@ -50,7 +54,9 @@ class MalformedJson(object):
datastore_version=instance_info.dbaas_datastore_version,
volume=volume,
databases=[{"name": "firstdb", "character_set": "latin2",
"collate": "latin2_general_ci"}])
"collate": "latin2_general_ci"}],
nics=nics
)
@after_class
def tearDown(self):

View File

@ -1,115 +0,0 @@
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nose.plugins.skip import SkipTest
from proboscis import asserts
from proboscis import before_class
from proboscis import test
from trove import tests
from trove.tests.api.instances import CheckInstance
from trove.tests.api.instances import instance_info
from trove.tests.util import create_dbaas_client
from trove.tests.util import test_config
from trove.tests.util.users import Requirements
FAKE_MODE = test_config.values['fake_mode']
GROUP = "dbaas.api.mgmt.storage"
@test(groups=[tests.DBAAS_API, GROUP, tests.PRE_INSTANCES],
      depends_on_groups=["services.initialize"])
class StorageBeforeInstanceCreation(object):
    """Validates the mgmt storage index before any instance exists."""
    @before_class
    def setUp(self):
        self.user = test_config.users.find_user(Requirements(is_admin=True))
        self.client = create_dbaas_client(self.user)
    @test
    def test_storage_on_host(self):
        """Every storage device must expose the expected attributes."""
        if not FAKE_MODE:
            raise SkipTest("Volume driver currently not working.")
        storage = self.client.storage.index()
        print("storage : %r" % storage)
        for device in storage:
            asserts.assert_true(hasattr(device, 'name'),
                                "device.name: %r" % device.name)
            # Fix: the failure message previously printed device.name
            # instead of the attribute actually under test.
            asserts.assert_true(hasattr(device, 'type'),
                                "device.type: %r" % device.type)
            asserts.assert_true(hasattr(device, 'used'),
                                "device.used: %r" % device.used)
            asserts.assert_true(hasattr(device, 'provision'),
                                "device.provision: %r" % device.provision)
            provision = device.provision
            asserts.assert_true('available' in provision,
                                "provision.available: "
                                + "%r" % provision['available'])
            asserts.assert_true('percent' in provision,
                                "provision.percent: %r" % provision['percent'])
            asserts.assert_true('total' in provision,
                                "provision.total: %r" % provision['total'])
            asserts.assert_true(hasattr(device, 'capacity'),
                                "device.capacity: %r" % device.capacity)
            capacity = device.capacity
            asserts.assert_true('available' in capacity,
                                "capacity.available: "
                                + "%r" % capacity['available'])
            asserts.assert_true('total' in capacity,
                                "capacity.total: %r" % capacity['total'])
        # Snapshot for StorageAfterInstanceCreation to compare against.
        instance_info.storage = storage
@test(groups=[tests.INSTANCES, GROUP],
      depends_on_groups=["dbaas.listing"])
class StorageAfterInstanceCreation(object):
    """Compares the storage index against the snapshot taken before
    instance creation (instance_info.storage)."""
    @before_class
    def setUp(self):
        self.user = test_config.users.find_user(Requirements(is_admin=True))
        self.client = create_dbaas_client(self.user)
    @test
    def test_storage_on_host(self):
        """Device attributes must match the pre-creation snapshot."""
        if not FAKE_MODE:
            raise SkipTest("Volume driver currently not working.")
        storage = self.client.storage.index()
        print("storage : %r" % storage)
        print("instance_info.storage : %r" % instance_info.storage)
        allowed_attrs = ['name', 'type', 'used', 'provision', 'capacity']
        for index, device in enumerate(storage):
            CheckInstance(None).contains_allowed_attrs(
                device._info,
                allowed_attrs, msg="Storage")
            asserts.assert_equal(device.name,
                                 instance_info.storage[index].name)
            asserts.assert_equal(device.used,
                                 instance_info.storage[index].used)
            asserts.assert_equal(device.type,
                                 instance_info.storage[index].type)
            provision = instance_info.storage[index].provision
            asserts.assert_equal(device.provision['available'],
                                 provision['available'])
            asserts.assert_equal(device.provision['percent'],
                                 provision['percent'])
            asserts.assert_equal(device.provision['total'], provision['total'])
            capacity = instance_info.storage[index].capacity
            asserts.assert_equal(device.capacity['available'],
                                 capacity['available'])
            asserts.assert_equal(device.capacity['total'], capacity['total'])

View File

@ -12,31 +12,28 @@
# License for the specific language governing permissions and limitations
# under the License.
from nose.plugins.skip import SkipTest
import proboscis
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_false
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_true
from proboscis import before_class
from proboscis import test
from troveclient.compat import exceptions
from trove.common import utils
from trove import tests
from trove.tests.api.databases import TestMysqlAccess
from trove.tests.api.instances import instance_info
from trove.tests.api.users import TestUsers
from trove.tests.api import instances
from trove.tests.config import CONFIG
from trove.tests import util
from trove.tests.util import test_config
from trove.tests.util import users as users_util
GROUP = "dbaas.api.root"
@test(depends_on_classes=[TestMysqlAccess],
runs_after=[TestUsers],
groups=[tests.DBAAS_API, GROUP, tests.INSTANCES])
@test(groups=[tests.DBAAS_API, GROUP, tests.INSTANCES])
class TestRoot(object):
"""
Test the root operations
@ -45,10 +42,79 @@ class TestRoot(object):
root_enabled_timestamp = 'Never'
system_users = ['root', 'debian_sys_maint']
@before_class
@proboscis.before_class
def setUp(self):
self.dbaas = util.create_dbaas_client(instance_info.user)
self.dbaas_admin = util.create_dbaas_client(instance_info.admin_user)
self.info = instances.InstanceTestInfo()
reqs = users_util.Requirements(is_admin=True)
self.info.admin_user = CONFIG.users.find_user(reqs)
self.info.dbaas_admin = self.dbaas_admin = util.create_dbaas_client(
self.info.admin_user
)
reqs = users_util.Requirements(is_admin=False)
self.info.user = CONFIG.users.find_user(reqs)
self.info.dbaas = self.dbaas = util.create_dbaas_client(self.info.user)
self.info.name = "TEST_%s" % self.__class__.__name__
flavor, flavor_href = self.info.find_default_flavor()
self.info.dbaas_flavor = flavor
self.info.dbaas_flavor_href = flavor_href
databases = []
databases.append({"name": "firstdb", "character_set": "latin2",
"collate": "latin2_general_ci"})
databases.append({"name": "db2"})
self.info.databases = databases
users = []
users.append({"name": "lite", "password": "litepass",
"databases": [{"name": "firstdb"}]})
self.info.users = users
self.info.dbaas_datastore = CONFIG.dbaas_datastore
self.info.dbaas_datastore_version = CONFIG.dbaas_datastore_version
self.info.volume = {'size': CONFIG.get('trove_volume_size', 2)}
self.info.initial_result = self.dbaas.instances.create(
self.info.name,
self.info.dbaas_flavor_href,
self.info.volume,
databases,
users,
nics=self.info.nics,
availability_zone="nova",
datastore=self.info.dbaas_datastore,
datastore_version=self.info.dbaas_datastore_version
)
assert_equal(200, self.dbaas.last_http_code)
self.id = self.info.initial_result.id
def result_is_active():
instance = self.dbaas.instances.get(self.id)
if instance.status == "ACTIVE":
return True
else:
# If its not ACTIVE, anything but BUILD must be
# an error.
assert_equal("BUILD", instance.status)
return False
utils.poll_until(result_is_active)
@proboscis.after_class
def tearDown(self):
self.dbaas.instances.delete(self.id)
def _is_delete():
try:
self.dbaas.instances.get(self.id)
except exceptions.NotFound:
return True
utils.poll_until(_is_delete)
def _verify_root_timestamp(self, id):
reh = self.dbaas_admin.management.root_enabled_history(id)
@ -57,16 +123,15 @@ class TestRoot(object):
assert_equal(id, reh.id)
def _root(self):
global root_password
self.dbaas.root.create(instance_info.id)
self.dbaas.root.create(self.id)
assert_equal(200, self.dbaas.last_http_code)
reh = self.dbaas_admin.management.root_enabled_history
self.root_enabled_timestamp = reh(instance_info.id).enabled
self.root_enabled_timestamp = reh(self.id).enabled
@test
def test_root_initially_disabled(self):
"""Test that root is disabled."""
enabled = self.dbaas.root.is_root_enabled(instance_info.id)
enabled = self.dbaas.root.is_root_enabled(self.id)
assert_equal(200, self.dbaas.last_http_code)
is_enabled = enabled
@ -78,18 +143,18 @@ class TestRoot(object):
def test_create_user_os_admin_failure(self):
users = [{"name": "os_admin", "password": "12345"}]
assert_raises(exceptions.BadRequest, self.dbaas.users.create,
instance_info.id, users)
self.id, users)
@test
def test_delete_user_os_admin_failure(self):
assert_raises(exceptions.BadRequest, self.dbaas.users.delete,
instance_info.id, "os_admin")
self.id, "os_admin")
@test(depends_on=[test_root_initially_disabled],
enabled=not test_config.values['root_removed_from_instance_api'])
def test_root_initially_disabled_details(self):
"""Use instance details to test that root is disabled."""
instance = self.dbaas.instances.get(instance_info.id)
instance = self.dbaas.instances.get(self.id)
assert_true(hasattr(instance, 'rootEnabled'),
"Instance has no rootEnabled property.")
assert_false(instance.rootEnabled, "Root SHOULD NOT be enabled.")
@ -98,15 +163,15 @@ class TestRoot(object):
@test(depends_on=[test_root_initially_disabled_details])
def test_root_disabled_in_mgmt_api(self):
"""Verifies in the management api that the timestamp exists."""
self._verify_root_timestamp(instance_info.id)
self._verify_root_timestamp(self.id)
@test(depends_on=[test_root_initially_disabled_details])
def test_root_disable_when_root_not_enabled(self):
reh = self.dbaas_admin.management.root_enabled_history
self.root_enabled_timestamp = reh(instance_info.id).enabled
self.root_enabled_timestamp = reh(self.id).enabled
assert_raises(exceptions.NotFound, self.dbaas.root.delete,
instance_info.id)
self._verify_root_timestamp(instance_info.id)
self.id)
self._verify_root_timestamp(self.id)
@test(depends_on=[test_root_disable_when_root_not_enabled])
def test_enable_root(self):
@ -122,14 +187,14 @@ class TestRoot(object):
Tests that despite having enabled root, user root doesn't appear
in the users list for the instance.
"""
users = self.dbaas.users.list(instance_info.id)
users = self.dbaas.users.list(self.id)
usernames = [user.name for user in users]
assert_true('root' not in usernames)
@test(depends_on=[test_enable_root])
def test_root_now_enabled(self):
"""Test that root is now enabled."""
enabled = self.dbaas.root.is_root_enabled(instance_info.id)
enabled = self.dbaas.root.is_root_enabled(self.id)
assert_equal(200, self.dbaas.last_http_code)
assert_true(enabled, "Root SHOULD be enabled.")
@ -137,12 +202,12 @@ class TestRoot(object):
enabled=not test_config.values['root_removed_from_instance_api'])
def test_root_now_enabled_details(self):
"""Use instance details to test that root is now enabled."""
instance = self.dbaas.instances.get(instance_info.id)
instance = self.dbaas.instances.get(self.id)
assert_true(hasattr(instance, 'rootEnabled'),
"Instance has no rootEnabled property.")
assert_true(instance.rootEnabled, "Root SHOULD be enabled.")
assert_not_equal(self.root_enabled_timestamp, 'Never')
self._verify_root_timestamp(instance_info.id)
self._verify_root_timestamp(self.id)
@test(depends_on=[test_root_now_enabled_details])
def test_reset_root(self):
@ -156,7 +221,7 @@ class TestRoot(object):
@test(depends_on=[test_reset_root])
def test_root_still_enabled(self):
"""Test that after root was reset it's still enabled."""
enabled = self.dbaas.root.is_root_enabled(instance_info.id)
enabled = self.dbaas.root.is_root_enabled(self.id)
assert_equal(200, self.dbaas.last_http_code)
assert_true(enabled, "Root SHOULD still be enabled.")
@ -166,23 +231,23 @@ class TestRoot(object):
"""Use instance details to test that after root was reset,
it's still enabled.
"""
instance = self.dbaas.instances.get(instance_info.id)
instance = self.dbaas.instances.get(self.id)
assert_true(hasattr(instance, 'rootEnabled'),
"Instance has no rootEnabled property.")
assert_true(instance.rootEnabled, "Root SHOULD still be enabled.")
assert_not_equal(self.root_enabled_timestamp, 'Never')
self._verify_root_timestamp(instance_info.id)
self._verify_root_timestamp(self.id)
@test(depends_on=[test_enable_root])
def test_root_cannot_be_deleted(self):
"""Even if root was enabled, the user root cannot be deleted."""
assert_raises(exceptions.BadRequest, self.dbaas.users.delete,
instance_info.id, "root")
self.id, "root")
@test(depends_on=[test_root_still_enabled_details])
def test_root_disable(self):
reh = self.dbaas_admin.management.root_enabled_history
self.root_enabled_timestamp = reh(instance_info.id).enabled
self.dbaas.root.delete(instance_info.id)
self.root_enabled_timestamp = reh(self.id).enabled
self.dbaas.root.delete(self.id)
assert_equal(204, self.dbaas.last_http_code)
self._verify_root_timestamp(instance_info.id)
self._verify_root_timestamp(self.id)

View File

@ -0,0 +1,67 @@
# Copyright 2019 Catalyst Cloud Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class FakeNeutronClient(object):
    """Minimal stand-in for the Neutron client used in fake mode.

    Returns canned payloads shaped like real Neutron responses; all
    mutating calls are no-ops.
    """

    def __init__(self, context):
        self.context = context

    def show_network(self, *arg, **kwargs):
        return {'network': {'name': 'fake-mgmt-net-name'}}

    def list_networks(self, *arg, **kwargs):
        # Only the external-network query yields a result.
        if 'router:external' in kwargs:
            return {'networks': [{'id': 'fake-public-net-id'}]}
        return {'networks': []}

    def create_port(self, body):
        # Management ports are recognized by their description text.
        description = body['port'].get('description', '')
        if 'Management' in description:
            port_id = 'fake-mgmt-port-id'
        else:
            port_id = 'fake-user-port-id'
        return {'port': {'id': port_id}}

    def delete_port(self, *arg, **kwargs):
        pass

    def list_ports(self, *arg, **kwargs):
        return {'ports': []}

    def create_floatingip(self, *arg, **kwargs):
        pass

    def list_floatingips(self, *arg, **kwargs):
        return {'floatingips': []}

    def update_floatingip(self, *arg, **kwargs):
        pass

    def delete_floatingip(self, *arg, **kwargs):
        pass

    def create_security_group(self, *arg, **kwargs):
        return {'security_group': {'id': 'fake-sg-id'}}

    def create_security_group_rule(self, *arg, **kwargs):
        pass

    def list_security_groups(self, *arg, **kwargs):
        return {'security_groups': []}

    def delete_security_group(self, *arg, **kwargs):
        pass
def fake_create_neutron_client(context, region_name=None):
    """Factory matching the real client creator's signature.

    *region_name* is accepted for signature compatibility but unused.
    """
    return FakeNeutronClient(context)

View File

@ -264,7 +264,7 @@ class FakeServers(object):
"""Can this FakeServers, with its context, see some resource?"""
server = self.db[id]
return (self.context.is_admin or
server.owner.tenant == self.context.tenant)
server.owner.tenant == self.context.project_id)
def create(self, name, image_id, flavor_ref, files=None, userdata=None,
block_device_mapping_v2=None, security_groups=None,
@ -282,12 +282,6 @@ class FakeServers(object):
raise nova_exceptions.ClientException("The requested availability "
"zone is not available.")
if nics:
if 'port-id' in nics[0] and nics[0]['port-id'] == "UNKNOWN":
raise nova_exceptions.ClientException("The requested "
"port-id is not "
"available.")
server.schedule_status("ACTIVE", 1)
LOG.info("FAKE_SERVERS_DB : %s", str(FAKE_SERVERS_DB))
return server
@ -439,7 +433,7 @@ class FakeVolumes(object):
"""Can this FakeVolumes, with its context, see some resource?"""
server = self.db[id]
return (self.context.is_admin or
server.owner.tenant == self.context.tenant)
server.owner.tenant == self.context.project_id)
def get(self, id):
if id not in self.db:

View File

@ -20,12 +20,9 @@ from trove.tests.api import databases
from trove.tests.api import datastores
from trove.tests.api import instances
from trove.tests.api import instances_actions
from trove.tests.api.mgmt import accounts
from trove.tests.api.mgmt import admin_required
from trove.tests.api.mgmt import datastore_versions
from trove.tests.api.mgmt import hosts
from trove.tests.api.mgmt import instances as mgmt_instances
from trove.tests.api.mgmt import storage
from trove.tests.api import replication
from trove.tests.api import root
from trove.tests.api import user_access
@ -121,9 +118,6 @@ proboscis.register(groups=["simple_blackbox"],
depends_on_groups=simple_black_box_groups)
black_box_mgmt_groups = [
accounts.GROUP,
hosts.GROUP,
storage.GROUP,
instances_actions.GROUP_REBOOT,
admin_required.GROUP,
mgmt_instances.GROUP,

View File

@ -68,7 +68,9 @@ class DefaultRootLogger(object):
def __init__(self, enable_backtrace=False):
super(DefaultRootLogger, self).__init__()
handler = DefaultRootHandler.activate(enable_backtrace=False)
handler = DefaultRootHandler.activate(
enable_backtrace=enable_backtrace
)
handler.acquire()
if handler not in logging.getLogger('').handlers:

View File

@ -20,19 +20,16 @@ from six.moves import configparser as config_parser
import trove
from trove.common import extensions
from trove.extensions.routes.account import Account
from trove.extensions.routes.mgmt import Mgmt
from trove.extensions.routes.mysql import Mysql
from trove.tests.unittests import trove_testtools
DEFAULT_EXTENSION_MAP = {
'Account': [Account, extensions.ExtensionDescriptor],
'Mgmt': [Mgmt, extensions.ExtensionDescriptor],
'MYSQL': [Mysql, extensions.ExtensionDescriptor]
}
EP_TEXT = '''
account = trove.extensions.routes.account:Account
mgmt = trove.extensions.routes.mgmt:Mgmt
mysql = trove.extensions.routes.mysql:Mysql
invalid = trove.tests.unittests.api.common.test_extensions:InvalidExtension

View File

@ -62,7 +62,7 @@ class BackupCreateTest(trove_testtools.TestCase):
super(BackupCreateTest, self).tearDown()
if self.created:
models.DBBackup.find_by(
tenant_id=self.context.tenant).delete()
tenant_id=self.context.project_id).delete()
@patch.object(api.API, 'get_client', MagicMock(return_value=MagicMock()))
def test_create(self):
@ -274,7 +274,7 @@ class BackupORMTest(trove_testtools.TestCase):
super(BackupORMTest, self).setUp()
util.init_db()
self.context, self.instance_id = _prep_conf(timeutils.utcnow())
self.backup = models.DBBackup.create(tenant_id=self.context.tenant,
self.backup = models.DBBackup.create(tenant_id=self.context.project_id,
name=BACKUP_NAME,
state=BACKUP_STATE,
instance_id=self.instance_id,
@ -286,7 +286,7 @@ class BackupORMTest(trove_testtools.TestCase):
def tearDown(self):
super(BackupORMTest, self).tearDown()
if not self.deleted:
models.DBBackup.find_by(tenant_id=self.context.tenant).delete()
models.DBBackup.find_by(tenant_id=self.context.project_id).delete()
def test_list(self):
backups, marker = models.Backup.list(self.context)
@ -294,7 +294,7 @@ class BackupORMTest(trove_testtools.TestCase):
self.assertEqual(1, len(backups))
def test_list_for_instance(self):
models.DBBackup.create(tenant_id=self.context.tenant,
models.DBBackup.create(tenant_id=self.context.project_id,
name=BACKUP_NAME_2,
state=BACKUP_STATE,
instance_id=self.instance_id,
@ -306,26 +306,26 @@ class BackupORMTest(trove_testtools.TestCase):
self.assertEqual(2, len(backups))
def test_get_last_completed(self):
models.DBBackup.create(tenant_id=self.context.tenant,
models.DBBackup.create(tenant_id=self.context.project_id,
name=BACKUP_NAME_3,
state=BACKUP_STATE_COMPLETED,
instance_id=self.instance_id,
size=2.0,
deleted=False)
models.DBBackup.create(tenant_id=self.context.tenant,
models.DBBackup.create(tenant_id=self.context.project_id,
name=BACKUP_NAME_4,
state=BACKUP_STATE_COMPLETED,
instance_id=self.instance_id,
size=2.0,
deleted=False)
models.DBBackup.create(tenant_id=self.context.tenant,
models.DBBackup.create(tenant_id=self.context.project_id,
name=BACKUP_NAME_5,
state=BACKUP_STATE_COMPLETED,
instance_id=self.instance_id,
parent_id='parent_uuid',
size=2.0,
deleted=False)
models.DBBackup.create(tenant_id=self.context.tenant,
models.DBBackup.create(tenant_id=self.context.project_id,
name=BACKUP_NAME_6,
state=BACKUP_STATE_COMPLETED,
instance_id=self.instance_id,
@ -414,13 +414,13 @@ class BackupORMTest(trove_testtools.TestCase):
def test_check_swift_object_exist_client_exception(self):
with patch.object(remote, 'get_endpoint', return_value=None),\
patch.object(remote, 'Connection',
side_effect=ClientException(self.context.tenant)):
side_effect=ClientException(self.context.project_id)):
self.assertRaises(exception.SwiftAuthError,
self.backup.check_swift_object_exist,
self.context)
def test_check_swift_object_exist_client_exception_404(self):
e = ClientException(self.context.tenant)
e = ClientException(self.context.project_id)
e.http_status = 404
with patch.object(remote, 'get_endpoint', return_value=None),\
patch.object(remote, 'Connection',
@ -431,7 +431,7 @@ class BackupORMTest(trove_testtools.TestCase):
def test_swift_auth_token_client_exception(self):
with patch.object(remote, 'get_endpoint', return_value=None),\
patch.object(remote, 'Connection',
side_effect=ClientException(self.context.tenant)):
side_effect=ClientException(self.context.project_id)):
self.assertRaises(exception.SwiftAuthError,
models.Backup.verify_swift_auth_token,
self.context)
@ -453,7 +453,7 @@ class PaginationTests(trove_testtools.TestCase):
self.context, self.instance_id = _prep_conf(timeutils.utcnow())
# Create a bunch of backups
bkup_info = {
'tenant_id': self.context.tenant,
'tenant_id': self.context.project_id,
'state': BACKUP_STATE,
'instance_id': self.instance_id,
'size': 2.0,
@ -511,7 +511,7 @@ class OrderingTests(trove_testtools.TestCase):
now = timeutils.utcnow()
self.context, self.instance_id = _prep_conf(now)
info = {
'tenant_id': self.context.tenant,
'tenant_id': self.context.project_id,
'state': BACKUP_STATE,
'instance_id': self.instance_id,
'size': 2.0,

View File

@ -38,9 +38,12 @@ class TestPolicy(trove_testtools.TestCase):
trove_policy.authorize_on_tenant(self.context, test_rule)
self.mock_get_enforcer.assert_called_once_with()
self.mock_enforcer.authorize.assert_called_once_with(
test_rule, {'tenant': self.context.tenant}, self.context.to_dict(),
test_rule,
{'tenant': self.context.project_id},
self.context.to_dict(),
do_raise=True, exc=trove_exceptions.PolicyNotAuthorized,
action=test_rule)
action=test_rule
)
def test_authorize_on_target(self):
test_rule = NonCallableMock()

View File

@ -1,132 +0,0 @@
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from mock import MagicMock
from mock import Mock, patch
from neutronclient.common import exceptions as neutron_exceptions
from neutronclient.v2_0 import client as NeutronClient
from trove.common import cfg
from trove.common import exception
from trove.common.models import NetworkRemoteModelBase
from trove.common import remote
from trove.extensions.security_group.models import RemoteSecurityGroup
from trove.network import neutron
from trove.network.neutron import NeutronDriver as driver
from trove.tests.unittests import trove_testtools
CONF = cfg.CONF
class NeutronDriverTest(trove_testtools.TestCase):
def setUp(self):
super(NeutronDriverTest, self).setUp()
self.context = trove_testtools.TroveTestContext(self)
self.orig_neutron_driver = NetworkRemoteModelBase.get_driver
self.orig_create_sg = driver.create_security_group
self.orig_add_sg_rule = driver.add_security_group_rule
self.orig_del_sg_rule = driver.delete_security_group_rule
self.orig_del_sg = driver.delete_security_group
NetworkRemoteModelBase.get_driver = Mock(return_value=driver)
def tearDown(self):
super(NeutronDriverTest, self).tearDown()
NetworkRemoteModelBase.get_driver = self.orig_neutron_driver
driver.create_security_group = self.orig_create_sg
driver.add_security_group_rule = self.orig_add_sg_rule
driver.delete_security_group_rule = self.orig_del_sg_rule
driver.delete_security_group = self.orig_del_sg
def test_create_security_group(self):
driver.create_security_group = Mock()
RemoteSecurityGroup.create(name=Mock(), description=Mock(),
context=self.context,
region_name=CONF.os_region_name)
self.assertEqual(1, driver.create_security_group.call_count)
def test_add_security_group_rule(self):
driver.add_security_group_rule = Mock()
RemoteSecurityGroup.add_rule(sec_group_id=Mock(), protocol=Mock(),
from_port=Mock(), to_port=Mock(),
cidr=Mock(), context=self.context,
region_name=CONF.os_region_name)
self.assertEqual(1, driver.add_security_group_rule.call_count)
def test_delete_security_group_rule(self):
driver.delete_security_group_rule = Mock()
RemoteSecurityGroup.delete_rule(sec_group_rule_id=Mock(),
context=self.context,
region_name=CONF.os_region_name)
self.assertEqual(1, driver.delete_security_group_rule.call_count)
def test_delete_security_group(self):
driver.delete_security_group = Mock()
RemoteSecurityGroup.delete(sec_group_id=Mock(),
context=self.context,
region_name=CONF.os_region_name)
self.assertEqual(1, driver.delete_security_group.call_count)
class NeutronDriverExceptionTest(trove_testtools.TestCase):
def setUp(self):
super(NeutronDriverExceptionTest, self).setUp()
self.context = trove_testtools.TroveTestContext(self)
self.orig_neutron_driver = NetworkRemoteModelBase.get_driver
self.orig_NeutronClient = NeutronClient.Client
self.orig_get_endpoint = remote.get_endpoint
remote.get_endpoint = MagicMock(return_value="neutron_url")
mock_driver = neutron.NeutronDriver(self.context, "regionOne")
NetworkRemoteModelBase.get_driver = MagicMock(
return_value=mock_driver)
NeutronClient.Client = Mock(
side_effect=neutron_exceptions.NeutronClientException())
def tearDown(self):
super(NeutronDriverExceptionTest, self).tearDown()
NetworkRemoteModelBase.get_driver = self.orig_neutron_driver
NeutronClient.Client = self.orig_NeutronClient
remote.get_endpoint = self.orig_get_endpoint
@patch('trove.network.neutron.LOG')
def test_create_sg_with_exception(self, mock_logging):
self.assertRaises(exception.SecurityGroupCreationError,
RemoteSecurityGroup.create,
"sg_name", "sg_desc", self.context,
region_name=CONF.os_region_name)
@patch('trove.network.neutron.LOG')
def test_add_sg_rule_with_exception(self, mock_logging):
self.assertRaises(exception.SecurityGroupRuleCreationError,
RemoteSecurityGroup.add_rule,
"12234", "tcp", "22", "22",
"0.0.0.0/8", self.context,
region_name=CONF.os_region_name)
@patch('trove.network.neutron.LOG')
def test_delete_sg_rule_with_exception(self, mock_logging):
self.assertRaises(exception.SecurityGroupRuleDeletionError,
RemoteSecurityGroup.delete_rule,
"12234", self.context,
region_name=CONF.os_region_name)
@patch('trove.network.neutron.LOG')
def test_delete_sg_with_exception(self, mock_logging):
self.assertRaises(exception.SecurityGroupDeletionError,
RemoteSecurityGroup.delete,
"123445", self.context,
region_name=CONF.os_region_name)

View File

@ -1,182 +0,0 @@
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from mock import Mock
from mock import patch
from novaclient import exceptions as nova_exceptions
from trove.common import cfg
from trove.common import exception
import trove.common.remote
from trove.extensions.security_group import models as sec_mod
from trove.instance import models as inst_model
from trove.tests.fakes import nova
from trove.tests.unittests import trove_testtools
CONF = cfg.CONF
"""
Unit tests for testing the exceptions raised by Security Groups
"""
class Security_Group_Exceptions_Test(trove_testtools.TestCase):
def setUp(self):
super(Security_Group_Exceptions_Test, self).setUp()
self.createNovaClient = trove.common.remote.create_nova_client
self.context = trove_testtools.TroveTestContext(self)
self.FakeClient = nova.fake_create_nova_client(self.context)
fException = Mock(
side_effect=lambda *args, **kwargs: self._raise(
nova_exceptions.ClientException("Test")))
self.FakeClient.security_groups.create = fException
self.FakeClient.security_groups.delete = fException
self.FakeClient.security_group_rules.create = fException
self.FakeClient.security_group_rules.delete = fException
trove.common.remote.create_nova_client = (
lambda c, r: self._return_mocked_nova_client(c))
def tearDown(self):
super(Security_Group_Exceptions_Test, self).tearDown()
trove.common.remote.create_nova_client = self.createNovaClient
def _return_mocked_nova_client(self, context):
return self.FakeClient
def _raise(self, ex):
raise ex
@patch('trove.network.nova.LOG')
def test_failed_to_create_security_group(self, mock_logging):
self.assertRaises(exception.SecurityGroupCreationError,
sec_mod.RemoteSecurityGroup.create,
"TestName",
"TestDescription",
self.context,
region_name=CONF.os_region_name)
@patch('trove.network.nova.LOG')
def test_failed_to_delete_security_group(self, mock_logging):
self.assertRaises(exception.SecurityGroupDeletionError,
sec_mod.RemoteSecurityGroup.delete,
1, self.context,
region_name=CONF.os_region_name)
@patch('trove.network.nova.LOG')
def test_failed_to_create_security_group_rule(self, mock_logging):
self.assertRaises(exception.SecurityGroupRuleCreationError,
sec_mod.RemoteSecurityGroup.add_rule,
1, "tcp", 3306, 3306, "0.0.0.0/0", self.context,
region_name=CONF.os_region_name)
@patch('trove.network.nova.LOG')
def test_failed_to_delete_security_group_rule(self, mock_logging):
self.assertRaises(exception.SecurityGroupRuleDeletionError,
sec_mod.RemoteSecurityGroup.delete_rule,
1, self.context,
region_name=CONF.os_region_name)
class fake_RemoteSecGr(object):
def data(self):
self.id = uuid.uuid4()
return {'id': self.id}
def delete(self, context, region_name):
pass
class fake_SecGr_Association(object):
def get_security_group(self):
return fake_RemoteSecGr()
def delete(self):
pass
class SecurityGroupDeleteTest(trove_testtools.TestCase):
def setUp(self):
super(SecurityGroupDeleteTest, self).setUp()
self.inst_model_conf_patch = patch.object(inst_model, 'CONF')
self.inst_model_conf_mock = self.inst_model_conf_patch.start()
self.addCleanup(self.inst_model_conf_patch.stop)
self.context = trove_testtools.TroveTestContext(self)
self.original_find_by = (
sec_mod.SecurityGroupInstanceAssociation.find_by)
self.original_delete = sec_mod.SecurityGroupInstanceAssociation.delete
self.fException = Mock(
side_effect=lambda *args, **kwargs: self._raise(
exception.ModelNotFoundError()))
def tearDown(self):
super(SecurityGroupDeleteTest, self).tearDown()
(sec_mod.SecurityGroupInstanceAssociation.
find_by) = self.original_find_by
(sec_mod.SecurityGroupInstanceAssociation.
delete) = self.original_delete
def _raise(self, ex):
raise ex
def test_failed_to_get_assoc_on_delete(self):
sec_mod.SecurityGroupInstanceAssociation.find_by = self.fException
self.assertIsNone(
sec_mod.SecurityGroup.delete_for_instance(
uuid.uuid4(), self.context, CONF.os_region_name))
def test_get_security_group_from_assoc_with_db_exception(self):
fException = Mock(
side_effect=lambda *args, **kwargs: self._raise(
nova_exceptions.ClientException('TEST')))
i_id = uuid.uuid4()
class new_fake_RemoteSecGrAssoc(object):
def get_security_group(self):
return None
def delete(self):
return fException
sec_mod.SecurityGroupInstanceAssociation.find_by = Mock(
return_value=new_fake_RemoteSecGrAssoc())
self.assertIsNone(
sec_mod.SecurityGroup.delete_for_instance(
i_id, self.context, CONF.os_region_name))
def test_delete_secgr_assoc_with_db_exception(self):
i_id = uuid.uuid4()
sec_mod.SecurityGroupInstanceAssociation.find_by = Mock(
return_value=fake_SecGr_Association())
sec_mod.SecurityGroupInstanceAssociation.delete = self.fException
self.assertIsNotNone(sec_mod.SecurityGroupInstanceAssociation.find_by(
i_id, deleted=False).get_security_group())
self.assertTrue(hasattr(sec_mod.SecurityGroupInstanceAssociation.
find_by(i_id, deleted=False).
get_security_group(), 'delete'))
self.assertIsNone(
sec_mod.SecurityGroup.delete_for_instance(
i_id, self.context, CONF.os_region_name))

View File

@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mock import Mock
from mock import patch
@ -50,13 +51,13 @@ class ApiTest(trove_testtools.TestCase):
@patch.object(task_api.API, '_transform_obj', Mock(return_value='flv-id'))
def test_create_instance(self):
flavor = Mock()
self.api.create_instance(
'inst-id', 'inst-name', flavor, 'img-id', {'name': 'db1'},
'inst-id', 'inst-name', mock.ANY, 'img-id', {'name': 'db1'},
{'name': 'usr1'}, 'mysql', None, 1, backup_id='bk-id',
availability_zone='az', root_password='pwd', nics=['nic-id'],
overrides={}, slave_of_id='slv-id', cluster_config={},
volume_type='type', modules=['mod-id'], locality='affinity')
self._verify_rpc_prepare_before_cast()
self._verify_cast(
'create_instance', availability_zone='az', backup_id='bk-id',
@ -65,7 +66,7 @@ class ApiTest(trove_testtools.TestCase):
instance_id='inst-id', locality='affinity', modules=['mod-id'],
name='inst-name', nics=['nic-id'], overrides={}, packages=None,
root_password='pwd', slave_of_id='slv-id', users={'name': 'usr1'},
volume_size=1, volume_type='type')
volume_size=1, volume_type='type', access=None)
def test_detach_replica(self):
self.api.detach_replica('some-instance-id')

View File

@ -247,6 +247,7 @@ class TestManager(trove_testtools.TestCase):
'mysql-image-id', None, None, 'mysql', 'mysql-server', 2,
'temp-backup-id', None, 'password', None, mock_override,
None, None, None, None, 'affinity')
mock_tasks.create_instance.assert_called_with(mock_flavor,
'mysql-image-id', None,
None, 'mysql',
@ -255,7 +256,8 @@ class TestManager(trove_testtools.TestCase):
'password', None,
mock_override,
None, None, None, None,
{'group': 'sg-id'})
{'group': 'sg-id'},
access=None)
mock_tasks.wait_for_instance.assert_called_with(36000, mock_flavor)
def test_create_cluster(self):

View File

@ -13,11 +13,11 @@
# under the License.
import os
from tempfile import NamedTemporaryFile
import uuid
from cinderclient import exceptions as cinder_exceptions
import cinderclient.v2.client as cinderclient
from cinderclient.v2 import volumes as cinderclient_volumes
import mock
from mock import Mock, MagicMock, patch, PropertyMock, call
import neutronclient.v2_0.client as neutronclient
from novaclient import exceptions as nova_exceptions
@ -32,7 +32,6 @@ from trove.backup import models as backup_models
from trove.backup import state
import trove.common.context
from trove.common.exception import GuestError
from trove.common.exception import MalformedSecurityGroupRuleError
from trove.common.exception import PollTimeOut
from trove.common.exception import TroveError
from trove.common.instance import ServiceStatuses
@ -76,7 +75,6 @@ class fake_Server(object):
self.flavor_id = None
self.files = None
self.userdata = None
self.security_groups = None
self.block_device_mapping_v2 = None
self.status = 'ACTIVE'
self.key_name = None
@ -84,7 +82,7 @@ class fake_Server(object):
class fake_ServerManager(object):
def create(self, name, image_id, flavor_id, files, userdata,
security_groups, block_device_mapping_v2=None,
block_device_mapping_v2=None,
availability_zone=None,
nics=None, config_drive=False,
scheduler_hints=None, key_name=None):
@ -95,7 +93,6 @@ class fake_ServerManager(object):
server.flavor_id = flavor_id
server.files = files
server.userdata = userdata
server.security_groups = security_groups
server.block_device_mapping_v2 = block_device_mapping_v2
server.availability_zone = availability_zone
server.nics = nics
@ -210,16 +207,6 @@ class BaseFreshInstanceTasksTest(trove_testtools.TestCase):
f.write(self.guestconfig_content)
self.freshinstancetasks = taskmanager_models.FreshInstanceTasks(
None, Mock(), None, None)
self.tm_sg_create_inst_patch = patch.object(
trove.taskmanager.models.SecurityGroup, 'create_for_instance',
Mock(return_value={'id': uuid.uuid4(), 'name': uuid.uuid4()}))
self.tm_sg_create_inst_mock = self.tm_sg_create_inst_patch.start()
self.addCleanup(self.tm_sg_create_inst_patch.stop)
self.tm_sgr_create_sgr_patch = patch.object(
trove.taskmanager.models.SecurityGroupRule,
'create_sec_group_rule')
self.tm_sgr_create_sgr_mock = self.tm_sgr_create_sgr_patch.start()
self.addCleanup(self.tm_sgr_create_sgr_patch.stop)
def tearDown(self):
super(BaseFreshInstanceTasksTest, self).tearDown()
@ -239,14 +226,15 @@ class FreshInstanceTasksTest(BaseFreshInstanceTasksTest):
cfg.CONF.set_override('cloudinit_location', cloudinit_location)
server = self.freshinstancetasks._create_server(
None, None, None, datastore_manager, None, None, None)
None, None, datastore_manager, None, None, None)
self.assertEqual(server.userdata, self.userdata)
def test_create_instance_with_keypair(self):
cfg.CONF.set_override('nova_keypair', 'fake_keypair')
server = self.freshinstancetasks._create_server(
None, None, None, None, None, None, None)
None, None, None, None, None, None)
self.assertEqual('fake_keypair', server.key_name)
@ -287,21 +275,21 @@ class FreshInstanceTasksTest(BaseFreshInstanceTasksTest):
def test_create_instance_with_az_kwarg(self):
# execute
server = self.freshinstancetasks._create_server(
None, None, None, None, None, availability_zone='nova', nics=None)
None, None, None, None, availability_zone='nova', nics=None)
# verify
self.assertIsNotNone(server)
def test_create_instance_with_az(self):
# execute
server = self.freshinstancetasks._create_server(
None, None, None, None, None, 'nova', None)
None, None, None, None, 'nova', None)
# verify
self.assertIsNotNone(server)
def test_create_instance_with_az_none(self):
# execute
server = self.freshinstancetasks._create_server(
None, None, None, None, None, None, None)
None, None, None, None, None, None)
# verify
self.assertIsNotNone(server)
@ -313,13 +301,11 @@ class FreshInstanceTasksTest(BaseFreshInstanceTasksTest):
mock_nova_client = self.freshinstancetasks.nova_client = Mock()
mock_servers_create = mock_nova_client.servers.create
self.freshinstancetasks._create_server('fake-flavor', 'fake-image',
None, 'mysql', None, None,
None)
'mysql', None, None, None)
mock_servers_create.assert_called_with(
'fake-hostname', 'fake-image',
'fake-flavor', files={},
userdata=None,
security_groups=None,
block_device_mapping_v2=None,
availability_zone=None,
nics=None,
@ -341,28 +327,6 @@ class FreshInstanceTasksTest(BaseFreshInstanceTasksTest):
self.assertEqual(InstanceTasks.BUILDING_ERROR_TIMEOUT_GA,
fake_DBInstance.find_by().get_task_status())
@patch.object(BaseInstance, 'update_db')
@patch.object(backup_models.Backup, 'get_by_id')
@patch.object(taskmanager_models.FreshInstanceTasks, 'report_root_enabled')
@patch.object(taskmanager_models.FreshInstanceTasks, 'get_injected_files')
@patch.object(taskmanager_models.FreshInstanceTasks, '_create_secgroup')
@patch.object(taskmanager_models.FreshInstanceTasks, '_build_volume_info')
@patch.object(taskmanager_models.FreshInstanceTasks, '_create_server')
@patch.object(taskmanager_models.FreshInstanceTasks, '_guest_prepare')
@patch.object(template, 'SingleInstanceConfigTemplate')
@patch.object(taskmanager_models.FreshInstanceTasks, '_create_dns_entry',
side_effect=TroveError)
@patch('trove.taskmanager.models.LOG')
def test_error_create_dns_entry_create_instance(self, *args):
mock_flavor = {'id': 6, 'ram': 512, 'name': 'big_flavor'}
self.assertRaisesRegex(
TroveError,
'Error creating DNS entry for instance',
self.freshinstancetasks.create_instance, mock_flavor,
'mysql-image-id', None, None, 'mysql', 'mysql-server',
2, Mock(), None, 'root_password', None, Mock(), None, None, None,
None, None)
@patch.object(BaseInstance, 'update_db')
@patch.object(taskmanager_models.FreshInstanceTasks, '_create_dns_entry')
@patch.object(taskmanager_models.FreshInstanceTasks, 'get_injected_files')
@ -371,7 +335,9 @@ class FreshInstanceTasksTest(BaseFreshInstanceTasksTest):
@patch.object(taskmanager_models.FreshInstanceTasks, '_build_volume_info')
@patch.object(taskmanager_models.FreshInstanceTasks, '_guest_prepare')
@patch.object(template, 'SingleInstanceConfigTemplate')
@patch('trove.taskmanager.models.FreshInstanceTasks._create_port')
def test_create_instance(self,
mock_create_port,
mock_single_instance_template,
mock_guest_prepare,
mock_build_volume_info,
@ -388,78 +354,112 @@ class FreshInstanceTasksTest(BaseFreshInstanceTasksTest):
mock_single_instance_template.return_value.config_contents = (
config_content)
overrides = Mock()
self.freshinstancetasks.create_instance(mock_flavor, 'mysql-image-id',
None, None, 'mysql',
'mysql-server', 2,
None, None, None, None,
overrides, None, None,
'volume_type', None,
{'group': 'sg-id'})
mock_create_secgroup.assert_called_with('mysql')
mock_build_volume_info.assert_called_with('mysql', volume_size=2,
volume_type='volume_type')
mock_create_secgroup.return_value = 'fake_security_group_id'
mock_create_port.return_value = 'fake-port-id'
self.freshinstancetasks.create_instance(
mock_flavor, 'mysql-image-id', None,
None, 'mysql', 'mysql-server',
2, None, None,
None, [{'net-id': 'fake-net-id'}], overrides,
None, None, 'volume_type',
None, {'group': 'sg-id'}
)
mock_create_secgroup.assert_called_with('mysql', [])
mock_create_port.assert_called_once_with(
'fake-net-id',
['fake_security_group_id'],
is_mgmt=False,
is_public=False
)
mock_build_volume_info.assert_called_with(
'mysql', volume_size=2,
volume_type='volume_type'
)
mock_guest_prepare.assert_called_with(
768, mock_build_volume_info(), 'mysql-server', None, None, None,
config_content, None, overrides, None, None, None)
config_content, None, overrides, None, None, None
)
mock_create_server.assert_called_with(
8, 'mysql-image-id', mock_create_secgroup(),
'mysql', mock_build_volume_info()['block_device'], None,
None, mock_get_injected_files(), {'group': 'sg-id'})
8, 'mysql-image-id', 'mysql',
mock_build_volume_info()['block_device'], None,
[{'port-id': 'fake-port-id'}],
mock_get_injected_files(),
{'group': 'sg-id'}
)
@patch.object(BaseInstance, 'update_db')
@patch.object(taskmanager_models.FreshInstanceTasks, '_create_dns_entry')
@patch.object(taskmanager_models.FreshInstanceTasks, 'get_injected_files')
@patch.object(taskmanager_models.FreshInstanceTasks, '_create_server')
@patch.object(taskmanager_models.FreshInstanceTasks, '_create_secgroup')
@patch.object(taskmanager_models.FreshInstanceTasks, '_build_volume_info')
@patch.object(taskmanager_models.FreshInstanceTasks, '_guest_prepare')
@patch.object(template, 'SingleInstanceConfigTemplate')
@patch(
"trove.taskmanager.models.FreshInstanceTasks._create_management_port"
)
@patch('trove.common.remote.neutron_client')
def test_create_instance_with_mgmt_port(self,
mock_create_mgmt_port,
mock_neutron_client,
mock_single_instance_template,
mock_guest_prepare,
mock_build_volume_info,
mock_create_secgroup,
mock_create_server,
mock_get_injected_files,
*args):
self.patch_conf_property('management_networks', ['fake-mgmt-uuid'])
mock_create_secgroup.return_value = ['fake-sg']
mock_create_mgmt_port.return_value = 'fake-port-id'
mock_client = Mock()
mock_client.create_security_group.return_value = {
'security_group': {'id': 'fake-sg-id'}
}
mock_client.create_port.side_effect = [
{'port': {'id': 'fake-mgmt-port-id'}},
{'port': {'id': 'fake-user-port-id'}}
]
mock_client.list_networks.return_value = {
'networks': [{'id': 'fake-public-net-id'}]
}
mock_neutron_client.return_value = mock_client
mock_flavor = {'id': 8, 'ram': 768, 'name': 'bigger_flavor'}
config_content = {'config_contents': 'some junk'}
mock_single_instance_template.return_value.config_contents = (
config_content)
overrides = Mock()
self.freshinstancetasks.create_instance(
mock_flavor, 'mysql-image-id',
None, None, 'mysql',
'mysql-server', 2,
None, None, None,
[{'net-id': 'fake-net-uuid'}, {'net-id': 'fake-mgmt-uuid'}],
overrides, None, None,
'volume_type', None,
{'group': 'sg-id'}
mock_flavor, 'mysql-image-id', None,
None, 'mysql', 'mysql-server',
2, None, None,
None, [{'net-id': 'fake-net-uuid'}, {'net-id': 'fake-mgmt-uuid'}],
mock.ANY,
None, None, 'volume_type',
None, {'group': 'sg-id'},
access={'is_public': True, 'allowed_cidrs': ['192.168.0.1/24']}
)
mock_create_secgroup.assert_called_with('mysql')
mock_create_mgmt_port.assert_called_once_with('fake-mgmt-uuid',
['fake-sg'])
mock_build_volume_info.assert_called_with('mysql', volume_size=2,
volume_type='volume_type')
mock_build_volume_info.assert_called_with(
'mysql', volume_size=2,
volume_type='volume_type'
)
mock_guest_prepare.assert_called_with(
768, mock_build_volume_info(), 'mysql-server', None, None, None,
config_content, None, overrides, None, None, None)
config_content, None, mock.ANY, None, None, None)
mock_create_server.assert_called_with(
8, 'mysql-image-id', ['fake-sg'],
'mysql', mock_build_volume_info()['block_device'], None,
[{'net-id': 'fake-net-uuid'}, {'port-id': 'fake-port-id'}],
mock_get_injected_files(), {'group': 'sg-id'})
8, 'mysql-image-id', 'mysql',
mock_build_volume_info()['block_device'], None,
[
{'port-id': 'fake-user-port-id'},
{'port-id': 'fake-mgmt-port-id'}
],
mock_get_injected_files(), {'group': 'sg-id'}
)
create_floatingip_param = {
"floatingip": {
'floating_network_id': 'fake-public-net-id',
'port_id': 'fake-user-port-id',
}
}
mock_client.create_floatingip.assert_called_once_with(
create_floatingip_param
)
@patch.object(BaseInstance, 'update_db')
@patch.object(taskmanager_models, 'create_cinder_client')
@ -554,77 +554,6 @@ class FreshInstanceTasksTest(BaseFreshInstanceTasksTest):
snapshot, mock_flavor)
class InstanceSecurityGroupRuleTests(BaseFreshInstanceTasksTest):
def setUp(self):
super(InstanceSecurityGroupRuleTests, self).setUp()
self.task_models_conf_patch = patch('trove.taskmanager.models.CONF')
self.task_models_conf_mock = self.task_models_conf_patch.start()
self.addCleanup(self.task_models_conf_patch.stop)
self.inst_models_conf_patch = patch('trove.instance.models.CONF')
self.inst_models_conf_mock = self.inst_models_conf_patch.start()
self.addCleanup(self.inst_models_conf_patch.stop)
def test_create_sg_rules_success(self):
datastore_manager = 'mysql'
self.task_models_conf_mock.get = Mock(return_value=FakeOptGroup())
self.freshinstancetasks._create_secgroup(datastore_manager)
self.assertEqual(2, taskmanager_models.SecurityGroupRule.
create_sec_group_rule.call_count)
def test_create_sg_rules_format_exception_raised(self):
datastore_manager = 'mysql'
self.task_models_conf_mock.get = Mock(
return_value=FakeOptGroup(tcp_ports=['3306', '-3306']))
self.freshinstancetasks.update_db = Mock()
self.assertRaises(MalformedSecurityGroupRuleError,
self.freshinstancetasks._create_secgroup,
datastore_manager)
def test_create_sg_rules_success_with_duplicated_port_or_range(self):
datastore_manager = 'mysql'
self.task_models_conf_mock.get = Mock(
return_value=FakeOptGroup(
tcp_ports=['3306', '3306', '3306-3307', '3306-3307']))
self.freshinstancetasks.update_db = Mock()
self.freshinstancetasks._create_secgroup(datastore_manager)
self.assertEqual(2, taskmanager_models.SecurityGroupRule.
create_sec_group_rule.call_count)
def test_create_sg_rules_exception_with_malformed_ports_or_range(self):
datastore_manager = 'mysql'
self.task_models_conf_mock.get = Mock(
return_value=FakeOptGroup(tcp_ports=['A', 'B-C']))
self.freshinstancetasks.update_db = Mock()
self.assertRaises(MalformedSecurityGroupRuleError,
self.freshinstancetasks._create_secgroup,
datastore_manager)
def test_create_sg_rules_icmp(self):
datastore_manager = 'mysql'
self.task_models_conf_mock.get = Mock(
return_value=FakeOptGroup(icmp=True))
self.freshinstancetasks.update_db = Mock()
self.freshinstancetasks._create_secgroup(datastore_manager)
self.assertEqual(3, taskmanager_models.SecurityGroupRule.
create_sec_group_rule.call_count)
@patch.object(BaseInstance, 'update_db')
@patch('trove.taskmanager.models.CONF')
@patch('trove.taskmanager.models.LOG')
def test_error_sec_group_create_instance(self, mock_logging,
mock_conf, mock_update_db):
mock_conf.get = Mock(
return_value=FakeOptGroup(tcp_ports=['3306', '-3306']))
mock_flavor = {'id': 7, 'ram': 256, 'name': 'smaller_flavor'}
self.assertRaisesRegex(
TroveError,
'Error creating security group for instance',
self.freshinstancetasks.create_instance, mock_flavor,
'mysql-image-id', None, None, 'mysql', 'mysql-server', 2,
None, None, None, None, Mock(), None, None, None, None, None)
class ResizeVolumeTest(trove_testtools.TestCase):
def setUp(self):
@ -1256,11 +1185,11 @@ class RootReportTest(trove_testtools.TestCase):
def test_report_root_double_create(self):
context = Mock()
context.user = utils.generate_uuid()
uuid = utils.generate_uuid()
history = mysql_models.RootHistory(uuid, context.user).save()
id = utils.generate_uuid()
history = mysql_models.RootHistory(id, context.user).save()
with patch.object(mysql_models.RootHistory, 'load',
Mock(return_value=history)):
report = mysql_models.RootHistory.create(context, uuid)
report = mysql_models.RootHistory.create(context, id)
self.assertTrue(mysql_models.RootHistory.load.called)
self.assertEqual(history.user, report.user)
self.assertEqual(history.id, report.id)
@ -1273,12 +1202,12 @@ class ClusterRootTest(trove_testtools.TestCase):
def test_cluster_root_create(self, root_create, root_history_create):
context = Mock()
context.user = utils.generate_uuid()
uuid = utils.generate_uuid()
id = utils.generate_uuid()
password = "rootpassword"
cluster_instances = [utils.generate_uuid(), utils.generate_uuid()]
common_models.ClusterRoot.create(context, uuid, password,
common_models.ClusterRoot.create(context, id, password,
cluster_instances)
root_create.assert_called_with(context, uuid, password,
root_create.assert_called_with(context, id, password,
cluster_instances_list=None)
self.assertEqual(2, root_history_create.call_count)
calls = [

View File

@ -14,10 +14,7 @@
# under the License.
import abc
import inspect
import mock
import os
import sys
import testtools
from trove.common import cfg
@ -60,42 +57,12 @@ class TroveTestContext(TroveContext):
class TestCase(testtools.TestCase):
"""Base class of Trove unit tests.
Integrates automatic dangling mock detection.
"""
_NEWLINE = '\n'
# Number of nested levels to examine when searching for mocks.
# Higher setting will potentially uncover more dangling objects,
# at the cost of increased scanning time.
_max_recursion_depth = int(os.getenv(
'TROVE_TESTS_UNMOCK_RECURSION_DEPTH', 2))
# Should we skip the remaining tests after the first failure.
_fail_fast = is_bool(os.getenv(
'TROVE_TESTS_UNMOCK_FAIL_FAST', False))
# Should we report only unique dangling mock references.
_only_unique = is_bool(os.getenv(
'TROVE_TESTS_UNMOCK_ONLY_UNIQUE', True))
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
cls._dangling_mocks = set()
root_logger.DefaultRootLogger(enable_backtrace=False)
@classmethod
def tearDownClass(cls):
cls._assert_modules_unmocked()
super(TestCase, cls).tearDownClass()
def setUp(self):
if self.__class__._fail_fast and self.__class__._dangling_mocks:
self.skipTest("This test suite already has dangling mock "
"references from a previous test case.")
super(TestCase, self).setUp()
self.addCleanup(cfg.CONF.reset)
@ -117,61 +84,6 @@ class TestCase(testtools.TestCase):
root_logger.DefaultRootHandler.set_info(info=None)
super(TestCase, self).tearDown()
@classmethod
def _assert_modules_unmocked(cls):
"""Check that all members of loaded modules are currently unmocked.
"""
new_mocks = cls._find_mock_refs()
if cls._only_unique:
# Remove mock references that have already been reported once in
# this test suite (probably defined in setUp()).
new_mocks.difference_update(cls._dangling_mocks)
cls._dangling_mocks.update(new_mocks)
if new_mocks:
messages = ["Member '%s' needs to be unmocked." % item[0]
for item in new_mocks]
raise Exception(cls._NEWLINE + cls._NEWLINE.join(messages))
@classmethod
def _find_mock_refs(cls):
discovered_mocks = set()
for module_name, module in cls._get_loaded_modules().items():
cls._find_mocks(module_name, module, discovered_mocks, 1)
return discovered_mocks
@classmethod
def _find_mocks(cls, parent_name, parent, container, depth):
"""Search for mock members in the parent object.
Descend into class types.
"""
if depth <= cls._max_recursion_depth:
try:
if isinstance(parent, mock.Mock):
# Add just the parent if it's a mock itself.
container.add((parent_name, parent))
else:
# Add all mocked members of the parent.
for member_name, member in inspect.getmembers(parent):
full_name = '%s.%s' % (parent_name, member_name)
if isinstance(member, mock.Mock):
container.add((full_name, member))
elif inspect.isclass(member):
cls._find_mocks(
full_name, member, container, depth + 1)
except ImportError:
pass # Module cannot be imported - ignore it.
except RuntimeError:
# Something else went wrong when probing the class member.
# See: https://bugs.launchpad.net/trove/+bug/1524918
pass
@classmethod
def _get_loaded_modules(cls):
return {name: obj for name, obj in sys.modules.items() if obj}
def patch_datastore_manager(self, manager_name):
return self.patch_conf_property('datastore_manager', manager_name)

View File

@ -98,7 +98,6 @@ def create_dbaas_client(user):
from troveclient.compat import auth
class FakeAuth(auth.Authenticator):
def authenticate(self):
class FakeCatalog(object):
def __init__(self, auth):

View File

@ -89,14 +89,6 @@ class Users(object):
def find_all_users_who_satisfy(self, requirements, black_list=None):
"""Returns a list of all users who satisfy the given requirements."""
black_list = black_list or []
print("Searching for a user who meets requirements %s in our list..."
% requirements)
print("Users:")
for user in self.users:
print("\t" + str(user))
print("Black list")
for item in black_list:
print("\t" + str(item))
return (user for user in self.users
if user.auth_user not in black_list and
user.requirements.satisfies(requirements))