[ADMIN_API] Version 2.0 of the Admin API
Change-Id: I3ef16e525cdd2fa30c921888d1061f848e5fa287
This commit is contained in:
parent
796b760868
commit
2d8948833c
|
@ -21,6 +21,13 @@ cfg.CONF.register_group(adminapi_group)
|
|||
|
||||
cfg.CONF.register_opts(
|
||||
[
|
||||
cfg.BoolOpt('disable_keystone',
|
||||
default=False,
|
||||
help='Unauthenticated server, for testing only'),
|
||||
cfg.StrOpt('keystone_module',
|
||||
default='keystoneclient.middleware.auth_token:AuthProtocol',
|
||||
help='A colon separated module and class for keystone '
|
||||
' middleware'),
|
||||
cfg.StrOpt('datadog_api_key',
|
||||
help='API key for datadog alerting'),
|
||||
cfg.StrOpt('datadog_app_key',
|
||||
|
|
|
@ -0,0 +1,95 @@
|
|||
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ConfigParser
|
||||
import importlib
|
||||
|
||||
from oslo.config import cfg
|
||||
from pecan import request
|
||||
|
||||
from libra.openstack.common import log
|
||||
from libra.common.api.lbaas import db_session, AdminAuth
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
def get_limited_to_project(headers):
    """Return the tenant the request should be limited to.

    Logs one audit line per Admin API request and returns the value of
    the X-Tenant-Id header (None when the header is absent).
    """
    tenant_id = headers.get('X-Tenant-Id')
    env = request.environ
    LOG.info(
        'Admin API {0} request {1} ({2}) from {3} tenant {4}'.format(
            env.get('REQUEST_METHOD'),
            env.get('PATH_INFO'),
            env.get('QUERY_STRING'),
            env.get('REMOTE_ADDR'),
            tenant_id
        )
    )
    return tenant_id
|
||||
|
||||
|
||||
def tenant_is_type(headers, tenant_types):
    """ Check the tenant ID is a user of the Admin API and allowed to use the
    API command specified
    """
    tenant_id = get_limited_to_project(headers)
    if not tenant_id:
        return False
    with db_session() as session:
        # Count AdminAuth rows matching this tenant at any of the
        # requested privilege levels; > 0 means authorized.
        match_count = session.query(AdminAuth).\
            filter(AdminAuth.tenant_id == tenant_id).\
            filter(AdminAuth.level.in_(tenant_types)).count()
        session.commit()
        return match_count > 0
|
||||
|
||||
|
||||
def tenant_is_user(headers):
    """Return True when the requesting tenant has USER or ADMIN level."""
    return tenant_is_type(headers, ['USER', 'ADMIN'])
|
||||
|
||||
|
||||
def tenant_is_admin(headers):
    """Return True when the requesting tenant has ADMIN level."""
    return tenant_is_type(headers, ['ADMIN'])
|
||||
|
||||
|
||||
class AuthDirector(object):
    """ There are some paths we want to work unauthenticated. This class
    will direct intentionally unauthenticated requests to the relevant
    controllers. """

    def __init__(self, app):
        self.unauthed_app = app
        # With keystone disabled (testing only) every request is served
        # by the unauthenticated app; otherwise wrap it in the
        # configured keystone middleware.
        if cfg.CONF['admin_api']['disable_keystone']:
            self.app = app
        else:
            self.app = self._install()

    def __call__(self, env, start_response):
        # Version-discovery paths are always served unauthenticated.
        if env['PATH_INFO'] in ('/', '/v1', '/v1/', '/v2.0', '/v2.0/'):
            return self.unauthed_app(env, start_response)
        return self.app(env, start_response)

    def _install(self):
        """Install ACL check on application."""
        config = ConfigParser.SafeConfigParser()
        config.read(cfg.CONF['config_file'])
        # keystone_module is 'some.module:ClassName'
        module_details = cfg.CONF['admin_api']['keystone_module'].split(':')
        keystone = importlib.import_module(module_details[0])
        auth_class = getattr(keystone, module_details[1])
        return auth_class(self.unauthed_app, config._sections['keystone'])
|
|
@ -37,6 +37,7 @@ from libra.admin_api.device_pool.manage_pool import Pool
|
|||
from libra.admin_api.expunge.expunge import ExpungeScheduler
|
||||
from libra.admin_api import config as api_config
|
||||
from libra.admin_api import model
|
||||
from libra.admin_api import acl
|
||||
from libra.openstack.common import importutils
|
||||
from libra.openstack.common import log as logging
|
||||
from libra.common.log import get_descriptors
|
||||
|
@ -90,7 +91,9 @@ def setup_app(pecan_config):
|
|||
True)
|
||||
)
|
||||
|
||||
return app
|
||||
final_app = acl.AuthDirector(app)
|
||||
|
||||
return final_app
|
||||
|
||||
|
||||
class MaintThreads(object):
|
||||
|
|
|
@ -14,13 +14,19 @@
|
|||
# under the License.
|
||||
|
||||
from pecan import expose, response
|
||||
from v1 import V1Controller
|
||||
from v1.v1 import V1Controller
|
||||
from v2.v2_0 import V2Controller
|
||||
from libra.admin_api.model.responses import Responses
|
||||
|
||||
|
||||
class RootController(object):
|
||||
"""root control object."""
|
||||
|
||||
    @expose('json')
    def index(self):
        """Return the list of supported API versions for the root URL."""
        response.status = 200
        return Responses.versions
|
||||
|
||||
@expose('json')
|
||||
def _default(self):
|
||||
"""default route.. acts as catch all for any wrong urls.
|
||||
|
@ -32,6 +38,8 @@ class RootController(object):
|
|||
def _lookup(self, primary_key, *remainder):
|
||||
if primary_key == 'v1':
|
||||
return V1Controller(), remainder
|
||||
if primary_key == 'v2.0':
|
||||
return V2Controller(), remainder
|
||||
else:
|
||||
response.status = 404
|
||||
return Responses._default
|
||||
|
|
|
@ -0,0 +1,13 @@
|
|||
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
|
@ -21,6 +21,11 @@ from libra.admin_api.model.responses import Responses
|
|||
class V1Controller(object):
|
||||
"""v1 control object."""
|
||||
|
||||
@expose('json')
|
||||
def index(self):
|
||||
response.status = 200
|
||||
return Responses.versions_v1
|
||||
|
||||
@expose('json')
|
||||
def _default(self):
|
||||
"""default route.. acts as catch all for any wrong urls.
|
|
@ -0,0 +1,13 @@
|
|||
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
|
@ -0,0 +1,251 @@
|
|||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
# Copyright 2014 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# pecan imports
|
||||
import ipaddress
|
||||
from pecan import expose, request, response
|
||||
from pecan.rest import RestController
|
||||
from libra.admin_api.library.rebuild import rebuild_device
|
||||
from libra.common.api.lbaas import LoadBalancer, Device, db_session
|
||||
from libra.common.api.lbaas import loadbalancers_devices, Vip
|
||||
from libra.openstack.common import log
|
||||
from libra.admin_api.stats.stats_gearman import GearJobs
|
||||
from libra.admin_api.acl import tenant_is_admin, tenant_is_user
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class DevicesController(RestController):
    @expose('json')
    def get(
        self, device_id=None, status=None, name=None, ip=None, vip=None
    ):
        """
        Gets either a list of all devices or a single device details.

        :param device_id: id of device (unless getall)
        :param status: filter by status, or the special value 'discover'
                       which routes the request to self.discover()
        :param name: filter by device name (should match at most one)
        :param ip: filter by floating IP address
        :param vip: filter by VIP address
        Url:
           GET /devices
           List all configured devices
        Url:
           GET /devices/{device_id}
           List details of a particular device
        Returns: dict
        """

        # Work around routing issue in Pecan, doesn't work as a separate
        # class due to this get accepting more than one parameter
        if status == 'discover':
            return self.discover(device_id)

        if not tenant_is_user(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )

        with db_session() as session:
            # if we don't have an id then we want a list of all devices
            if not device_id:
                # return all devices
                device = {'devices': []}

                devices = session.query(
                    Device.id, Device.az, Device.updated, Device.created,
                    Device.status, Device.name, Device.type,
                    Device.floatingIpAddr.label('ip'), Vip.id.label('vipid'),
                    Vip.ip.label('vip')).outerjoin(Device.vip)

                if vip is not None:
                    # Search devices by vip, should only return one
                    vip_num = int(ipaddress.IPv4Address(unicode(vip)))
                    devices = devices.filter(Vip.ip == vip_num)

                if status is not None:
                    # Search devices by status
                    status = status.upper()
                    if status not in ['OFFLINE', 'ONLINE', 'ERROR']:
                        # Invalid status specified
                        response.status = 400
                        return dict(
                            faultcode="Client",
                            faultstring="Invalid status: " + status
                        )
                    devices = devices.filter(Device.status == status)
                if name is not None:
                    # Search devices by name, should only return one
                    devices = devices.filter(Device.name == name)
                if ip is not None:
                    # Search devices by IP, should only return one
                    devices = devices.filter(Device.floatingIpAddr == ip)

                # Fix: the result of .all() was previously discarded,
                # executing the query once for nothing and again when
                # iterating below.
                devices = devices.all()

                for item in devices:
                    dev = item._asdict()
                    if dev['vip']:
                        # Expand the joined Vip columns into the vip
                        # sub-document the API exposes.
                        dev['vip'] = [{
                            "id": dev['vipid'],
                            "address": str(ipaddress.IPv4Address(dev['vip']))
                        }]
                    else:
                        dev['vip'] = []
                    del dev['vipid']
                    device['devices'].append(dev)
            else:
                # return device detail
                device = session.query(
                    Device.id, Device.az, Device.updated, Device.created,
                    Device.status, Device.floatingIpAddr.label('ip'),
                    Device.name, Device.type, Vip.id.label('vipid'),
                    Vip.ip.label('vip')
                ).outerjoin(Device.vip).filter(Device.id == device_id).first()

                if not device:
                    response.status = 404
                    session.rollback()
                    # Fix: message previously read "...<id>not found"
                    # (missing space).
                    return dict(
                        faultcode="Client",
                        faultstring="device id " + device_id + " not found"
                    )

                device = device._asdict()
                if device['vip']:
                    device['vip'] = [{
                        "id": device['vipid'],
                        "address": str(ipaddress.IPv4Address(device['vip']))
                    }]
                else:
                    device['vip'] = []
                del device['vipid']

                device['loadBalancers'] = []

                if device['status'] != "OFFLINE":
                    # Attach the ids/tenants of LBs hosted on this device.
                    lbids = session.query(
                        loadbalancers_devices.c.loadbalancer).\
                        filter(
                            loadbalancers_devices.c.device == device['id']
                        ).\
                        all()

                    lblist = [i[0] for i in lbids]
                    lbs = session.query(
                        LoadBalancer.id, LoadBalancer.tenantid).\
                        filter(LoadBalancer.id.in_(lblist)).all()

                    if lbs:
                        for item in lbs:
                            lb = item._asdict()
                            device['loadBalancers'].append(lb)

            session.commit()
            response.status = 200
            return device

    @expose('json')
    def delete(self, device_id):
        """ Deletes a given device

        :param device_id: id of device to delete
        Urls:
           DELETE /devices/{device_id}
        Returns: None
        """

        if not tenant_is_admin(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )

        with db_session() as session:
            # check for the device
            device = session.query(Device.id).\
                filter(Device.id == device_id).first()

            if device is None:
                session.rollback()
                response.status = 404
                return dict(
                    faultcode="Client",
                    faultstring="Device " + device_id + " not found"
                )

            # Is the device attached to a LB
            lb = session.query(
                loadbalancers_devices.c.loadbalancer).\
                filter(loadbalancers_devices.c.device == device_id).\
                all()

            if lb:
                # Device still hosts load balancers: rebuild it instead
                # of deleting.
                resp = rebuild_device(device_id)
                response.status = resp[0]
                return resp[1]
            # If we get here there are no load balancers so delete device
            response.status = 204
            try:
                device = session.query(Device).\
                    filter(Device.id == device_id).first()
                device.status = 'DELETED'
                session.commit()
                return None
            except Exception:
                # Fix: bare 'except:' narrowed so SystemExit /
                # KeyboardInterrupt are not swallowed.
                session.rollback()
                LOG.exception('Error deleting device from pool')
                response.status = 500
                return dict(
                    faultcode="Server",
                    faultstring="Error deleting device from pool"
                )
            # Fix: removed an unreachable trailing 'return None' (both
            # try/except paths already return).

    def discover(self, device_id):
        """
        Discovers information about a given libra worker based on device ID
        """

        if not tenant_is_user(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )

        with db_session() as session:
            device = session.query(Device.name).\
                filter(Device.id == device_id).scalar()
            session.commit()
            # Fix: the None check previously ran on str(device), which is
            # never None (str(None) == 'None'), so the 404 branch was
            # unreachable.  Check the raw query result first.
            if device is None:
                response.status = 404
                return dict(
                    faultcode="Client",
                    faultstring="Device " + device_id + " not found"
                )
            device_name = str(device)
            gearman = GearJobs()
            discover = gearman.get_discover(device_name)
            if discover is None:
                response.status = 500
                return dict(
                    faultcode="Server",
                    faultstring="Could not discover device"
                )
            return dict(
                id=device_id, version=discover['version'],
                release=discover['release']
            )
|
|
@ -0,0 +1,193 @@
|
|||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
# Copyright 2014 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# pecan imports
|
||||
import ipaddress
|
||||
from pecan import expose, request, response
|
||||
from pecan.rest import RestController
|
||||
from libra.common.api.lbaas import LoadBalancer, Device, db_session
|
||||
from libra.common.api.lbaas import Vip, Node, HealthMonitor
|
||||
from libra.openstack.common import log
|
||||
from libra.admin_api.acl import tenant_is_user
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class LoadBalancersController(RestController):

    @expose('json')
    def get(
        self, lb_id=None, status=None, tenant=None, name=None, ip=None,
        vip=None
    ):
        """
        Gets either a list of all loadbalancers or a details for a single
        loadbalancer.

        :param lb_id: id of the loadbalancer (unless getall)
        :param status: filter list by loadbalancer status
        :param tenant: filter list by owning tenant id
        :param name: filter list by loadbalancer name
        :param ip: filter list by device floating IP
        :param vip: filter list by VIP address
        Url:
           GET /loadbalancers
           List all loadbalancers
        Url:
           GET /loadbalancers/{lb_id}
           List details of a particular device
        Returns: dict
        """

        if not tenant_is_user(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )

        with db_session() as session:
            # if there is no lb_id then we want a list of loadbalancers
            if not lb_id:
                loadbalancers = {'loadBalancers': []}

                lbs = session.query(
                    LoadBalancer.id, LoadBalancer.name, LoadBalancer.status,
                    LoadBalancer.tenantid, Vip.id.label('vipid'),
                    Vip.ip.label('vip'),
                    Device.floatingIpAddr.label('ip'),
                    LoadBalancer.protocol, LoadBalancer.algorithm,
                    LoadBalancer.port, LoadBalancer.created,
                    LoadBalancer.updated
                ).join(LoadBalancer.devices).join(Device.vip)

                if status is not None:
                    if status not in ('ACTIVE', 'BUILD', 'DEGRADED', 'ERROR'):
                        response.status = 400
                        return dict(
                            faultcode="Client",
                            faultstring="Invalid status: " + status
                        )
                    lbs = lbs.filter(LoadBalancer.status == status)

                if tenant is not None:
                    lbs = lbs.filter(LoadBalancer.tenantid == tenant)

                if name is not None:
                    lbs = lbs.filter(LoadBalancer.name == name)

                if ip is not None:
                    lbs = lbs.filter(Device.floatingIpAddr == ip)

                if vip is not None:
                    # VIPs are stored as integers in the DB
                    vip_num = int(ipaddress.IPv4Address(unicode(vip)))
                    lbs = lbs.filter(Vip.ip == vip_num)

                # Fix: the result of .all() was previously discarded,
                # executing the query once for nothing and again when
                # iterating below.
                lbs = lbs.all()

                for item in lbs:
                    lb = item._asdict()
                    if lb['vip']:
                        lb['vip'] = [{
                            "id": lb['vipid'],
                            "address": str(ipaddress.IPv4Address(lb['vip']))
                        }]
                        # Fix: a stray del(lb['vip']) here deleted the vip
                        # block that was just built, inconsistent with the
                        # else branch which keeps [None].  Only the helper
                        # column is dropped now.
                        del lb['vipid']
                    else:
                        lb['vip'] = [None]
                        del lb['vipid']
                    loadbalancers['loadBalancers'].append(lb)

            else:
                lbs = session.query(
                    LoadBalancer.name, LoadBalancer.id, LoadBalancer.protocol,
                    LoadBalancer.port, LoadBalancer.algorithm,
                    LoadBalancer.status, LoadBalancer.created,
                    LoadBalancer.updated, LoadBalancer.errmsg,
                    Device.id.label('device'),
                    Vip.id.label('vipid'), Vip.ip.label('vip')
                ).join(LoadBalancer.devices).\
                    outerjoin(Device.vip).\
                    filter(LoadBalancer.id == lb_id).\
                    first()

                if not lbs:
                    response.status = 404
                    return dict(
                        faultcode="Client",
                        faultstring="Loadbalancer " + lb_id + " not found"
                    )
                loadbalancers = lbs._asdict()
                nodes = session.query(
                    Node.id, Node.address, Node.port, Node.status,
                    Node.enabled, Node.weight
                ).filter(Node.lbid == lb_id).all()
                loadbalancers['nodes'] = []

                for item in nodes:
                    node = item._asdict()
                    # Translate the DB 'enabled' flag into the public
                    # ENABLED/DISABLED condition field.
                    if node['enabled'] == 1:
                        node['condition'] = 'ENABLED'
                    else:
                        node['condition'] = 'DISABLED'
                    del node['enabled']
                    node['port'] = str(node['port'])
                    node['id'] = str(node['id'])
                    if node['weight'] == 1:
                        del node['weight']
                    loadbalancers['nodes'].append(node)

                if loadbalancers['vip']:
                    loadbalancers['vip'] = [{
                        "id": loadbalancers['vipid'],
                        "address": str(
                            ipaddress.IPv4Address(loadbalancers['vip'])
                        )
                    }]
                    # Fix: as above, keep the constructed vip block and
                    # drop only the helper column.
                    del loadbalancers['vipid']
                else:
                    loadbalancers['vip'] = [None]
                    del loadbalancers['vipid']
                if not loadbalancers['errmsg']:
                    loadbalancers['statusDescription'] = None
                else:
                    loadbalancers['statusDescription'] =\
                        loadbalancers['errmsg']
                del loadbalancers['errmsg']

                monitor = session.query(
                    HealthMonitor.type, HealthMonitor.delay,
                    HealthMonitor.timeout, HealthMonitor.attempts,
                    HealthMonitor.path
                ).join(LoadBalancer.monitors).\
                    filter(LoadBalancer.id == lb_id).first()

                if monitor is None:
                    monitor_data = {}
                else:
                    monitor_data = {
                        'type': monitor.type,
                        'delay': monitor.delay,
                        'timeout': monitor.timeout,
                        'attemptsBeforeDeactivation': monitor.attempts
                    }
                    if monitor.path:
                        monitor_data['path'] = monitor.path

                loadbalancers['monitor'] = monitor_data

            session.commit()

            return loadbalancers

    # TODO: we should be able to delete loadbalancers, require lb_id, name,
    # tenant and a confirm flag for verification
|
|
@ -0,0 +1,260 @@
|
|||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
# Copyright 2014 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# pecan imports
|
||||
import ConfigParser
|
||||
import socket
|
||||
import json
|
||||
from pecan import expose, response, request, conf
|
||||
from pecan.rest import RestController
|
||||
from libra.common.api.lbaas import Device, db_session
|
||||
from libra.common.api.lbaas import Vip, Limits, Counters, TenantLimits
|
||||
from libra.openstack.common import log
|
||||
from libra.admin_api.acl import tenant_is_admin, tenant_is_user
|
||||
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
from oslo.config import cfg
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class LimitsController(RestController):
    """ a sub-controller for StatusController """
    @expose('json')
    def get_one(self, tenant_id):
        """Return the per-tenant loadbalancer limit (None when unset)."""
        if not tenant_is_user(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        ret = {}
        with db_session() as session:
            limit = session.query(TenantLimits.loadbalancers).\
                filter(TenantLimits.tenantid == tenant_id).scalar()

            ret['maxLoadBalancers'] = limit
            session.commit()
            return ret

    @expose('json')
    def get_all(self):
        """Return all global limit name/value pairs."""
        if not tenant_is_user(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        ret = {}
        with db_session() as session:
            limits = session.query(Limits.name, Limits.value).all()
            if limits is None:
                response.status = 500
                return dict(
                    faultcode="Server",
                    faultstring="Error obtaining limits"
                )
            for limit in limits:
                ret[limit.name] = limit.value
            session.commit()
            return ret

    @expose('json')
    def put(self, tenant_id=None):
        """Update global limits (no tenant_id) or one tenant's limit."""
        if not tenant_is_admin(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        try:
            data = json.loads(request.body)
        except ValueError:
            # Fix: bare 'except:' narrowed to the decode error json
            # actually raises, so unrelated failures are not masked
            # as a client 400.
            response.status = 400
            return dict(
                faultcode="Client",
                faultstring="Invalid JSON received"
            )
        with db_session() as session:
            if tenant_id is None:
                # Global limits: every key in the body must already exist.
                for key, value in data.iteritems():
                    limit = session.query(Limits).filter(Limits.name == key).\
                        first()
                    if limit is None:
                        session.rollback()
                        response.status = 400
                        return dict(
                            faultcode="Client",
                            faultstring="Limit not found: {0}".format(key)
                        )
                    limit.value = value
            else:
                if 'maxLoadBalancers' in data:
                    # Update the existing per-tenant row or create one.
                    limit = session.query(TenantLimits).\
                        filter(TenantLimits.tenantid == tenant_id).first()
                    if limit is not None:
                        limit.loadbalancers = data['maxLoadBalancers']
                    else:
                        new_limit = TenantLimits()
                        new_limit.tenantid = tenant_id
                        new_limit.loadbalancers = data['maxLoadBalancers']
                        session.add(new_limit)
                else:
                    session.rollback()
                    response.status = 400
                    return dict(
                        faultcode="Client",
                        faultstring="No user settable limit in json"
                    )
            session.commit()
|
||||
|
||||
|
||||
class PoolController(RestController):
    @expose('json')
    def get(self):
        """Report device and vip pool usage, broken down by state."""
        if not tenant_is_user(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        NULL = None  # For pep8
        with db_session() as session:
            def device_count(state):
                # One count query per device status bucket.
                return session.query(Device).\
                    filter(Device.status == state).count()

            status = {
                "devices": {
                    "used": device_count('ONLINE'),
                    "available": device_count('OFFLINE'),
                    "error": device_count('ERROR'),
                    "pendingDelete": device_count('DELETED')
                },
                "vips": {
                    # vip assigned to a device
                    "used": session.query(Vip).
                    filter(Vip.device > 0).count(),
                    # vip with no device at all
                    "available": session.query(Vip).
                    filter(Vip.device == NULL).count(),
                    # vip stuck on device id 0
                    "bad": session.query(Vip).
                    filter(Vip.device == 0).count()
                }
            }
            session.commit()
            return status
|
||||
|
||||
|
||||
class ServiceController(RestController):
    @expose('json')
    def get(self):
        """Health-check the MySQL and gearman servers this API depends on.

        Returns a dict with 'mysql' and 'gearman' lists, each entry an
        {"ip": ..., "status": 'ONLINE'|'OFFLINE'} record.
        """
        if not tenant_is_user(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )

        ret = {
            'mysql': [],
            'gearman': []
        }
        config = ConfigParser.SafeConfigParser()
        config.read(cfg.CONF['config_file'])

        # Connect to all MySQL servers and test
        for section in conf.database:
            db_conf = config._sections[section]
            conn_string = '''mysql+mysqlconnector://%s:%s@%s:%s/%s''' % (
                db_conf['username'],
                db_conf['password'],
                db_conf['host'],
                db_conf['port'],
                db_conf['schema']
            )

            if 'ssl_key' in db_conf:
                ssl_args = {'ssl': {
                    'cert': db_conf['ssl_cert'],
                    'key': db_conf['ssl_key'],
                    'ca': db_conf['ssl_ca']
                }}

                engine = create_engine(
                    conn_string, isolation_level="READ COMMITTED",
                    pool_size=1, connect_args=ssl_args, pool_recycle=3600
                )
            else:
                engine = create_engine(
                    conn_string, isolation_level="READ COMMITTED",
                    pool_size=1, pool_recycle=3600
                )
            session = sessionmaker(bind=engine)()
            try:
                session.execute("SELECT 1")
                session.close()
                ret['mysql'].append(
                    {"ip": db_conf['host'], "status": 'ONLINE'}
                )
            except Exception:
                # Fix: bare 'except:' narrowed; any DB failure marks the
                # server OFFLINE but SystemExit etc. still propagate.
                ret['mysql'].append(
                    {"ip": db_conf['host'], "status": 'OFFLINE'}
                )

        # Socket connect to all gearman servers, TODO: a better gearman test
        for server in conf.gearman.server:
            ghost, gport = server.split(':')
            try:
                sock = socket.socket()
                sock.settimeout(5)
                sock.connect((ghost, int(gport)))
                sock.close()
                ret['gearman'].append({"ip": ghost, "status": 'ONLINE'})
            except socket.error:
                ret['gearman'].append({"ip": ghost, "status": 'OFFLINE'})
                try:
                    sock.close()
                except Exception:
                    # Fix: bare 'except:' narrowed; best-effort close of
                    # a possibly half-open socket.
                    pass

        return ret
|
||||
|
||||
|
||||
class CountersController(RestController):
    @expose('json')
    def get(self):
        """Return every counter as (name, value) rows."""
        if not tenant_is_user(request.headers):
            response.status = 401
            return dict(
                faultcode="Client",
                faultstring="Client not authorized to access this function"
            )
        with db_session() as session:
            return session.query(Counters.name, Counters.value).all()
|
||||
|
||||
|
||||
class StatusController(RestController):
    """Aggregates the status sub-controllers under one URL prefix."""
    pool = PoolController()          # /status/pool
    service = ServiceController()    # /status/service
    counters = CountersController()  # /status/counters
    limits = LimitsController()      # /status/limits
|
|
@ -0,0 +1,191 @@
|
|||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
# Copyright 2014 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
# pecan imports
|
||||
from pecan import expose, request, response
|
||||
from pecan.rest import RestController
|
||||
from libra.openstack.common import log
|
||||
from libra.admin_api.acl import tenant_is_user, tenant_is_admin
|
||||
from libra.common.api.lbaas import db_session, AdminAuth
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class UserController(RestController):
|
||||
@expose('json')
|
||||
def get_all(self):
|
||||
"""
|
||||
Get a list of users
|
||||
"""
|
||||
if not tenant_is_admin(request.headers):
|
||||
response.status = 401
|
||||
return dict(
|
||||
faultcode="Client",
|
||||
faultstring="Client not authorized to access this function"
|
||||
)
|
||||
|
||||
with db_session() as session:
|
||||
user = session.query(
|
||||
AdminAuth.tenant_id.label('tenant'), AdminAuth.level
|
||||
).all()
|
||||
session.commit()
|
||||
return user
|
||||
|
||||
@expose('json')
|
||||
def get_one(self, tenant_id=None):
|
||||
"""
|
||||
Get a single Admin API user or details about self
|
||||
"""
|
||||
if not tenant_is_user(request.headers):
|
||||
response.status = 401
|
||||
return dict(
|
||||
faultcode="Client",
|
||||
faultstring="Client not authorized to access this function"
|
||||
)
|
||||
|
||||
with db_session() as session:
|
||||
user = session.query(AdminAuth).\
|
||||
filter(AdminAuth.tenant_id == tenant_id).first()
|
||||
if user is None:
|
||||
response.status = 404
|
||||
return dict(
|
||||
faultcode="Client",
|
||||
faultstatus="User not found"
|
||||
)
|
||||
ret = {
|
||||
"tenant": user.tenant_id,
|
||||
"level": user.level
|
||||
}
|
||||
session.commit()
|
||||
return ret
|
||||
|
||||
@expose('json')
|
||||
def delete(self, tenant_id):
|
||||
""" Delete a given user from the Admin API """
|
||||
if not tenant_is_admin(request.headers):
|
||||
response.status = 401
|
||||
return dict(
|
||||
faultcode="Client",
|
||||
faultstring="Client not authorized to access this function"
|
||||
)
|
||||
with db_session() as session:
|
||||
user_test = session.query(AdminAuth).\
|
||||
filter(AdminAuth.tenant_id == tenant_id).count()
|
||||
if user_test == 0:
|
||||
response.status = 404
|
||||
return dict(
|
||||
faultcode="Client",
|
||||
faultstring="Tenant not found"
|
||||
)
|
||||
session.query(AdminAuth).\
|
||||
filter(AdminAuth.tenant_id == tenant_id).delete()
|
||||
session.commit()
|
||||
response.status = 204
|
||||
return None
|
||||
|
||||
@expose('json')
|
||||
def post(self):
|
||||
""" Add a new user to the Admin API """
|
||||
if not tenant_is_admin(request.headers):
|
||||
response.status = 401
|
||||
return dict(
|
||||
faultcode="Client",
|
||||
faultstring="Client not authorized to access this function"
|
||||
)
|
||||
try:
|
||||
data = json.loads(request.body)
|
||||
except:
|
||||
response.status = 400
|
||||
return dict(
|
||||
faultcode="Client",
|
||||
faultstring="Invalid JSON received"
|
||||
)
|
||||
if data['tenant'] is None:
|
||||
response.status = 400
|
||||
return dict(
|
||||
faultcode="Client",
|
||||
faultstring="Tenant ID required"
|
||||
)
|
||||
tenant_id = data['tenant']
|
||||
if 'level' not in data:
|
||||
level = 'USER'
|
||||
elif data['level'] not in ['USER', 'ADMIN']:
|
||||
response.status = 400
|
||||
return dict(
|
||||
faultcode="Client",
|
||||
faultstring="Only USER or ADMIN levels allowed"
|
||||
)
|
||||
else:
|
||||
level = data['level']
|
||||
with db_session() as session:
|
||||
user_test = session.query(AdminAuth).\
|
||||
filter(AdminAuth.tenant_id == tenant_id).count()
|
||||
if user_test > 0:
|
||||
response.status = 400
|
||||
return dict(
|
||||
faultcode="Client",
|
||||
faultstring="Tenant already has an account"
|
||||
)
|
||||
user = AdminAuth()
|
||||
user.tenant_id = tenant_id
|
||||
user.level = level
|
||||
session.add(user)
|
||||
session.commit()
|
||||
|
||||
@expose('json')
|
||||
def put(self, tenant_id):
|
||||
""" Change the leve for an Admin API user """
|
||||
if not tenant_is_admin(request.headers):
|
||||
response.status = 401
|
||||
return dict(
|
||||
faultcode="Client",
|
||||
faultstring="Client not authorized to access this function"
|
||||
)
|
||||
try:
|
||||
data = json.loads(request.body)
|
||||
except:
|
||||
response.status = 400
|
||||
return dict(
|
||||
faultcode="Client",
|
||||
faultstring="Invalid JSON received"
|
||||
)
|
||||
if tenant_id is None:
|
||||
response.status = 400
|
||||
return dict(
|
||||
faultcode="Client",
|
||||
faultstring="Tenant ID required"
|
||||
)
|
||||
if not data['level']:
|
||||
level = 'USER'
|
||||
elif data['level'] not in ['USER', 'ADMIN']:
|
||||
response.status = 400
|
||||
return dict(
|
||||
faultcode="Client",
|
||||
faultstring="Only USER or ADMIN levels allowed"
|
||||
)
|
||||
else:
|
||||
level = data['level']
|
||||
with db_session() as session:
|
||||
user = session.query(AdminAuth).\
|
||||
filter(AdminAuth.tenant_id == tenant_id).first()
|
||||
if not user:
|
||||
response.status = 404
|
||||
return dict(
|
||||
faultcode="Client",
|
||||
faultstring="Tenant does not have an account"
|
||||
)
|
||||
user.level = level
|
||||
session.commit()
|
|
@ -0,0 +1,42 @@
|
|||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from pecan import expose, response
|
||||
from devices import DevicesController
|
||||
from loadbalancers import LoadBalancersController
|
||||
from status import StatusController
|
||||
from user import UserController
|
||||
from libra.admin_api.model.responses import Responses
|
||||
|
||||
|
||||
class V2Controller(object):
|
||||
"""v2 control object."""
|
||||
|
||||
@expose('json')
|
||||
def index(self):
|
||||
response.status = 200
|
||||
return Responses.versions_v2_0
|
||||
|
||||
@expose('json')
|
||||
def _default(self):
|
||||
"""default route.. acts as catch all for any wrong urls.
|
||||
For now it returns a 404 because no action is defined for /"""
|
||||
response.status = 404
|
||||
return Responses._default
|
||||
|
||||
devices = DevicesController()
|
||||
loadbalancers = LoadBalancersController()
|
||||
status = StatusController()
|
||||
user = UserController()
|
|
@ -21,6 +21,7 @@ from oslo.config import cfg
|
|||
from sqlalchemy import func
|
||||
|
||||
from libra.common.api.lbaas import Device, PoolBuilding, Vip, db_session
|
||||
from libra.common.api.lbaas import Counters
|
||||
from libra.common.json_gearman import JSONGearmanClient
|
||||
from libra.openstack.common import log
|
||||
|
||||
|
@ -76,6 +77,10 @@ class Pool(object):
|
|||
'name': device.name
|
||||
}
|
||||
message.append(dict(task='libra_pool_mgm', data=job_data))
|
||||
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'devices_deleted').first()
|
||||
counter.value += len(devices)
|
||||
session.commit()
|
||||
if not message:
|
||||
LOG.info("No devices to delete")
|
||||
|
@ -336,6 +341,9 @@ class GearmanWork(object):
|
|||
vip.ip = int(ipaddress.IPv4Address(unicode(data['ip'])))
|
||||
with db_session() as session:
|
||||
session.add(vip)
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'vips_built').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
|
||||
def _add_node(self, data):
|
||||
|
@ -352,6 +360,9 @@ class GearmanWork(object):
|
|||
device.created = None
|
||||
with db_session() as session:
|
||||
session.add(device)
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'devices_built').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
|
||||
def _add_bad_node(self, data):
|
||||
|
@ -370,4 +381,7 @@ class GearmanWork(object):
|
|||
device.created = None
|
||||
with db_session() as session:
|
||||
session.add(device)
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'devices_bad_built').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
|
|
|
@ -17,7 +17,7 @@ import threading
|
|||
from datetime import datetime, timedelta
|
||||
from oslo.config import cfg
|
||||
|
||||
from libra.common.api.lbaas import LoadBalancer, db_session
|
||||
from libra.common.api.lbaas import LoadBalancer, db_session, Counters
|
||||
from libra.openstack.common import log
|
||||
|
||||
|
||||
|
@ -60,6 +60,9 @@ class ExpungeScheduler(object):
|
|||
LoadBalancer.status
|
||||
).filter(LoadBalancer.updated < exp_time).\
|
||||
filter(LoadBalancer.status == 'DELETED').delete()
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'loadbalancers_expunged').first()
|
||||
counter.value += count
|
||||
session.commit()
|
||||
LOG.info(
|
||||
'{0} deleted load balancers expunged'.format(count)
|
||||
|
|
|
@ -0,0 +1,13 @@
|
|||
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
|
@ -0,0 +1,99 @@
|
|||
# Copyright 2014 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ipaddress
|
||||
from libra.common.api.lbaas import loadbalancers_devices, Vip, Counters
|
||||
from libra.common.api.lbaas import Device, LoadBalancer, db_session
|
||||
from libra.common.api.gearman_client import submit_job, submit_vip_job
|
||||
from libra.openstack.common import log
|
||||
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
def rebuild_device(device_id):
|
||||
new_device_id = None
|
||||
new_device_name = None
|
||||
with db_session() as session:
|
||||
new_device = session.query(Device).\
|
||||
filter(~Device.id.in_(
|
||||
session.query(loadbalancers_devices.c.device)
|
||||
)).\
|
||||
filter(Device.status == "OFFLINE").\
|
||||
filter(Device.pingCount == 0).\
|
||||
with_lockmode('update').\
|
||||
first()
|
||||
if new_device is None:
|
||||
session.rollback()
|
||||
LOG.error(
|
||||
'No spare devices when trying to rebuild device {0}'
|
||||
.format(device_id)
|
||||
)
|
||||
return (
|
||||
500,
|
||||
dict(
|
||||
faultcode="Server",
|
||||
faultstring='No spare devices when trying to rebuild '
|
||||
'device {0}'.format(device_id)
|
||||
)
|
||||
)
|
||||
new_device_id = new_device.id
|
||||
new_device_name = new_device.name
|
||||
LOG.info(
|
||||
"Moving device {0} to device {1}"
|
||||
.format(device_id, new_device_id)
|
||||
)
|
||||
lbs = session.query(LoadBalancer).\
|
||||
join(LoadBalancer.devices).\
|
||||
filter(Device.id == device_id).all()
|
||||
for lb in lbs:
|
||||
lb.devices = [new_device]
|
||||
lb.status = "ERROR(REBUILDING)"
|
||||
new_device.status = 'BUILDING'
|
||||
lbid = lbs[0].id
|
||||
session.commit()
|
||||
submit_job(
|
||||
'UPDATE', new_device_name, new_device_id, lbid
|
||||
)
|
||||
with db_session() as session:
|
||||
new_device = session.query(Device).\
|
||||
filter(Device.id == new_device_id).first()
|
||||
vip = session.query(Vip).filter(Vip.device == device_id).first()
|
||||
if vip:
|
||||
vip.device = new_device_id
|
||||
device = session.query(Device).\
|
||||
filter(Device.id == device_id).first()
|
||||
device.status = 'DELETED'
|
||||
lbs = session.query(LoadBalancer).\
|
||||
join(LoadBalancer.devices).\
|
||||
filter(Device.id == new_device_id).all()
|
||||
for lb in lbs:
|
||||
lb.errmsg = "Load Balancer rebuild on new device"
|
||||
if vip:
|
||||
LOG.info(
|
||||
"Moving IP {0} and marking device {1} for deletion"
|
||||
.format(str(ipaddress.IPv4Address(vip.ip)), device_id)
|
||||
)
|
||||
submit_vip_job(
|
||||
'ASSIGN', new_device_name, vip.id
|
||||
)
|
||||
new_device.status = 'ONLINE'
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'loadbalancers_rebuild').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
return (
|
||||
200,
|
||||
dict(oldId=device_id, newId=new_device_id)
|
||||
)
|
|
@ -27,3 +27,44 @@ class Responses(object):
|
|||
|
||||
"""service_unavailable"""
|
||||
service_unavailable = {'message': 'Service Unavailable'}
|
||||
|
||||
versions = {
|
||||
"versions": [
|
||||
{
|
||||
"id": "v1",
|
||||
"updated": "2014-01-13T16:55:25Z",
|
||||
"status": "DEPRECATED"
|
||||
},
|
||||
{
|
||||
"id": "v2.0",
|
||||
"updated": "2014-01-13T16:55:25Z",
|
||||
"status": "CURRENT"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
versions_v1 = {
|
||||
"version": {
|
||||
"id": "v1",
|
||||
"updated": "2014-01-13T16:55:25Z",
|
||||
"status": "DEPRECATED",
|
||||
"media-types": [
|
||||
{
|
||||
"base": "application/json"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
versions_v2_0 = {
|
||||
"version": {
|
||||
"id": "v2",
|
||||
"updated": "2014-01-13T16:55:25Z",
|
||||
"status": "CURRENT",
|
||||
"media-types": [
|
||||
{
|
||||
"base": "application/json"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
|
|
@ -11,11 +11,10 @@
|
|||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
|
||||
import ipaddress
|
||||
from libra.admin_api.stats.drivers.base import AlertDriver
|
||||
from libra.common.api.lbaas import Device, LoadBalancer, db_session
|
||||
from libra.common.api.lbaas import loadbalancers_devices, Vip
|
||||
from libra.common.api.gearman_client import submit_job, submit_vip_job
|
||||
from libra.common.api.lbaas import loadbalancers_devices
|
||||
from libra.admin_api.library.rebuild import rebuild_device
|
||||
from libra.openstack.common import log
|
||||
|
||||
|
||||
|
@ -36,6 +35,9 @@ class DbDriver(AlertDriver):
|
|||
filter(loadbalancers_devices.c.device == device_id).\
|
||||
all()
|
||||
|
||||
# TODO: make it so that we don't get stuck in LB ERROR here when
|
||||
# a rebuild fails due to something like a bad device. Maybe have
|
||||
# an attempted rebuild count?
|
||||
for lb in lbs:
|
||||
session.query(LoadBalancer).\
|
||||
filter(LoadBalancer.id == lb[0]).\
|
||||
|
@ -71,63 +73,4 @@ class DbDriver(AlertDriver):
|
|||
session.commit()
|
||||
|
||||
def _rebuild_device(self, device_id):
|
||||
new_device_id = None
|
||||
new_device_name = None
|
||||
with db_session() as session:
|
||||
new_device = session.query(Device).\
|
||||
filter(~Device.id.in_(
|
||||
session.query(loadbalancers_devices.c.device)
|
||||
)).\
|
||||
filter(Device.status == "OFFLINE").\
|
||||
filter(Device.pingCount == 0).\
|
||||
with_lockmode('update').\
|
||||
first()
|
||||
if new_device is None:
|
||||
session.rollback()
|
||||
LOG.error(
|
||||
'No spare devices when trying to rebuild device {0}'
|
||||
.format(device_id)
|
||||
)
|
||||
return
|
||||
new_device_id = new_device.id
|
||||
new_device_name = new_device.name
|
||||
LOG.info(
|
||||
"Moving device {0} to device {1}"
|
||||
.format(device_id, new_device_id)
|
||||
)
|
||||
lbs = session.query(LoadBalancer).\
|
||||
join(LoadBalancer.devices).\
|
||||
filter(Device.id == device_id).all()
|
||||
for lb in lbs:
|
||||
lb.devices = [new_device]
|
||||
lb.status = "ERROR(REBUILDING)"
|
||||
new_device.status = 'BUILDING'
|
||||
lbid = lbs[0].id
|
||||
session.commit()
|
||||
submit_job(
|
||||
'UPDATE', new_device_name, new_device_id, lbid
|
||||
)
|
||||
with db_session() as session:
|
||||
new_device = session.query(Device).\
|
||||
filter(Device.id == new_device_id).first()
|
||||
vip = session.query(Vip).filter(Vip.device == device_id).first()
|
||||
if vip:
|
||||
vip.device = new_device_id
|
||||
device = session.query(Device).\
|
||||
filter(Device.id == device_id).first()
|
||||
device.status = 'DELETED'
|
||||
lbs = session.query(LoadBalancer).\
|
||||
join(LoadBalancer.devices).\
|
||||
filter(Device.id == new_device_id).all()
|
||||
for lb in lbs:
|
||||
lb.errmsg = "Load Balancer rebuild on new device"
|
||||
if vip:
|
||||
LOG.info(
|
||||
"Moving IP {0} and marking device {1} for deletion"
|
||||
.format(str(ipaddress.IPv4Address(vip.ip)), device_id)
|
||||
)
|
||||
submit_vip_job(
|
||||
'ASSIGN', new_device_name, vip.id
|
||||
)
|
||||
new_device.status = 'ONLINE'
|
||||
session.commit()
|
||||
rebuild_device(device_id)
|
||||
|
|
|
@ -17,7 +17,7 @@ import threading
|
|||
from datetime import datetime
|
||||
from oslo.config import cfg
|
||||
|
||||
from libra.common.api.lbaas import Device, db_session
|
||||
from libra.common.api.lbaas import Counters, Device, db_session
|
||||
from libra.admin_api.stats.stats_gearman import GearJobs
|
||||
from libra.openstack.common import log as logging
|
||||
|
||||
|
@ -143,6 +143,9 @@ class OfflineStats(object):
|
|||
)
|
||||
)
|
||||
instance.send_delete(message, data.id)
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'devices_offline_failed').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
|
||||
def start_offline_sched(self):
|
||||
|
|
|
@ -146,6 +146,24 @@ class GearJobs(object):
|
|||
failed_list.append(ping.job.task)
|
||||
return failed_list
|
||||
|
||||
def get_discover(self, name):
|
||||
# Used in the v2 devices controller
|
||||
job_data = {"hpcs_action": "DISCOVER"}
|
||||
job = self.gm_client.submit_job(
|
||||
str(name), job_data, background=False, wait_until_complete=True,
|
||||
poll_timeout=10
|
||||
)
|
||||
if job.state == JOB_UNKNOWN:
|
||||
# Gearman server failed
|
||||
return None
|
||||
elif job.timed_out:
|
||||
# Time out is a fail
|
||||
return None
|
||||
elif job.result['hpcs_response'] == 'FAIL':
|
||||
# Fail response is a fail
|
||||
return None
|
||||
return job.result
|
||||
|
||||
def get_stats(self, node_list):
|
||||
# TODO: lots of duplicated code that needs cleanup
|
||||
list_of_jobs = []
|
||||
|
|
|
@ -20,7 +20,7 @@ from wsme.exc import ClientSideError
|
|||
from wsme import Unset
|
||||
from urllib import quote
|
||||
from libra.common.api.lbaas import LoadBalancer, db_session
|
||||
from libra.common.api.lbaas import Device, HealthMonitor
|
||||
from libra.common.api.lbaas import Device, HealthMonitor, Counters
|
||||
from libra.api.acl import get_limited_to_project
|
||||
from libra.api.model.validators import LBMonitorPut, LBMonitorResp
|
||||
from libra.common.api.gearman_client import submit_job
|
||||
|
@ -76,6 +76,10 @@ class HealthMonitorController(RestController):
|
|||
if monitor.path:
|
||||
monitor_data['path'] = monitor.path
|
||||
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'api_healthmonitor_get').first()
|
||||
counter.value += 1
|
||||
|
||||
session.commit()
|
||||
return monitor_data
|
||||
|
||||
|
@ -240,6 +244,9 @@ class HealthMonitorController(RestController):
|
|||
if ((data["path"] is not None) and (len(data["path"]) > 0)):
|
||||
return_data.path = data["path"]
|
||||
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'api_healthmonitor_modify').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
submit_job(
|
||||
'UPDATE', device.name, device.id, lb.id
|
||||
|
@ -289,6 +296,9 @@ class HealthMonitorController(RestController):
|
|||
).join(LoadBalancer.devices).\
|
||||
filter(LoadBalancer.id == self.lbid).\
|
||||
first()
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'api_healthmonitor.delete').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
submit_job(
|
||||
'UPDATE', device.name, device.id, self.lbid
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
from pecan import expose, request
|
||||
from pecan.rest import RestController
|
||||
from libra.api.acl import get_limited_to_project
|
||||
from libra.common.api.lbaas import Limits, TenantLimits, db_session
|
||||
from libra.common.api.lbaas import Limits, Counters, TenantLimits, db_session
|
||||
|
||||
|
||||
class LimitsController(RestController):
|
||||
|
@ -40,5 +40,8 @@ class LimitsController(RestController):
|
|||
resp['maxLoadBalancers'] = tenant_lblimit
|
||||
|
||||
resp = {"limits": {"absolute": {"values": resp}}}
|
||||
session.rollback()
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'api_limits_get').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
return resp
|
||||
|
|
|
@ -29,7 +29,7 @@ from logs import LogsController
|
|||
from libra.common.api.lbaas import LoadBalancer, Device, Node, db_session
|
||||
from libra.common.api.lbaas import TenantLimits
|
||||
from libra.common.api.lbaas import loadbalancers_devices, Limits, Vip, Ports
|
||||
from libra.common.api.lbaas import HealthMonitor
|
||||
from libra.common.api.lbaas import HealthMonitor, Counters
|
||||
from libra.common.exc import ExhaustedError
|
||||
from libra.api.model.validators import LBPut, LBPost, LBResp, LBVipResp
|
||||
from libra.api.model.validators import LBRespNode, LBOptions
|
||||
|
@ -206,7 +206,10 @@ class LoadBalancersController(RestController):
|
|||
del(load_balancers['timeout'])
|
||||
del(load_balancers['retries'])
|
||||
|
||||
session.rollback()
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'api_loadbalancers_get').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
response.status = 200
|
||||
return load_balancers
|
||||
|
||||
|
@ -607,6 +610,9 @@ class LoadBalancersController(RestController):
|
|||
return_data.options = LBOptions(timeout=timeout_ms,
|
||||
retries=retries)
|
||||
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'api_loadbalancers_create').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
# trigger gearman client to create new lb
|
||||
submit_job(
|
||||
|
@ -687,7 +693,9 @@ class LoadBalancersController(RestController):
|
|||
).join(LoadBalancer.devices).\
|
||||
filter(LoadBalancer.id == self.lbid).\
|
||||
first()
|
||||
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'api_loadbalancers_modify').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
submit_job(
|
||||
'UPDATE', device.name, device.id, lb.id
|
||||
|
@ -735,6 +743,10 @@ class LoadBalancersController(RestController):
|
|||
).join(LoadBalancer.devices).\
|
||||
filter(LoadBalancer.id == load_balancer_id).\
|
||||
first()
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'api_loadbalancers_delete').first()
|
||||
counter.value += 1
|
||||
|
||||
if device is None:
|
||||
# This can happen if a device was manually deleted from the DB
|
||||
lb.status = 'DELETED'
|
||||
|
|
|
@ -19,7 +19,7 @@ from pecan.rest import RestController
|
|||
import wsmeext.pecan as wsme_pecan
|
||||
from wsme.exc import ClientSideError
|
||||
from wsme import Unset
|
||||
from libra.common.api.lbaas import LoadBalancer, Device, db_session
|
||||
from libra.common.api.lbaas import LoadBalancer, Device, db_session, Counters
|
||||
from libra.api.acl import get_limited_to_project
|
||||
from libra.api.model.validators import LBLogsPost
|
||||
from libra.common.api.gearman_client import submit_job
|
||||
|
@ -59,7 +59,9 @@ class LogsController(RestController):
|
|||
).join(LoadBalancer.devices).\
|
||||
filter(LoadBalancer.id == self.lbid).\
|
||||
first()
|
||||
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'api_log_archive').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
data = {
|
||||
'deviceid': device.id
|
||||
|
|
|
@ -20,7 +20,7 @@ from wsme.exc import ClientSideError
|
|||
from wsme import Unset
|
||||
#default response objects
|
||||
from libra.common.api.lbaas import LoadBalancer, Node, db_session, Limits
|
||||
from libra.common.api.lbaas import Device
|
||||
from libra.common.api.lbaas import Device, Counters
|
||||
from libra.api.acl import get_limited_to_project
|
||||
from libra.api.model.validators import LBNodeResp, LBNodePost, NodeResp
|
||||
from libra.api.model.validators import LBNodePut
|
||||
|
@ -100,6 +100,9 @@ class NodesController(RestController):
|
|||
del node_response['enabled']
|
||||
if node_response['weight'] == 1:
|
||||
del node_response['weight']
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'api_node_get').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
response.status = 200
|
||||
return node_response
|
||||
|
@ -255,7 +258,9 @@ class NodesController(RestController):
|
|||
).join(LoadBalancer.devices).\
|
||||
filter(LoadBalancer.id == self.lbid).\
|
||||
first()
|
||||
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'api_node_create').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
submit_job(
|
||||
'UPDATE', device.name, device.id, self.lbid
|
||||
|
@ -335,7 +340,9 @@ class NodesController(RestController):
|
|||
).join(LoadBalancer.devices).\
|
||||
filter(LoadBalancer.id == self.lbid).\
|
||||
first()
|
||||
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'api_node_modify').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
submit_job(
|
||||
'UPDATE', device.name, device.id, lb.id
|
||||
|
@ -412,6 +419,9 @@ class NodesController(RestController):
|
|||
).join(LoadBalancer.devices).\
|
||||
filter(LoadBalancer.id == self.lbid).\
|
||||
first()
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'api_node_delete').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
submit_job(
|
||||
'UPDATE', device.name, device.id, self.lbid
|
||||
|
|
|
@ -26,7 +26,7 @@ class V1Controller(object):
|
|||
@expose('json')
|
||||
def index(self):
|
||||
response.status = 200
|
||||
return Responses.versions
|
||||
return Responses.v1_1
|
||||
|
||||
@expose('json')
|
||||
def algorithms(self):
|
||||
|
|
|
@ -17,6 +17,7 @@ import ipaddress
|
|||
from pecan import response, expose, request
|
||||
from pecan.rest import RestController
|
||||
from libra.common.api.lbaas import LoadBalancer, Vip, Device, db_session
|
||||
from libra.common.api.lbaas import Counters
|
||||
from libra.api.acl import get_limited_to_project
|
||||
|
||||
|
||||
|
@ -65,5 +66,8 @@ class VipsController(RestController):
|
|||
"ipVersion": "IPV4"
|
||||
}]
|
||||
}
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'api_vips_get').first()
|
||||
counter.value += 1
|
||||
session.rollback()
|
||||
return resp
|
||||
|
|
|
@ -17,7 +17,7 @@ eventlet.monkey_patch()
|
|||
import ipaddress
|
||||
from libra.common.json_gearman import JSONGearmanClient
|
||||
from libra.common.api.lbaas import LoadBalancer, db_session, Device, Node, Vip
|
||||
from libra.common.api.lbaas import HealthMonitor
|
||||
from libra.common.api.lbaas import HealthMonitor, Counters
|
||||
from libra.common.api.lbaas import loadbalancers_devices
|
||||
from libra.common.api.mnb import update_mnb
|
||||
from libra.openstack.common import log
|
||||
|
@ -142,7 +142,7 @@ class GearmanClientThread(object):
|
|||
device = session.query(Device).\
|
||||
filter(Device.name == data).first()
|
||||
if device is None:
|
||||
self.LOG.error(
|
||||
LOG.error(
|
||||
"VIP assign have been given non existent device {0}"
|
||||
.format(data)
|
||||
)
|
||||
|
@ -167,7 +167,7 @@ class GearmanClientThread(object):
|
|||
filter(Vip.id == self.lbid).first()
|
||||
if vip is None:
|
||||
errmsg = 'Cannot find existing floating IP'
|
||||
self.LOG.error(
|
||||
LOG.error(
|
||||
"Failed to assign IP to device {0}"
|
||||
.format(data)
|
||||
)
|
||||
|
@ -189,12 +189,12 @@ class GearmanClientThread(object):
|
|||
if status:
|
||||
return True
|
||||
elif self.lbid:
|
||||
self.LOG.error(
|
||||
LOG.error(
|
||||
"Failed to assign IP {0} to device {1}"
|
||||
.format(ip_str, data)
|
||||
)
|
||||
else:
|
||||
self.LOG.error(
|
||||
LOG.error(
|
||||
"Failed to assign IP {0} to device {1}"
|
||||
.format(ip_str, data)
|
||||
)
|
||||
|
@ -234,6 +234,9 @@ class GearmanClientThread(object):
|
|||
else:
|
||||
session.query(Vip).\
|
||||
filter(Vip.ip == ip_int).delete()
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'vips_deleted').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
|
||||
def send_delete(self, data):
|
||||
|
@ -314,6 +317,9 @@ class GearmanClientThread(object):
|
|||
filter(Node.lbid == lb.id).delete()
|
||||
session.query(HealthMonitor).\
|
||||
filter(HealthMonitor.lbid == lb.id).delete()
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'loadbalancers_deleted').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
|
||||
#Notify billing of the LB deletion
|
||||
|
@ -335,7 +341,10 @@ class GearmanClientThread(object):
|
|||
# and auto-failover
|
||||
return
|
||||
device.status = 'ERROR'
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'loadbalancers_error').first()
|
||||
for lb in lbs:
|
||||
counter.value += 1
|
||||
lb.status = 'ERROR'
|
||||
lb.errmsg = errmsg
|
||||
|
||||
|
@ -365,6 +374,9 @@ class GearmanClientThread(object):
|
|||
else:
|
||||
device.errmsg = 'Log archive failed: {0}'.format(response)
|
||||
lb.status = 'ACTIVE'
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'log_archives').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
|
||||
def send_update(self, data):
|
||||
|
@ -494,6 +506,9 @@ class GearmanClientThread(object):
|
|||
device.status = 'ONLINE'
|
||||
device_name = device.name
|
||||
device_status = device.status
|
||||
counter = session.query(Counters).\
|
||||
filter(Counters.name == 'loadbalancers_updated').first()
|
||||
counter.value += 1
|
||||
session.commit()
|
||||
if device_status == 'BUILD':
|
||||
submit_vip_job(
|
||||
|
|
|
@ -64,6 +64,13 @@ class Limits(DeclarativeBase):
|
|||
value = Column(u'value', BIGINT(), nullable=False)
|
||||
|
||||
|
||||
class AdminAuth(DeclarativeBase):
|
||||
__tablename__ = 'admin_auth'
|
||||
id = Column(u'id', Integer, primary_key=True, nullable=False)
|
||||
tenant_id = Column(u'tenant_id', VARCHAR(length=128), nullable=False)
|
||||
level = Column(u'level', VARCHAR(length=10), nullable=False)
|
||||
|
||||
|
||||
class PoolBuilding(DeclarativeBase):
|
||||
__tablename__ = 'pool_building'
|
||||
id = Column(u'id', Integer, primary_key=True, nullable=False)
|
||||
|
@ -192,6 +199,13 @@ class Ports(DeclarativeBase):
|
|||
enabled = Column(u'enabled', INTEGER(), nullable=False, default=0)
|
||||
|
||||
|
||||
class Counters(DeclarativeBase):
|
||||
__tablename__ = 'counters'
|
||||
id = Column(u'id', Integer, primary_key=True, nullable=False)
|
||||
name = Column(u'name', VARCHAR(length=50), nullable=False)
|
||||
value = Column(u'value', BIGINT(), primary_key=True, nullable=False)
|
||||
|
||||
|
||||
class RoutingSession(Session):
|
||||
""" Try to use the first engine provided. If this fails use the next in
|
||||
sequence and so on. Reset to the first after 60 seconds
|
||||
|
|
|
@ -120,6 +120,24 @@ CREATE TABLE billing (
|
|||
|
||||
INSERT INTO billing VALUES (1, 'stats', '0000-00-00 00:00:00'),(2, 'usage', '0000-00-00 00:00:00'),(3, 'exists', '0000-00-00 00:00:00');
|
||||
|
||||
# Admin API auth
|
||||
CREATE TABLE admin_auth (
|
||||
id int(11) NOT NULL AUTO_INCREMENT,
|
||||
tenant_id varchar(128) NOT NULL,
|
||||
level varchar(10) NOT NULL,
|
||||
PRIMARY KEY(id)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET latin1;
|
||||
|
||||
# Counters
|
||||
CREATE TABLE counters (
|
||||
id int(11) NOT NULL AUTO_INCREMENT,
|
||||
name varchar(50) NOT NULL,
|
||||
value BIGINT NOT NULL,
|
||||
PRIMARY KEY(id)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET latin1;
|
||||
|
||||
INSERT INTO counters VALUES (1, 'loadbalancers_rebuild', 0),(2, 'loadbalancers_error', 0),(3, 'devices_offline_failed', 0),(4, 'loadbalancers_expunged', 0),(5, 'devices_deleted', 0), (6, 'vips_built', 0), (7, 'devices_built', 0), (8, 'devices_bad_built', 0), (9, 'vips_deleted', 0), (10, 'loadbalancers_deleted', 0), (11, 'log_archives',0), (12, 'loadbalancers_updated', 0), (13, 'api_loadbalancers_create', 0), (14, 'api_loadbalancers_get', 0), (15, 'api_loadbalancers_modify', 0), (16, 'api_loadbalancers_delete', 0), (17, 'api_healthmonitor_get', 0), (18, 'api_healthmonitor_modify', 0), (19, 'api_healthmonitor_delete', 0), (20, 'api_limits_get', 0), (21, 'api_log_archive', 0), (22, 'api_node_get', 0), (23, 'api_node_create', 0), (24, 'api_node_modify', 0), (25, 'api_node_delete', 0), (26, 'api_vips_get', 0);
|
||||
|
||||
# Stats
|
||||
CREATE TABLE stats (
|
||||
id BIGINT NOT NULL AUTO_INCREMENT, # unique id for this billing record
|
||||
|
|
Loading…
Reference in New Issue