update apiv2 code

Change-Id: I30057083bd29f2324f1ad4c0be34b60b696db2c6
This commit is contained in:
xiaodongwang 2014-06-17 14:57:01 -07:00
parent 1ef039fca3
commit ffed509623
118 changed files with 9002 additions and 8160 deletions

View File

@ -28,18 +28,7 @@ from compass.actions import reinstall
from compass.actions import search
from compass.api import app
from compass.config_management.utils import config_manager
from compass.db import database
from compass.db.model import Adapter
from compass.db.model import Cluster
from compass.db.model import ClusterHost
from compass.db.model import ClusterState
from compass.db.model import HostState
from compass.db.model import LogProgressingHistory
from compass.db.model import Machine
from compass.db.model import Role
from compass.db.model import Switch
from compass.db.model import SwitchConfig
from compass.db.model import User
from compass.db.api import database
from compass.tasks.client import celery
from compass.utils import flags
from compass.utils import logsetting
@ -84,17 +73,6 @@ app_manager = Manager(app, usage="Perform database operations")
TABLE_MAPPING = {
'role': Role,
'adapter': Adapter,
'switch': Switch,
'switch_config': SwitchConfig,
'machine': Machine,
'hoststate': HostState,
'clusterstate': ClusterState,
'cluster': Cluster,
'clusterhost': ClusterHost,
'logprogressinghistory': LogProgressingHistory,
'user': User
}
@ -120,6 +98,11 @@ def checkdb():
@app_manager.command
def createdb():
"""Creates database from sqlalchemy models."""
try:
dropdb()
except Exception:
pass
if setting.DATABASE_TYPE == 'file':
if os.path.exists(setting.DATABASE_FILE):
os.remove(setting.DATABASE_FILE)

View File

@ -20,7 +20,7 @@ import logging
from compass.actions import util
from compass.config_management.utils.config_manager import ConfigManager
from compass.db import database
from compass.db.api import database
def clean_deployment(cluster_hosts):

View File

@ -1,49 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to clean installing progress of a given cluster
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging
from compass.actions import util
from compass.config_management.utils.config_manager import ConfigManager
from compass.db import database
def clean_installing_progress(cluster_hosts):
    """Clean installing progress of clusters.

    :param cluster_hosts: clusters and hosts in each cluster to clean.
    :type cluster_hosts: dict of int or str to list of int or str

    :raises Exception: when the serialized-action lock cannot be acquired.

    .. note::
        The function should be called out of database session.
    """
    # Serialize with other long-running actions; util.lock yields a
    # falsy value when acquisition fails.
    with util.lock('serialized_action') as lock:
        if not lock:
            raise Exception(
                'failed to acquire lock to clean installation progress')
        logging.info(
            'clean installing progress of cluster_hosts: %s',
            cluster_hosts)
        with database.session():
            # Normalize requested clusters/hosts and resolve per-cluster
            # os version and target system.
            cluster_hosts, os_versions, target_systems = (
                util.update_cluster_hosts(cluster_hosts))
            manager = ConfigManager()
            manager.clean_cluster_and_hosts_installing_progress(
                cluster_hosts, os_versions, target_systems)
            # Push the changes through to the backing installers.
            manager.sync()

View File

@ -20,7 +20,7 @@ import logging
from compass.actions import util
from compass.config_management.utils.config_manager import ConfigManager
from compass.db import database
from compass.db.api import database
def deploy(cluster_hosts):

View File

@ -14,16 +14,88 @@
"""Module to provider function to poll switch."""
import logging
import netaddr
from compass.db import database
from compass.db.model import Machine
from compass.db.model import Switch
from compass.db.model import SwitchConfig
from compass.actions import util
from compass.db.api import database
from compass.db.api import switch as switch_api
from compass.hdsdiscovery.hdmanager import HDManager
def poll_switch(ip_addr, req_obj='mac', oper="SCAN"):
"""Query switch and return expected result
def _poll_switch(ip_addr, credentials, req_obj='mac', oper="SCAN"):
    """Poll one switch and return its state plus the machines learned.

    :param ip_addr: switch ip address.
    :type ip_addr: str
    :param credentials: switch credentials passed to the hd manager.
    :type credentials: dict
    :param req_obj: the object requested to query from switch.
    :type req_obj: str
    :param oper: the operation to query the switch.
    :type oper: str

    :returns: tuple of (switch-fields dict with 'vendor'/'state'/'err_msg',
        dict of mac to {'port': ..., 'vlans': [...]}). The machine dict is
        empty whenever polling fails.
    """
    # State values written into the returned switch dict.
    under_monitoring = 'under_monitoring'
    unreachable = 'unreachable'
    polling_error = 'error'
    hdmanager = HDManager()
    vendor, state, err_msg = hdmanager.get_vendor(ip_addr, credentials)
    if not vendor:
        logging.info("*****error_msg: %s****", err_msg)
        logging.error('no vendor found or match switch %s', ip_addr)
        # No vendor detected: propagate get_vendor's state, no machines.
        return (
            {
                'vendor': vendor, 'state': state, 'err_msg': err_msg
            }, {
            }
        )
    logging.debug(
        'hdmanager learn switch from %s', ip_addr
    )
    results = []
    try:
        results = hdmanager.learn(
            ip_addr, credentials, vendor, req_obj, oper
        )
    except Exception as error:
        # Any failure while learning is reported as the switch being
        # unreachable.
        logging.exception(error)
        state = unreachable
        err_msg = (
            'SNMP walk for querying MAC addresses timedout'
        )
        return (
            {
                'vendor': vendor, 'state': state, 'err_msg': err_msg
            }, {
            }
        )
    logging.info("pollswitch %s result: %s", ip_addr, results)
    if not results:
        logging.error(
            'no result learned from %s', ip_addr
        )
        state = polling_error
        err_msg = 'No result learned from SNMP walk'
        return (
            {'vendor': vendor, 'state': state, 'err_msg': err_msg},
            {}
        )
    state = under_monitoring
    machine_dicts = {}
    # Merge results per mac: the same mac may show up with several vlans.
    for machine in results:
        mac = machine['mac']
        port = machine['port']
        vlan = machine['vlan']
        if vlan:
            vlans = [vlan]
        else:
            vlans = []
        if mac not in machine_dicts:
            machine_dicts[mac] = {'port': port, 'vlans': vlans}
        else:
            # Last seen port wins; vlans accumulate.
            machine_dicts[mac]['port'] = port
            machine_dicts[mac]['vlans'].extend(vlans)
    logging.debug('update switch %s state to under monitoring', ip_addr)
    return (
        {'vendor': vendor, 'state': state, 'err_msg': err_msg},
        machine_dicts
    )
def poll_switch(ip_addr, credentials, req_obj='mac', oper="SCAN"):
"""Query switch and update switch machines.
.. note::
When polling switch succeeds, for each mac it got from polling switch,
@ -31,6 +103,8 @@ def poll_switch(ip_addr, req_obj='mac', oper="SCAN"):
:param ip_addr: switch ip address.
:type ip_addr: str
:param credentials: switch crednetials.
:type credentials: dict
:param req_obj: the object requested to query from switch.
:type req_obj: str
:param oper: the operation to query the switch.
@ -38,89 +112,29 @@ def poll_switch(ip_addr, req_obj='mac', oper="SCAN"):
.. note::
The function should be called out of database session scope.
"""
under_monitoring = 'under_monitoring'
unreachable = 'unreachable'
with util.lock('poll switch %s' % ip_addr) as lock:
if not lock:
raise Exception(
'failed to acquire lock to poll switch %s' % ip_addr
)
if not ip_addr:
logging.error('No switch IP address is provided!')
return
logging.debug('poll switch: %s', ip_addr)
ip_int = long(netaddr.IPAddress(ip_addr))
switch_dict, machine_dicts = _poll_switch(
ip_addr, credentials, req_obj=req_obj, oper=oper
)
with database.session() as session:
switch = switch_api.get_switch_internal(
session, False, ip_int=ip_int
)
if not switch:
logging.error('no switch found for %s', ip_addr)
return
with database.session() as session:
#Retrieve vendor info from switch table
switch = session.query(Switch).filter_by(ip=ip_addr).first()
logging.info("pollswitch: %s", switch)
if not switch:
logging.error('no switch found for %s', ip_addr)
return
credential = switch.credential
logging.info("pollswitch: credential %r", credential)
vendor = switch.vendor
prev_state = switch.state
hdmanager = HDManager()
vendor, vstate, err_msg = hdmanager.get_vendor(ip_addr, credential)
if not vendor:
switch.state = vstate
switch.err_msg = err_msg
logging.info("*****error_msg: %s****", switch.err_msg)
logging.error('no vendor found or match switch %s', switch)
return
switch.vendor = vendor
# Start to poll switch's mac address.....
logging.debug('hdmanager learn switch from %s %s %s %s %s',
ip_addr, credential, vendor, req_obj, oper)
results = []
try:
results = hdmanager.learn(
ip_addr, credential, vendor, req_obj, oper)
except Exception as error:
logging.exception(error)
switch.state = unreachable
switch.err_msg = "SNMP walk for querying MAC addresses timedout"
return
logging.info("pollswitch %s result: %s", switch, results)
if not results:
logging.error('no result learned from %s %s %s %s %s',
ip_addr, credential, vendor, req_obj, oper)
return
switch_id = switch.id
filter_ports = session.query(
SwitchConfig.filter_port
).filter(
SwitchConfig.ip == Switch.ip
).filter(
Switch.id == switch_id
).all()
logging.info("***********filter posts are %s********", filter_ports)
if filter_ports:
#Get all ports from tuples into list
filter_ports = [i[0] for i in filter_ports]
for entry in results:
mac = entry['mac']
port = entry['port']
vlan = entry['vlan']
if port in filter_ports:
continue
machine = session.query(Machine).filter_by(
mac=mac, port=port, switch_id=switch_id).first()
if not machine:
machine = Machine(mac=mac, port=port, vlan=vlan)
session.add(machine)
machine.switch = switch
logging.debug('update switch %s state to under monitoring', switch)
if prev_state != under_monitoring:
#Update error message in db
switch.err_msg = ""
switch.state = under_monitoring
switch_api.update_switch_internal(
session, switch, **switch_dict
)
switch_api.add_switch_machines_internal(
session, switch, machine_dicts, False
)

View File

@ -20,7 +20,7 @@ import logging
from compass.actions import util
from compass.config_management.utils.config_manager import ConfigManager
from compass.db import database
from compass.db.api import database
def reinstall(cluster_hosts):

View File

@ -20,7 +20,7 @@ import logging
from compass.actions import util
from compass.config_management.utils.config_manager import ConfigManager
from compass.db import database
from compass.db.api import database
def search(cluster_hosts, cluster_propreties_match,

View File

@ -1,95 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to update status and installing progress of the given cluster.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging
from compass.actions import util
from compass.db import database
from compass.log_analyzor import progress_calculator
from compass.utils import setting_wrapper as setting
def _cluster_filter(cluster):
"""filter cluster."""
if not cluster.state:
logging.error('there is no state for cluster %s',
cluster.id)
return False
if cluster.state.state != 'INSTALLING':
logging.error('the cluster %s state %s is not installing',
cluster.id, cluster.state.state)
return False
return True
def _host_filter(host):
"""filter host."""
if not host.state:
logging.error('there is no state for host %s',
host.id)
return False
if host.state.state != 'INSTALLING':
logging.error('the host %s state %s is not installing',
host.id, host.state.state)
return False
return True
def update_progress(cluster_hosts):
    """Update status and installing progress of the given cluster.

    :param cluster_hosts: clusters and hosts in each cluster to update.
    :type cluster_hosts: dict of int or str to list of int or str

    .. note::
        The function should be called out of the database session scope.
        In the function, it will update the database cluster_state and
        host_state table for the deploying cluster and hosts.
        The function will also query log_progressing_history table to get
        the lastest installing progress and the position of log it has
        processed in the last run. The function uses these information to
        avoid recalculate the progress from the beginning of the log file.
        After the progress got updated, these information will be stored back
        to the log_progressing_history for next time run.
    """
    # Non-blocking lock: if another run is already calculating progress,
    # log and give up instead of queueing behind it.
    with util.lock('log_progressing', blocking=False) as lock:
        if not lock:
            logging.error(
                'failed to acquire lock to calculate installation progress')
            return
        logging.info('update installing progress of cluster_hosts: %s',
                     cluster_hosts)
        os_versions = {}
        target_systems = {}
        with database.session():
            # Keep only clusters/hosts in the INSTALLING state
            # (see _cluster_filter/_host_filter).
            cluster_hosts, os_versions, target_systems = (
                util.update_cluster_hosts(
                    cluster_hosts, _cluster_filter, _host_filter))
        progress_calculator.update_progress(
            setting.OS_INSTALLER,
            os_versions,
            setting.PACKAGE_INSTALLER,
            target_systems,
            cluster_hosts)

View File

@ -21,9 +21,8 @@ import redis
from contextlib import contextmanager
from compass.db import database
from compass.db.model import Cluster
from compass.db.model import Switch
from compass.db.api import database
from compass.db import models
@contextmanager
@ -49,79 +48,3 @@ def lock(lock_name, blocking=True, timeout=10):
instance_lock.acquired_until = 0
instance_lock.release()
logging.debug('released lock %s', lock_name)
def update_switch_ips(switch_ips):
    """Return ips of known switches, filtered by switch_ips when given.

    Must be called inside a database session (uses current_session()).
    """
    session = database.current_session()
    known_ips = [switch.ip for switch in session.query(Switch).all()]
    if not switch_ips:
        return known_ips
    return [ip for ip in known_ips if ip in switch_ips]
def update_cluster_hosts(cluster_hosts,
                         cluster_filter=None, host_filter=None):
    """Get updated clusters and hosts per cluster from cluster hosts.

    :param cluster_hosts: clusters and hosts in each cluster to update;
        a falsy value (None/{}) means all clusters with all their hosts.
    :type cluster_hosts: dict of cluster id/name to list of host id/hostname
    :param cluster_filter: optional callable(cluster) -> bool; clusters for
        which it returns False are skipped.
    :param host_filter: optional callable(host) -> bool; hosts for which it
        returns False are skipped.

    :returns: tuple of (dict of cluster id to list of host ids,
        dict of cluster id to adapter os version,
        dict of cluster id to adapter target system).

    .. note::
        Must be called inside a database session
        (uses database.current_session()).
    """
    session = database.current_session()
    os_versions = {}
    target_systems = {}
    updated_cluster_hosts = {}
    # Bug fix: the original crashed with TypeError on `cluster.id in None`
    # when callers passed None; treat any falsy value as "all clusters".
    if not cluster_hosts:
        cluster_hosts = {}
    clusters = session.query(Cluster).all()
    for cluster in clusters:
        if cluster_hosts and (
            cluster.id not in cluster_hosts and
            str(cluster.id) not in cluster_hosts and
            cluster.name not in cluster_hosts
        ):
            logging.debug('ignore cluster %s since it is not in %s',
                          cluster.id, cluster_hosts)
            continue
        adapter = cluster.adapter
        if not adapter:
            logging.error('there is no adapter for cluster %s',
                          cluster.id)
            continue
        if cluster_filter and not cluster_filter(cluster):
            logging.debug('filter cluster %s', cluster.id)
            continue
        updated_cluster_hosts[cluster.id] = []
        os_versions[cluster.id] = adapter.os
        target_systems[cluster.id] = adapter.target_system
        # A cluster may be keyed by id, str(id) or name in the request.
        if cluster.id in cluster_hosts:
            hosts = cluster_hosts[cluster.id]
        elif str(cluster.id) in cluster_hosts:
            hosts = cluster_hosts[str(cluster.id)]
        elif cluster.name in cluster_hosts:
            hosts = cluster_hosts[cluster.name]
        else:
            hosts = []
        if not hosts:
            # Empty host list means all hosts of the cluster.
            hosts = [host.id for host in cluster.hosts]
        for host in cluster.hosts:
            if (
                host.id not in hosts and
                str(host.id) not in hosts and
                host.hostname not in hosts
            ):
                logging.debug('ignore host %s which is not in %s',
                              host.id, hosts)
                continue
            if host_filter and not host_filter(host):
                logging.debug('filter host %s', host.id)
                continue
            updated_cluster_hosts[cluster.id].append(host.id)
    return (updated_cluster_hosts, os_versions, target_systems)

View File

@ -13,23 +13,29 @@
# limitations under the License.
import datetime
from flask import Blueprint
from flask.ext.login import LoginManager
from flask import Flask
from compass.api.v1.api import v1_app
from compass.db.models import SECRET_KEY
# from compass.api.v1.api import v1_app
from compass.utils import setting_wrapper as setting
from compass.utils import util
app = Flask(__name__)
app.debug = True
app.register_blueprint(v1_app, url_prefix='/v1.0')
# blueprint = Blueprint('v2_app', __name__)
# app.register_blueprint(v1_app, url_prefix='/v1.0')
# app.register_blueprint(blueprint, url_prefix='/api')
app.secret_key = SECRET_KEY
app.config['AUTH_HEADER_NAME'] = 'X-Auth-Token'
app.config['REMEMBER_COOKIE_DURATION'] = datetime.timedelta(minutes=30)
app.config['SECRET_KEY'] = setting.USER_SECRET_KEY
app.config['AUTH_HEADER_NAME'] = setting.USER_AUTH_HEADER_NAME
app.config['REMEMBER_COOKIE_DURATION'] = (
datetime.timedelta(
seconds=util.parse_time_interval(setting.USER_TOKEN_DURATION)
)
)
login_manager = LoginManager()
login_manager.login_view = 'login'

File diff suppressed because it is too large Load Diff

View File

@ -1,46 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itsdangerous import BadData
import logging
from compass.db.models import login_serializer
def get_user_id_from_token(token, max_age):
    """Return the user id decoded from a signed login token.

    :param token: signed token produced by login_serializer.
    :param max_age: maximum token age in seconds before it is rejected.

    :returns: the decoded user id, or None when the token is invalid,
        tampered with, or expired (itsdangerous raises BadData).
    """
    user_id = None
    try:
        user_id = login_serializer.loads(token, max_age=max_age)
    except BadData as err:
        # Bad token: log and signal failure with None rather than raising.
        logging.error("[auth][get_user_info_from_token] Exception: %s", err)
        return None
    return user_id
def authenticate_user(email, pwd):
    """Authenticate a user by email and password.

    :returns: the matching User object when the password is valid,
        otherwise None.
    """
    # Local import, presumably to avoid a circular import with
    # compass.db.models -- confirm before moving to module level.
    from compass.db.models import User
    try:
        user = User.query.filter_by(email=email).first()
        if user and user.valid_password(pwd):
            return user
    except Exception as err:
        # Best-effort: any lookup failure is logged and treated as
        # an authentication failure (returns None below).
        logging.info('[auth][authenticate_user]Exception: %s', err)
    return None

View File

@ -0,0 +1,49 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itsdangerous import BadData
import logging
import sys
from compass.api import app
from compass.api import exception_handler
from compass.api import login_manager
from compass.db.api import user as user_api
from compass.db.api.user import UserWrapper
def authenticate_user(email, password, **kwargs):
    """Authenticate a user by email and password.

    :returns: the authenticated user object.
    """
    user = user_api.get_user_object(
        email, **kwargs
    )
    # NOTE(review): assumes get_user_object / authenticate raise on an
    # unknown user or wrong password -- confirm in compass.db.api.user.
    user.authenticate(password)
    return user
@login_manager.token_loader
def load_user_from_token(token):
    """Resolve a user object from a token (flask-login token_loader hook)."""
    return user_api.get_user_object_from_token(token)
@login_manager.header_loader
def load_user_from_header(header):
    """Return a user object from token carried in the auth header."""
    return user_api.get_user_object_from_token(header)
@login_manager.user_loader
def load_user(token):
    """Resolve a user object from the stored token (user_loader hook)."""
    return user_api.get_user_object_from_token(token)

View File

@ -13,79 +13,77 @@
# limitations under the License.
"""Exceptions for RESTful API."""
import logging
import simplejson as json
import traceback
from compass.api import app
from compass.api import utils
class ItemNotFound(Exception):
class HTTPException(Exception):
    """Base class for API errors that map to an HTTP status code.

    Captures the active traceback at construction time so the error
    handler can include it in the JSON response.
    """

    def __init__(self, message, status_code):
        super(HTTPException, self).__init__(message)
        self.status_code = status_code
        self.traceback = traceback.format_exc()
class ItemNotFound(HTTPException):
"""Define the exception for referring non-existing object."""
def __init__(self, message):
super(ItemNotFound, self).__init__(message)
self.message = message
def __str__(self):
return repr(self.message)
super(ItemNotFound, self).__init__(message, 410)
class BadRequest(Exception):
class BadRequest(HTTPException):
"""Define the exception for invalid/missing parameters or a user makes
a request in invalid state and cannot be processed at this moment.
"""
def __init__(self, message):
super(BadRequest, self).__init__(message)
self.message = message
def __str__(self):
return repr(self.message)
super(BadRequest, self).__init__(message, 400)
class Unauthorized(Exception):
class Unauthorized(HTTPException):
"""Define the exception for invalid user login."""
def __init__(self, message):
super(Unauthorized, self).__init__(message)
self.message = message
def __str__(self):
return repr(self.message)
super(Unauthorized, self).__init__(message, 401)
class UserDisabled(Exception):
class UserDisabled(HTTPException):
"""Define the exception that a disabled user tries to do some operations.
"""
def __init__(self, message):
super(UserDisabled, self).__init__(message)
self.message = message
def __str__(self):
return repr(self.message)
super(UserDisabled, self).__init__(message, 403)
class Forbidden(Exception):
class Forbidden(HTTPException):
"""Define the exception that a user tries to do some operations without
valid permissions.
"""
def __init__(self, message):
super(Forbidden, self).__init__(message)
self.message = message
def __str__(self):
return repr(self.message)
super(Forbidden, self).__init__(message, 403)
class BadMethod(Exception):
class BadMethod(HTTPException):
"""Define the exception for invoking unsupprted or unimplemented methods.
"""
def __init__(self, message):
super(BadMethod, self).__init__(message)
self.message = message
def __str__(self):
return repr(self.message)
super(BadMethod, self).__init__(message, 405)
class ConflictObject(Exception):
class ConflictObject(HTTPException):
"""Define the exception for creating an existing object."""
def __init__(self, message):
super(ConflictObject, self).__init__(message)
self.message = message
super(ConflictObject, self).__init__(message, 409)
def __str__(self):
return repr(self.message)
@app.errorhandler(Exception)
def handle_exception(error):
    """Convert any uncaught exception into a JSON error response.

    Exceptions carrying .traceback / .status_code attributes (the
    HTTPException subclasses in this module) contribute those values;
    anything else falls back to a generic 400.
    """
    response = {'message': str(error)}
    if hasattr(error, 'traceback'):
        response['traceback'] = error.traceback
    status_code = 400
    if hasattr(error, 'status_code'):
        status_code = error.status_code
    return utils.make_json_response(status_code, response)

View File

@ -1,28 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom flask restful."""
from flask.ext.restful import Api
class CompassApi(Api):
    """Override the Flask_Restful error routing for 500."""

    def error_router(self, original_handler, e):
        """Route errors: hand >=500 codes back to the app's own handler.

        :param original_handler: the app's default error handler.
        :param e: the raised exception (code defaults to 500 if absent).
        """
        code = getattr(e, 'code', 500)
        # for HTTP 500 errors return custom response
        if code >= 500:
            return original_handler(e)
        # Anything below 500 follows flask-restful's normal routing.
        return super(CompassApi, self).error_router(original_handler, e)

View File

@ -1,13 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -1,43 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""modules to read/write cluster/host config from installers.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
__all__ = [
'chefhandler', 'cobbler',
'get_os_installer_by_name',
'get_os_installer',
'register_os_installer',
'get_package_installer_by_name',
'get_package_installer',
'register_package_installer',
]
from compass.config_management.installers.os_installer import (
get_installer as get_os_installer)
from compass.config_management.installers.os_installer import (
get_installer_by_name as get_os_installer_by_name)
from compass.config_management.installers.os_installer import (
register as register_os_installer)
from compass.config_management.installers.package_installer import (
get_installer as get_package_installer)
from compass.config_management.installers.package_installer import (
get_installer_by_name as get_package_installer_by_name)
from compass.config_management.installers.package_installer import (
register as register_package_installer)
from compass.config_management.installers.plugins import chefhandler
from compass.config_management.installers.plugins import cobbler

View File

@ -1,146 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to provider installer interface.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
class Installer(object):
    """Base interface shared by all installers.

    Every hook is a no-op (or returns an empty dict) so that concrete
    installers only need to override the operations they support.
    """

    NAME = 'installer'

    def __repr__(self):
        return '%s[%s]' % (self.__class__.__name__, self.NAME)

    def sync(self, **kwargs):
        """Push pending changes to the backing installer (no-op here)."""
        pass

    def get_global_config(self, **kwargs):
        """Return the global configuration as a dict (empty by default)."""
        return {}

    def get_cluster_config(self, clusterid, **kwargs):
        """Return configuration of cluster <clusterid> as a dict.

        :param clusterid: the id of the cluster to get configuration.
        :type clusterid: int
        """
        return {}

    def get_host_config(self, hostid, **kwargs):
        """Return configuration of host <hostid> as a dict.

        :param hostid: the id of host to get configuration.
        :type hostid: int
        """
        return {}

    def update_global_config(self, config, **kwargs):
        """Store the global configuration dict <config> (no-op here)."""
        pass

    def update_cluster_config(self, clusterid, config, **kwargs):
        """Store configuration dict <config> for cluster <clusterid>."""
        pass

    def update_host_config(self, hostid, config, **kwargs):
        """Store configuration dict <config> for host <hostid>."""
        pass

    def clean_host_installing_progress(
        self, hostid, config, **kwargs
    ):
        """Discard installing progress/logs of host <hostid>."""
        pass

    def clean_cluster_installing_progress(
        self, clusterid, config, **kwargs
    ):
        """Discard installing progress/logs of cluster <clusterid>."""
        pass

    def reinstall_host(self, hostid, config, **kwargs):
        """Trigger reinstallation of host <hostid>."""
        pass

    def reinstall_cluster(self, clusterid, config, **kwargs):
        """Trigger reinstallation of cluster <clusterid>."""
        pass

    def clean_host_config(self, hostid, config, **kwargs):
        """Remove stored configuration of host <hostid>."""
        pass

    def clean_cluster_config(self, clusterid, config, **kwargs):
        """Remove stored configuration of cluster <clusterid>."""
        pass

View File

@ -1,78 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for interface of os installer.
.. moduleauthor::: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging
from compass.config_management.installers import installer
from compass.utils import setting_wrapper as setting
class Installer(installer.Installer):
    """Interface for os installer."""

    NAME = 'os_installer'

    def get_oses(self):
        """virtual method to get supported oses.

        :returns: list of str, each is the supported os version.
        """
        return []
# Registry of os installer name -> installer class (see register()).
INSTALLERS = {}


def get_installer_by_name(name, **kwargs):
    """Get os installer by name.

    :param name: os installer name.
    :type name: str
    :param kwargs: keyword args forwarded to the installer constructor.

    :returns: instance of subclass of :class:`Installer`
    :raises: KeyError when the name has not been registered.
    """
    if name not in INSTALLERS:
        logging.error('os installer name %s is not in os installers %s',
                      name, INSTALLERS)
        # Bug fix: the original raised the format string literally,
        # never interpolating the offending name.
        raise KeyError('os installer name %s is not in os INSTALLERS' % name)
    os_installer = INSTALLERS[name](**kwargs)
    logging.debug('got os installer %s', os_installer)
    return os_installer
def register(os_installer):
    """Register os installer.

    :param os_installer: subclass of :class:`Installer`
    :raises: KeyError when an installer with the same NAME already exists.
    """
    name = os_installer.NAME
    if name in INSTALLERS:
        logging.error(
            'os installer %s is already registered in INSTALLERS %s',
            os_installer, INSTALLERS)
        raise KeyError(
            'os installer %s is already registered' % os_installer)
    logging.info('register os installer %s', os_installer)
    INSTALLERS[name] = os_installer
def get_installer(**kwargs):
    """Get default os installer from compass setting.

    The installer name is read from setting.OS_INSTALLER; kwargs are
    forwarded to the installer constructor.
    """
    return get_installer_by_name(setting.OS_INSTALLER, **kwargs)

View File

@ -1,101 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to provider interface for package installer.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging
from compass.config_management.installers import installer
from compass.utils import setting_wrapper as setting
class Installer(installer.Installer):
    """Interface for package installer.

    Concrete package installers (e.g. the chef plugin) subclass this and
    override the virtual methods below.
    """
    # NAME identifies the installer type in the INSTALLERS registry.
    NAME = 'package_installer'
    def get_target_systems(self, oses):
        """Virtual method to get available target_systems for each os.
        :param oses: supported os versions.
        :type oses: list of str
        :returns: dict of os_version to target systems as list of str.
        """
        return {}
    def get_roles(self, target_system):
        """Virtual method to get all roles of given target system.
        :param target_system: target distributed system such as openstack.
        :type target_system: str
        :returns: dict of role to role description as str.
        """
        return {}
    def os_installer_config(self, config, **kwargs):
        """Virtual method to get os installer related config.
        :param config: os installer host configuration
        :type config: dict
        :returns: package related configuration for os installer.
        """
        return {}
# Registry mapping package installer NAME -> installer class.
INSTALLERS = {}


def get_installer_by_name(name, **kwargs):
    """Get package installer by name.

    :param name: package installer name.
    :type name: str
    :param kwargs: keyword arguments forwarded to the installer constructor.

    :returns: instance of subclass of :class:`Installer`
    :raises: KeyError if no installer is registered under the name.
    """
    if name not in INSTALLERS:
        logging.error('installer name %s is not in package installers %s',
                      name, INSTALLERS)
        raise KeyError('installer name %s is not in package INSTALLERS' % name)
    package_installer = INSTALLERS[name](**kwargs)
    logging.debug('got package installer %s', package_installer)
    return package_installer


def register(package_installer):
    """Register package installer.

    :param package_installer: subclass of :class:`Installer`
    :raises: KeyError if the installer's NAME is already registered.
    """
    if package_installer.NAME in INSTALLERS:
        # Bug fix: the original logged the imported `installer` module
        # instead of the `package_installer` argument being registered.
        logging.error(
            'package installer %s is already in INSTALLERS %s',
            package_installer, INSTALLERS)
        raise KeyError(
            'package installer %s already registered' % package_installer)
    logging.info('register package installer: %s', package_installer)
    INSTALLERS[package_installer.NAME] = package_installer
def get_installer(**kwargs):
    """Get default package installer from compass setting."""
    return get_installer_by_name(setting.PACKAGE_INSTALLER, **kwargs)

View File

@ -1,13 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -1,467 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""package instaler chef plugin.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@gmail.com>
"""
import fnmatch
import functools
import logging
from compass.config_management.installers import package_installer
from compass.config_management.utils.config_translator import ConfigTranslator
from compass.config_management.utils.config_translator import KeyTranslator
from compass.config_management.utils import config_translator_callbacks
from compass.utils import setting_wrapper as setting
from compass.utils import util
# Translators from the chef global databag into compass global config,
# keyed by target system. The read_config_mapping entry is expanded by
# the config_translator_callbacks helpers into concrete keys/values.
FROM_GLOBAL_TRANSLATORS = {
    'openstack': ConfigTranslator(
        mapping={
            '/read_config_mapping': [KeyTranslator(
                translated_keys=(
                    config_translator_callbacks.get_keys_from_config_mapping),
                translated_value=(
                    config_translator_callbacks.get_value_from_config_mapping)
            )],
        }
    ),
}
# Translators from compass global config into the chef global databag,
# keyed by target system. Each /test_roles/<role> key is rewritten into
# the default role_assign_policy dependency list for that role.
TO_GLOBAL_TRANSLATORS = {
    'openstack': ConfigTranslator(
        mapping={
            '/test_roles/*': [KeyTranslator(
                translated_keys=[
                    functools.partial(
                        config_translator_callbacks.get_key_from_pattern,
                        from_pattern=r'^/test_roles/(?P<role>.*)$',
                        to_pattern=(
                            '/role_assign_policy/default'
                            '/dependencies/%(role)s'
                        )
                    )
                ],
                from_values={'testmode': '/testmode'},
                translated_value=functools.partial(
                    config_translator_callbacks.add_value,
                    # only add roles that are not already present
                    check_value_callback=(
                        lambda value, value_list: (
                            set(value) & set(value_list))
                    ),
                    add_value_callback=(
                        lambda value, value_list: value_list.extend(value)
                    )
                ),
                override=True
            )],
        }
    ),
}
# Translators from compass cluster config into the chef cluster databag,
# keyed by target system. /testmode toggles the debugging flags.
TO_CLUSTER_TRANSLATORS = {
    'openstack': ConfigTranslator(
        mapping={
            '/config_mapping': [KeyTranslator(
                translated_keys=(
                    config_translator_callbacks.get_keys_from_config_mapping),
                translated_value=(
                    config_translator_callbacks.get_value_from_config_mapping)
            )],
            '/testmode': [KeyTranslator(
                translated_keys=['/debugging/debug', '/debugging/verbose'],
                translated_value=functools.partial(
                    config_translator_callbacks.set_value,
                    # chef attributes expect string booleans here
                    return_value_callback=lambda value: str(value)
                ),
                override=True
            )],
        }
    ),
}
# Translators from the chef cluster databag back into compass cluster
# config, keyed by target system. These keys pass through unchanged.
FROM_CLUSTER_TRANSLATORS = {
    'openstack': ConfigTranslator(
        mapping={
            '/role_assign_policy': [KeyTranslator(
                translated_keys=['/role_assign_policy']
            )],
            '/config_mapping': [KeyTranslator(
                translated_keys=['/config_mapping']
            )],
            '/role_mapping': [KeyTranslator(
                translated_keys=['/role_mapping']
            )],
        }
    ),
}
# Translators from compass host config into the chef cluster databag,
# keyed by target system: role assignments, per-node management ip, and
# the haproxy/keepalived HA attributes per node.
TO_HOST_TRANSLATORS = {
    'openstack': ConfigTranslator(
        mapping={
            '/roles': [KeyTranslator(
                translated_keys=(
                    config_translator_callbacks.get_keys_from_role_mapping),
                from_keys={'mapping': '/role_mapping'},
                translated_value=(
                    config_translator_callbacks.get_value_from_role_mapping),
                from_values={'mapping': '/role_mapping'}
            ), KeyTranslator(
                translated_keys=[functools.partial(
                    config_translator_callbacks.get_key_from_pattern,
                    to_pattern='/node_mapping/%(node_name)s/roles'
                )],
                from_keys={'node_name': '/node_name'}
            )],
            '/networking/interfaces/management/ip': [KeyTranslator(
                translated_keys=[functools.partial(
                    config_translator_callbacks.get_key_from_pattern,
                    to_pattern='/node_mapping/%(node_name)s/management_ip'
                )],
                from_keys={'node_name': '/node_name'}
            )],
            '/haproxy_roles': [KeyTranslator(
                translated_keys=['/ha/status'],
                translated_value='enable',
                # HA is enabled only when any haproxy role is assigned
                override=config_translator_callbacks.override_if_any,
                override_conditions={'haproxy_roles': '/haproxy_roles'}
            )],
            '/haproxy/router_id': [KeyTranslator(
                translated_keys=[functools.partial(
                    config_translator_callbacks.get_key_from_pattern,
                    to_pattern='/ha/keepalived/router_ids/%(node_name)s'
                )],
                from_keys={'node_name': '/node_name'}
            )],
            '/haproxy/priority': [KeyTranslator(
                translated_keys=[functools.partial(
                    config_translator_callbacks.get_key_from_pattern,
                    to_pattern=(
                        '/ha/keepalived/instance_name/'
                        'priorities/%(node_name)s'
                    )
                )],
                from_keys={'node_name': '/node_name'}
            )],
            '/haproxy/state': [KeyTranslator(
                translated_keys=[functools.partial(
                    config_translator_callbacks.get_key_from_pattern,
                    to_pattern=(
                        '/ha/keepalived/instance_name/'
                        'states/%(node_name)s'
                    )
                )],
                from_keys={'node_name': '/node_name'}
            )],
        }
    ),
}
class Installer(package_installer.Installer):
    """Chef package installer.

    Persists cluster/host configuration into chef databags and node
    attributes via the chef server API that `chef.autoconfigure()` finds
    (knife/client config on this machine). Databag/databag-item handles
    are cached per instance in tmp_databags_/tmp_databag_items_.
    """
    NAME = 'chef'
    def __init__(self, **kwargs):
        # chef (pychef) is imported lazily so the module can be loaded
        # without the dependency installed.
        import chef
        super(Installer, self).__init__(**kwargs)
        self.installer_url_ = setting.CHEF_INSTALLER_URL
        self.global_databag_name_ = setting.CHEF_GLOBAL_DATABAG_NAME
        self.api_ = chef.autoconfigure()
        self.tmp_databags_ = {}
        self.tmp_databag_items_ = {}
        logging.debug('%s instance created', self)
    def __repr__(self):
        return '%s[name=%s,installer_url=%s,global_databag_name=%s]' % (
            self.__class__.__name__, self.NAME, self.installer_url_,
            self.global_databag_name_)
    @classmethod
    def _cluster_databag_name(cls, clusterid):
        """Get cluster databag name (the stringified cluster id)."""
        return '%s' % clusterid
    @classmethod
    def _get_client_name(cls, fullname):
        """Get chef client name (same as the node name)."""
        return cls._get_node_name(fullname)
    def _clean_host_attributes(self, config, target_system):
        """Remove the target system's roles from the chef node.

        If no target system remains on the node, the node and its API
        client are deleted outright; otherwise the run list is rebuilt
        from the remaining roles.
        """
        import chef
        node_name = self._get_node_name(config['fullname'])
        client_name = self._get_client_name(config['fullname'])
        node = chef.Node(node_name, api=self.api_)
        roles_per_target_system = node.get('roles_per_target_system', {})
        if target_system in roles_per_target_system:
            del roles_per_target_system[target_system]
        node['roles_per_target_system'] = roles_per_target_system
        if not roles_per_target_system:
            try:
                node.delete()
                client = chef.Client(client_name, api=self.api_)
                client.delete()
                logging.debug(
                    'delete %s for host %s ', target_system, node_name)
            except Exception as error:
                # best effort: deletion failures are logged, not raised
                logging.debug(
                    'failed to delete %s for host %s: %s',
                    target_system, node_name, error)
        else:
            node.run_list = []
            for _, roles in node['roles'].items():
                for role in roles:
                    node.run_list.append('role[%s]' % role)
            node.save()
            logging.debug('node %s is updated for %s',
                          node_name, target_system)
    def _update_host_attributes(self, config, target_system):
        """Write the target system's roles/cluster onto the chef node
        and rebuild the node run list from all target systems."""
        import chef
        node_name = self._get_node_name(config['fullname'])
        node = chef.Node(node_name, api=self.api_)
        node['cluster'] = self._cluster_databag_name(config['clusterid'])
        roles_per_target_system = node.get('roles_per_target_system', {})
        roles_per_target_system[target_system] = config['roles']
        node['roles_per_target_system'] = roles_per_target_system
        node.run_list = []
        for _, roles in roles_per_target_system.items():
            for role in roles:
                node.run_list.append('role[%s]' % role)
        node.save()
        logging.debug('update %s for host %s',
                      target_system, node_name)
    @classmethod
    def _get_node_name(cls, fullname):
        """Get chef node name (the host fullname verbatim)."""
        return fullname
    def os_installer_config(self, config, target_system, **kwargs):
        """Config the os installer needs to hand off to chef:
        installer url plus the client/node names for this host."""
        return {
            '%s_url' % self.NAME: self.installer_url_,
            'chef_client_name': self._get_client_name(config['fullname']),
            'chef_node_name': self._get_node_name(config['fullname'])
        }
    def get_target_systems(self, oses):
        """Map each os version to the target systems supporting it.

        Each databag is treated as a target system; its global item's
        'support_oses' patterns are fnmatch'ed against the os versions.
        """
        import chef
        databags = chef.DataBag.list(api=self.api_)
        target_systems = {}
        for os_version in oses:
            target_systems[os_version] = []
        for databag in databags:
            target_system = databag
            global_databag_item = self._get_global_databag_item(target_system)
            support_oses = global_databag_item['support_oses']
            for os_version in oses:
                for support_os in support_oses:
                    if fnmatch.fnmatch(os_version, support_os):
                        target_systems[os_version].append(target_system)
                        break
        return target_systems
    def get_roles(self, target_system):
        """Get supported roles from the global databag item."""
        global_databag_item = self._get_global_databag_item(target_system)
        return global_databag_item['all_roles']
    def _get_databag(self, target_system):
        """Get (and cache) the databag handle for a target system."""
        import chef
        if target_system not in self.tmp_databags_:
            self.tmp_databags_[target_system] = chef.DataBag(
                target_system, api=self.api_)
        return self.tmp_databags_[target_system]
    def _get_databag_item(self, target_system, bag_item_name):
        """Get a databag item's contents as a plain dict."""
        import chef
        databag_items = self.tmp_databag_items_.setdefault(
            target_system, {})
        if bag_item_name not in databag_items:
            databag = self._get_databag(target_system)
            databag_items[bag_item_name] = chef.DataBagItem(
                databag, bag_item_name, api=self.api_)
        return dict(databag_items[bag_item_name])
    def _update_databag_item(
        self, target_system, bag_item_name, config, save=True
    ):
        """Merge config into a databag item, optionally saving it."""
        import chef
        databag_items = self.tmp_databag_items_.setdefault(
            target_system, {})
        if bag_item_name not in databag_items:
            databag = self._get_databag(target_system)
            databag_items[bag_item_name] = chef.DataBagItem(
                databag, bag_item_name, api=self.api_)
        bag_item = databag_items[bag_item_name]
        for key, value in config.items():
            bag_item[key] = value
        if save:
            bag_item.save()
            logging.debug('save databag item %s to target system %s',
                          bag_item_name, target_system)
        else:
            logging.debug(
                'ignore saving databag item %s to target system %s',
                bag_item_name, target_system)
    def _clean_databag_item(self, target_system, bag_item_name):
        """Delete a databag item (best effort) and drop it from cache."""
        import chef
        databag_items = self.tmp_databag_items_.setdefault(
            target_system, {})
        if bag_item_name not in databag_items:
            databag = self._get_databag(target_system)
            databag_items[bag_item_name] = chef.DataBagItem(
                databag, bag_item_name, api=self.api_)
        bag_item = databag_items[bag_item_name]
        try:
            bag_item.delete()
            logging.debug(
                'databag item %s is removed from target_system %s',
                bag_item_name, target_system)
        except Exception as error:
            logging.debug(
                'no databag item %s to delete from target_system %s: %s',
                bag_item_name, target_system, error)
        del databag_items[bag_item_name]
    def _get_global_databag_item(self, target_system):
        """Get the global databag item of a target system."""
        return self._get_databag_item(
            target_system, self.global_databag_name_)
    def _clean_global_databag_item(self, target_system):
        """Delete the global databag item of a target system."""
        self._clean_databag_item(
            target_system, self.global_databag_name_)
    def _update_global_databag_item(self, target_system, config):
        """Update the global databag item in memory (not saved)."""
        self._update_databag_item(
            target_system, self.global_databag_name_, config, save=False)
    def _get_cluster_databag_item(self, target_system, clusterid):
        """Get the databag item of a cluster."""
        return self._get_databag_item(
            target_system, self._cluster_databag_name(clusterid))
    def _clean_cluster_databag_item(self, target_system, clusterid):
        """Delete the databag item of a cluster."""
        self._clean_databag_item(
            target_system, self._cluster_databag_name(clusterid))
    def _update_cluster_databag_item(self, target_system, clusterid, config):
        """Update and save the databag item of a cluster."""
        self._update_databag_item(
            target_system, self._cluster_databag_name(clusterid),
            config, save=True)
    def get_global_config(self, target_system, **kwargs):
        """Read global config translated from the global databag item."""
        bag_item = self._get_global_databag_item(target_system)
        return FROM_GLOBAL_TRANSLATORS[target_system].translate(bag_item)
    def get_cluster_config(self, clusterid, target_system, **kwargs):
        """Read cluster config: cluster item merged over global item,
        then translated back into compass config."""
        global_bag_item = self._get_global_databag_item(
            target_system)
        cluster_bag_item = self._get_cluster_databag_item(
            target_system, clusterid)
        util.merge_dict(cluster_bag_item, global_bag_item, False)
        return FROM_CLUSTER_TRANSLATORS[target_system].translate(
            cluster_bag_item)
    def clean_cluster_config(self, clusterid, config,
                             target_system, **kwargs):
        """Remove the cluster's databag item."""
        self._clean_cluster_databag_item(target_system, clusterid)
    def update_global_config(self, config, target_system, **kwargs):
        """Translate and merge compass global config into the global
        databag item (kept in memory, written on cluster update)."""
        global_bag_item = self._get_global_databag_item(target_system)
        translated_config = TO_GLOBAL_TRANSLATORS[target_system].translate(
            config)
        util.merge_dict(global_bag_item, translated_config, True)
        self._update_global_databag_item(target_system, global_bag_item)
    def update_cluster_config(self, clusterid, config,
                              target_system, **kwargs):
        """Rebuild the cluster databag item from global defaults plus the
        translated cluster config, then save it."""
        self.clean_cluster_config(clusterid, config,
                                  target_system, **kwargs)
        global_bag_item = self._get_global_databag_item(target_system)
        cluster_bag_item = self._get_cluster_databag_item(
            target_system, clusterid)
        util.merge_dict(cluster_bag_item, global_bag_item, False)
        translated_config = TO_CLUSTER_TRANSLATORS[target_system].translate(
            config)
        util.merge_dict(cluster_bag_item, translated_config, True)
        self._update_cluster_databag_item(
            target_system, clusterid, cluster_bag_item)
    def clean_host_config(self, hostid, config, target_system, **kwargs):
        """Clean host config: strip the host's chef node attributes."""
        self._clean_host_attributes(config, target_system)
    def reinstall_host(self, hostid, config, target_system, **kwargs):
        """Reinstall host: clean then re-apply node attributes."""
        self._clean_host_attributes(config, target_system)
        self._update_host_attributes(config, target_system)
    def update_host_config(self, hostid, config, target_system, **kwargs):
        """Merge the host's translated config into its cluster databag
        item and update the host's chef node attributes."""
        clusterid = config['clusterid']
        global_bag_item = self._get_global_databag_item(target_system)
        cluster_bag_item = self._get_cluster_databag_item(
            target_system, clusterid)
        util.merge_dict(cluster_bag_item, global_bag_item, False)
        # NOTE(review): mutates the caller's config dict in place by
        # adding client_name/node_name — confirm callers tolerate this.
        util.merge_dict(config, {
            'client_name': self._get_client_name(config['fullname']),
            'node_name': self._get_node_name(config['fullname'])
        })
        translated_config = TO_HOST_TRANSLATORS[target_system].translate(
            config)
        util.merge_dict(cluster_bag_item, translated_config, True)
        self._update_cluster_databag_item(
            target_system, clusterid, cluster_bag_item)
        self._update_host_attributes(config, target_system)
# Make the chef installer discoverable through the package installer registry.
package_installer.register(Installer)

View File

@ -1,290 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""os installer cobbler plugin.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import functools
import logging
import os.path
import shutil
import xmlrpclib
from compass.config_management.installers import os_installer
from compass.config_management.utils.config_translator import ConfigTranslator
from compass.config_management.utils.config_translator import KeyTranslator
from compass.config_management.utils import config_translator_callbacks
from compass.utils import setting_wrapper as setting
from compass.utils import util
# Translator from compass host config into cobbler system fields:
# global networking/credentials become system or kickstart-meta (ksmeta)
# keys, and per-interface settings become modify_interface entries keyed
# by nic name. Interface keys only apply on the management network.
TO_HOST_TRANSLATOR = ConfigTranslator(
    mapping={
        '/networking/global/gateway': [KeyTranslator(
            translated_keys=['/gateway']
        )],
        '/networking/global/nameservers': [KeyTranslator(
            translated_keys=['/name_servers']
        )],
        '/networking/global/search_path': [KeyTranslator(
            translated_keys=['/name_servers_search']
        )],
        '/networking/global/proxy': [KeyTranslator(
            translated_keys=['/ksmeta/proxy']
        )],
        '/networking/global/ignore_proxy': [KeyTranslator(
            translated_keys=['/ksmeta/ignore_proxy']
        )],
        '/networking/global/ntp_server': [KeyTranslator(
            translated_keys=['/ksmeta/ntp_server']
        )],
        '/security/server_credentials/username': [KeyTranslator(
            translated_keys=['/ksmeta/username']
        )],
        '/security/server_credentials/password': [KeyTranslator(
            translated_keys=['/ksmeta/password'],
            # password is stored encrypted in ksmeta
            translated_value=config_translator_callbacks.get_encrypted_value
        )],
        '/partition': [KeyTranslator(
            translated_keys=['/ksmeta/partition']
        )],
        '/networking/interfaces/*/mac': [KeyTranslator(
            translated_keys=[functools.partial(
                config_translator_callbacks.get_key_from_pattern,
                to_pattern='/modify_interface/macaddress-%(nic)s')],
            from_keys={'nic': '../nic'},
            override=functools.partial(
                config_translator_callbacks.override_path_has,
                should_exist='management')
        )],
        '/networking/interfaces/*/ip': [KeyTranslator(
            translated_keys=[functools.partial(
                config_translator_callbacks.get_key_from_pattern,
                to_pattern='/modify_interface/ipaddress-%(nic)s')],
            from_keys={'nic': '../nic'},
            override=functools.partial(
                config_translator_callbacks.override_path_has,
                should_exist='management')
        )],
        '/networking/interfaces/*/netmask': [KeyTranslator(
            translated_keys=[functools.partial(
                config_translator_callbacks.get_key_from_pattern,
                to_pattern='/modify_interface/netmask-%(nic)s')],
            from_keys={'nic': '../nic'},
            override=functools.partial(
                config_translator_callbacks.override_path_has,
                should_exist='management')
        )],
        '/networking/interfaces/*/dns_alias': [KeyTranslator(
            translated_keys=[functools.partial(
                config_translator_callbacks.get_key_from_pattern,
                to_pattern='/modify_interface/dnsname-%(nic)s')],
            from_keys={'nic': '../nic'},
            override=functools.partial(
                config_translator_callbacks.override_path_has,
                should_exist='management')
        )],
        '/networking/interfaces/*/nic': [KeyTranslator(
            translated_keys=[functools.partial(
                config_translator_callbacks.get_key_from_pattern,
                to_pattern='/modify_interface/static-%(nic)s')],
            from_keys={'nic': '../nic'},
            translated_value=True,
            override=functools.partial(
                config_translator_callbacks.override_path_has,
                should_exist='management'),
        ), KeyTranslator(
            translated_keys=[functools.partial(
                config_translator_callbacks.get_key_from_pattern,
                to_pattern='/modify_interface/management-%(nic)s')],
            from_keys={'nic': '../nic'},
            translated_value=functools.partial(
                config_translator_callbacks.override_path_has,
                should_exist='management'),
            override=functools.partial(
                config_translator_callbacks.override_path_has,
                should_exist='management')
        ), KeyTranslator(
            # accumulate promiscuous nics into a comma separated ksmeta list
            translated_keys=['/ksmeta/promisc_nics'],
            from_values={'promisc': '../promisc'},
            translated_value=functools.partial(
                config_translator_callbacks.add_value,
                get_value_callback=lambda config: [
                    value for value in config.split(',') if value
                ],
                return_value_callback=lambda values: ','.join(values)
            ),
            override=True
        )],
    }
)
class Installer(os_installer.Installer):
    """Cobbler os installer.

    Talks to the cobbler server over XML-RPC to create/update/remove
    system records and trigger netboot for reinstalls.
    """
    NAME = 'cobbler'
    def __init__(self, **kwargs):
        super(Installer, self).__init__()
        # the connection is created when cobbler installer is initialized.
        self.remote_ = xmlrpclib.Server(
            setting.COBBLER_INSTALLER_URL,
            allow_none=True)
        self.token_ = self.remote_.login(
            *setting.COBBLER_INSTALLER_TOKEN)
        # cobbler tries to get package related config from package installer.
        self.package_installer_ = kwargs['package_installer']
        logging.debug('%s instance created', self)
    def __repr__(self):
        # NOTE(review): format string is missing the closing ']' —
        # repr prints as 'Installer[name=...,token=<token>'.
        return '%s[name=%s,remote=%s,token=%s' % (
            self.__class__.__name__, self.NAME,
            self.remote_, self.token_)
    def get_oses(self):
        """Get supported os versions.

        :returns: list of os version.

        .. note::
           In cobbler, we treat profile name as the indicator
           of os version. It is just a simple indicator
           and not accurate.
        """
        profiles = self.remote_.get_profiles()
        oses = []
        for profile in profiles:
            oses.append(profile['name'])
        return oses
    def sync(self):
        """Sync cobbler to catch up the latest update config."""
        logging.debug('sync %s', self)
        self.remote_.sync(self.token_)
        # restart rsyslog so installation logs land in fresh files
        os.system('service rsyslog restart')
    def _get_modify_system(self, profile, config, **kwargs):
        """Build the cobbler system dict: identity fields plus the
        translated host config plus package-installer ksmeta."""
        system_config = {
            'name': config['fullname'],
            'hostname': config['hostname'],
            'profile': profile,
        }
        translated_config = TO_HOST_TRANSLATOR.translate(config)
        util.merge_dict(system_config, translated_config)
        ksmeta = system_config.setdefault('ksmeta', {})
        package_config = {'tool': self.package_installer_.NAME}
        util.merge_dict(
            package_config,
            self.package_installer_.os_installer_config(
                config, **kwargs))
        util.merge_dict(ksmeta, package_config)
        return system_config
    def _get_profile(self, os_version, **_kwargs):
        """Get the cobbler profile matching the os version.

        NOTE(review): raises IndexError when no profile matches —
        confirm callers only pass known os versions.
        """
        profile_found = self.remote_.find_profile(
            {'name': os_version})
        return profile_found[0]
    def _get_system(self, config, create_if_not_exists=True):
        """Get the system handle, optionally creating a new record.

        :returns: system handle, or None when not found and creation
                  is disabled.
        """
        sys_name = config['fullname']
        try:
            sys_id = self.remote_.get_system_handle(
                sys_name, self.token_)
            logging.debug('using existing system %s for %s',
                          sys_id, sys_name)
        except Exception:
            if create_if_not_exists:
                sys_id = self.remote_.new_system(self.token_)
                logging.debug('create new system %s for %s',
                              sys_id, sys_name)
            else:
                sys_id = None
        return sys_id
    def _clean_system(self, config):
        """Remove the system record (best effort)."""
        sys_name = config['fullname']
        try:
            self.remote_.remove_system(sys_name, self.token_)
            logging.debug('system %s is removed', sys_name)
        except Exception:
            logging.debug('no system %s found to remove', sys_name)
    def _save_system(self, sys_id):
        """Persist pending modifications on the system record."""
        self.remote_.save_system(sys_id, self.token_)
    def _update_modify_system(self, sys_id, system_config):
        """Apply each field of system_config via modify_system."""
        for key, value in system_config.items():
            self.remote_.modify_system(
                sys_id, key, value, self.token_)
    def _netboot_enabled(self, sys_id):
        """Flag the system to PXE boot on next restart."""
        self.remote_.modify_system(
            sys_id, 'netboot_enabled', True, self.token_)
    def clean_host_config(self, hostid, config, **kwargs):
        """Clean host config: wipe logs and remove the system record."""
        self.clean_host_installing_progress(
            hostid, config, **kwargs)
        self._clean_system(config)
    @classmethod
    def _clean_log(cls, system_name):
        """Remove the host's installation log directory."""
        log_dir = os.path.join(
            setting.INSTALLATION_LOGDIR,
            system_name)
        shutil.rmtree(log_dir, True)
    def clean_host_installing_progress(
        self, hostid, config, **kwargs
    ):
        """Clean host installing progress (its log directory)."""
        self._clean_log(config['fullname'])
    def reinstall_host(self, hostid, config, **kwargs):
        """Reinstall host: reset logs and re-enable netboot, but only
        if the system record already exists."""
        sys_id = self._get_system(config, False)
        if sys_id:
            self.clean_host_installing_progress(
                hostid, config, **kwargs)
            self._netboot_enabled(sys_id)
            self._save_system(sys_id)
    def update_host_config(self, hostid, config, **kwargs):
        """Update host config on the cobbler system record."""
        profile = self._get_profile(**kwargs)
        sys_id = self._get_system(config)
        system_config = self._get_modify_system(
            profile, config, **kwargs)
        logging.debug('%s system config to update: %s',
                      hostid, system_config)
        self._update_modify_system(sys_id, system_config)
        self._save_system(sys_id)
# Make the cobbler installer discoverable through the os installer registry.
os_installer.register(Installer)

View File

@ -1,33 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""modules to provider providers to read/write cluster/host config
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
# Public API of the config providers package.
__all__ = [
    'db_config_provider', 'file_config_provider', 'mix_config_provider',
    'get_provider', 'get_provider_by_name', 'register_provider',
]
from compass.config_management.providers.config_provider import (
get_provider)
from compass.config_management.providers.config_provider import (
get_provider_by_name)
from compass.config_management.providers.config_provider import (
register_provider)
from compass.config_management.providers.plugins import db_config_provider
from compass.config_management.providers.plugins import file_config_provider
from compass.config_management.providers.plugins import mix_config_provider

View File

@ -1,231 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to provide interface to read/update global/cluster/host config.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging
from abc import ABCMeta
from compass.utils import setting_wrapper as setting
class ConfigProvider(object):
    """Interface for config provider.

    Subclasses implement reading/writing global, cluster and host
    configuration from a backing store (db, file, ...); every method
    here is a no-op default.
    """
    __metaclass__ = ABCMeta
    # NAME identifies the provider in the PROVIDERS registry.
    NAME = 'config_provider'
    def __repr__(self):
        return '%s[%s]' % (self.__class__.__name__, self.NAME)
    def get_global_config(self):
        """Virtual method to get global config.

        :returns: global configuration as dict.
        """
        return {}
    def get_cluster_config(self, clusterid):
        """Virtual method to get cluster config.

        :param clusterid: id of the cluster to get configuration.
        :type clusterid: int
        :returns: cluster configuration as dict.
        """
        return {}
    def update_adapters(self, adapters, roles_per_target_system):
        """Virtual method to update adapters.

        :param adapters: adapters to update
        :type adapters: list of dict
        :param roles_per_target_system: roles per target_system to update
        :type roles_per_target_system: dict of str to dict.
        """
        pass
    def update_switch_filters(self, switch_filters):
        """Virtual method to update switch filters.

        :param switch_filters: switch filters to update.
        :type switch_filters: list of dict
        """
        pass
    def get_host_config(self, hostid):
        """Virtual method to get host config.

        :param hostid: id of the host to get configuration.
        :type hostid: int
        :returns: host configuration as dict.
        """
        return {}
    def update_global_config(self, config):
        """Virtual method to update global config.

        :param config: global configuration.
        :type config: dict
        """
        pass
    def update_cluster_config(self, clusterid, config):
        """Virtual method to update cluster config.

        :param clusterid: the id of the cluster to update configuration.
        :type clusterid: int
        :param config: cluster configuration.
        :type config: dict
        """
        pass
    def update_host_config(self, hostid, config):
        """Virtual method to update host config.

        :param hostid: the id of the host to update configuration.
        :type hostid: int
        :param config: host configuration.
        :type config: dict
        """
        pass
    def clean_host_config(self, hostid):
        """Virtual method to clean host config.

        :param hostid: the id of the host to clean.
        :type hostid: int
        """
        pass
    def reinstall_host(self, hostid):
        """Virtual method to reinstall host.

        :param hostid: the id of the host to reinstall.
        :type hostid: int.
        """
        pass
    def reinstall_cluster(self, clusterid):
        """Virtual method to reinstall cluster.

        :param clusterid: the id of the cluster to reinstall.
        :type clusterid: int
        """
        pass
    def clean_host_installing_progress(self, hostid):
        """Virtual method to clean host installing progress.

        :param hostid: the id of the host to clean the installing progress
        :type hostid: int
        """
        pass
    def clean_cluster_installing_progress(self, clusterid):
        """Virtual method to clean cluster installing progress.

        :param clusterid: the id of the cluster to clean installing progress
        :type clusterid: int
        """
        pass
    def clean_cluster_config(self, clusterid):
        """Virtual method to clean cluster config.

        :param clusterid: the id of the cluster to clean
        :type clusterid: int
        """
        pass
    def get_cluster_hosts(self, clusterid):
        """Virtual method to get hosts of given cluster.

        :param clusterid: the id of the cluster
        :type clusterid: int
        """
        return []
    def get_clusters(self):
        """Virtual method to get cluster list."""
        return []
    def get_switch_and_machines(self):
        """Virtual method to get switches and machines.

        :returns: switches as list, machines per switch as dict of str to list
        """
        return ([], {})
    def update_switch_and_machines(
        self, switches, switch_machines
    ):
        """Virtual method to update switches and machines.

        :param switches: switches to update
        :type switches: list of dict.
        :param switch_machines: machines of each switch to update
        :type switch_machines: dict of str to list of dict.
        """
        pass
    def sync(self):
        """Virtual method to sync data in provider."""
        pass
# Registry of provider classes keyed by their NAME attribute.
PROVIDERS = {}


def get_provider():
    """Return the default provider chosen by compass settings."""
    default_name = setting.PROVIDER_NAME
    return get_provider_by_name(default_name)


def get_provider_by_name(name):
    """Instantiate the registered provider with the given name.

    :param name: provider name.
    :type name: str
    :returns: instance of subclass of :class:`ConfigProvider`.
    :raises: KeyError when no provider is registered under the name.
    """
    try:
        provider_cls = PROVIDERS[name]
    except KeyError:
        logging.error('provider name %s is not found in providers %s',
                      name, PROVIDERS)
        raise KeyError('provider %s is not found in PROVIDERS' % name)
    provider = provider_cls()
    logging.debug('got provider %s', provider)
    return provider


def register_provider(provider):
    """Add a provider class to the registry under its NAME.

    :param provider: class inherited from :class:`ConfigProvider`
    :raises: KeyError when the name is already taken.
    """
    provider_name = provider.NAME
    if provider_name in PROVIDERS:
        logging.error('provider %s name %s is already registered in %s',
                      provider, provider_name, PROVIDERS)
        raise KeyError('provider %s is already registered in PROVIDERS' %
                       provider_name)
    logging.debug('register provider %s', provider_name)
    PROVIDERS[provider_name] = provider

View File

@ -1,13 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -1,314 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to provide ConfigProvider that reads config from db.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging
import os.path
from compass.config_management.providers import config_provider
from compass.config_management.utils import config_filter
from compass.config_management.utils import config_filter_callbacks
from compass.db import database
from compass.db.model import Adapter
from compass.db.model import Cluster
from compass.db.model import ClusterHost
from compass.db.model import ClusterState
from compass.db.model import HostState
from compass.db.model import LogProgressingHistory
from compass.db.model import Machine
from compass.db.model import Role
from compass.db.model import Switch
from compass.db.model import SwitchConfig
from compass.utils import setting_wrapper as setting
# Filter rules used by DBProvider below (glob path -> rule).
# Reading a cluster config: expose every section...
GET_CLUSTER_ALLOWS = {
    '*': config_filter.AllowRule()
}

# ...but drop the HA virtual IP when it is empty/unset.
GET_CLUSTER_DENIES = {
    '/networking/global/ha_vip': config_filter.DenyRule(
        check=config_filter_callbacks.deny_if_empty)
}

# Reading a host config: expose every section...
GET_HOST_ALLOWS = {
    '*': config_filter.AllowRule()
}

# ...but drop role-related sections when they are empty.
GET_HOST_DENIES = {
    '/roles': config_filter.DenyRule(
        check=config_filter_callbacks.deny_if_empty
    ),
    '/dashboard_roles': config_filter.DenyRule(
        check=config_filter_callbacks.deny_if_empty
    ),
    '/haproxy_roles': config_filter.DenyRule(
        check=config_filter_callbacks.deny_if_empty
    ),
}

# Writing a cluster config: only these top-level sections may be stored.
UPDATE_CLUSTER_ALLOWS = {
    '/security': config_filter.AllowRule(),
    '/networking': config_filter.AllowRule(),
    '/partition': config_filter.AllowRule()
}

# Never persist an empty HA virtual IP.
UPDATE_CLUSTER_DENIES = {
    '/networking/global/ha_vip': config_filter.DenyRule(
        check=config_filter_callbacks.deny_if_empty)
}

# Writing a host config: role sections are stored only when non-empty;
# interface IPs may always be stored.
UPDATE_HOST_ALLOWS = {
    '/roles': config_filter.AllowRule(
        check=config_filter_callbacks.allow_if_not_empty),
    '/has_dashboard_roles': config_filter.AllowRule(),
    '/dashboard_roles': config_filter.AllowRule(
        check=config_filter_callbacks.allow_if_not_empty
    ),
    '/haproxy_roles': config_filter.AllowRule(
        check=config_filter_callbacks.allow_if_not_empty
    ),
    '/networking/interfaces/*/ip': config_filter.AllowRule()
}

# No deny rules when writing a host config.
UPDATE_HOST_DENIES = {}
class DBProvider(config_provider.ConfigProvider):
    """Config provider which reads and writes config through the db.

    .. note::
        All methods of this class should be called inside a database
        session scope (database.current_session() must be active);
        no method commits — the surrounding session does.
    """

    NAME = 'db'

    # Filters restricting what is exposed when reading and what may be
    # persisted when writing cluster/host configs.
    GET_CLUSTER_FILTER = config_filter.ConfigFilter(
        GET_CLUSTER_ALLOWS, GET_CLUSTER_DENIES)
    GET_HOST_FILTER = config_filter.ConfigFilter(
        GET_HOST_ALLOWS, GET_HOST_DENIES)
    UPDATE_CLUSTER_FILTER = config_filter.ConfigFilter(
        UPDATE_CLUSTER_ALLOWS, UPDATE_CLUSTER_DENIES)
    UPDATE_HOST_FILTER = config_filter.ConfigFilter(
        UPDATE_HOST_ALLOWS, UPDATE_HOST_DENIES)

    def __init__(self):
        pass

    def get_cluster_config(self, clusterid):
        """Get cluster config from db, filtered for reading.

        :returns: filtered config dict; {} when the cluster is unknown.
        """
        session = database.current_session()
        cluster = session.query(Cluster).filter_by(id=clusterid).first()
        if not cluster:
            return {}

        return self.GET_CLUSTER_FILTER.filter(cluster.config)

    def get_host_config(self, hostid):
        """Get host config from db, filtered for reading.

        :returns: filtered config dict; {} when the host is unknown.
        """
        session = database.current_session()
        host = session.query(ClusterHost).filter_by(id=hostid).first()
        if not host:
            return {}

        return self.GET_HOST_FILTER.filter(host.config)

    def update_cluster_config(self, clusterid, config):
        """Update cluster config to db (silently ignores unknown id)."""
        session = database.current_session()
        cluster = session.query(Cluster).filter_by(id=clusterid).first()
        if not cluster:
            return

        cluster.config = self.UPDATE_CLUSTER_FILTER.filter(config)

    def update_host_config(self, hostid, config):
        """Update host config to db (silently ignores unknown id)."""
        session = database.current_session()
        host = session.query(ClusterHost).filter_by(id=hostid).first()
        if not host:
            return

        host.config = self.UPDATE_HOST_FILTER.filter(config)

    def update_adapters(self, adapters, roles_per_target_system):
        """Replace all Adapter and Role rows with the given data."""
        session = database.current_session()
        # Full replacement: old rows are deleted before inserting.
        session.query(Adapter).delete()
        session.query(Role).delete()
        for adapter in adapters:
            session.add(Adapter(**adapter))

        for _, roles in roles_per_target_system.items():
            for role in roles:
                session.add(Role(**role))

    def update_switch_filters(self, switch_filters):
        """Replace switch filters, de-duplicating identical entries."""
        session = database.current_session()
        switch_filter_tuples = set([])
        session.query(SwitchConfig).delete(synchronize_session='fetch')
        for switch_filter in switch_filters:
            # Tuple of values is used as the de-dup key; assumes each
            # filter dict has a consistent key order — TODO confirm.
            switch_filter_tuple = tuple(switch_filter.values())
            if switch_filter_tuple in switch_filter_tuples:
                logging.debug('ignore adding switch filter: %s',
                              switch_filter)
                continue
            else:
                logging.debug('add switch filter: %s', switch_filter)
                switch_filter_tuples.add(switch_filter_tuple)
                session.add(SwitchConfig(**switch_filter))

    def clean_host_config(self, hostid):
        """Remove host config, state and install-progress history."""
        # Clears log history and resets state first, then drops rows.
        self.clean_host_installing_progress(hostid)
        session = database.current_session()
        session.query(ClusterHost).filter_by(
            id=hostid).delete(synchronize_session='fetch')
        session.query(HostState).filter_by(
            id=hostid).delete(synchronize_session='fetch')

    def reinstall_host(self, hostid):
        """Mark a host for reinstall and reset its install progress."""
        session = database.current_session()
        host = session.query(ClusterHost).filter_by(id=hostid).first()
        if not host:
            return

        # Drop the log progressing history under the host's log dir so
        # progress is recomputed from scratch.
        log_dir = os.path.join(
            setting.INSTALLATION_LOGDIR,
            host.fullname,
            '')
        session.query(LogProgressingHistory).filter(
            LogProgressingHistory.pathname.startswith(
                log_dir)).delete(synchronize_session='fetch')
        if not host.state:
            host.state = HostState()

        # Freeze config edits while installing.
        host.mutable = False
        host.state.state = 'INSTALLING'
        host.state.progress = 0.0
        host.state.message = ''
        host.state.severity = 'INFO'

    def reinstall_cluster(self, clusterid):
        """Mark a cluster for reinstall and reset its install progress."""
        session = database.current_session()
        cluster = session.query(Cluster).filter_by(id=clusterid).first()
        if not cluster:
            return

        if not cluster.state:
            cluster.state = ClusterState()

        cluster.state.state = 'INSTALLING'
        cluster.mutable = False
        cluster.state.progress = 0.0
        cluster.state.message = ''
        cluster.state.severity = 'INFO'

    def clean_cluster_installing_progress(self, clusterid):
        """Reset installing progress of a cluster already being set up."""
        session = database.current_session()
        cluster = session.query(Cluster).filter_by(id=clusterid).first()
        if not cluster:
            return

        # Only clusters that have left UNINITIALIZED carry progress.
        if cluster.state and cluster.state.state != 'UNINITIALIZED':
            cluster.mutable = False
            cluster.state.state = 'INSTALLING'
            cluster.state.progress = 0.0
            cluster.state.message = ''
            cluster.state.severity = 'INFO'

    def clean_host_installing_progress(self, hostid):
        """Reset installing progress and log history of a host."""
        session = database.current_session()
        host = session.query(ClusterHost).filter_by(id=hostid).first()
        if not host:
            return

        log_dir = os.path.join(
            setting.INSTALLATION_LOGDIR,
            host.fullname,
            '')
        session.query(LogProgressingHistory).filter(
            LogProgressingHistory.pathname.startswith(
                log_dir)).delete(synchronize_session='fetch')
        if host.state and host.state.state != 'UNINITIALIZED':
            host.mutable = False
            host.state.state = 'INSTALLING'
            host.state.progress = 0.0
            host.state.message = ''
            host.state.severity = 'INFO'

    def clean_cluster_config(self, clusterid):
        """Remove cluster config and state rows."""
        session = database.current_session()
        session.query(Cluster).filter_by(
            id=clusterid).delete(synchronize_session='fetch')
        session.query(ClusterState).filter_by(
            id=clusterid).delete(synchronize_session='fetch')

    def get_cluster_hosts(self, clusterid):
        """Get ids of hosts belonging to the cluster."""
        session = database.current_session()
        hosts = session.query(ClusterHost).filter_by(
            cluster_id=clusterid).all()
        return [host.id for host in hosts]

    def get_clusters(self):
        """Get ids of all clusters."""
        session = database.current_session()
        clusters = session.query(Cluster).all()
        return [cluster.id for cluster in clusters]

    def get_switch_and_machines(self):
        """Get switches and machines.

        :returns: (list of switch dicts, dict of switch ip to list of
            machine dicts).
        """
        session = database.current_session()
        switches = session.query(Switch).all()
        switches_data = []
        switch_machines_data = {}
        for switch in switches:
            switches_data.append({
                'ip': switch.ip,
                'vendor_info': switch.vendor_info,
                'credential': switch.credential,
                'state': switch.state,
            })
            switch_machines_data[switch.ip] = []
            for machine in switch.machines:
                switch_machines_data[switch.ip].append({
                    'mac': machine.mac,
                    'port': machine.port,
                    'vlan': machine.vlan,
                })

        return switches_data, switch_machines_data

    def update_switch_and_machines(
        self, switches, switch_machines
    ):
        """Replace all switches and machines with the given data.

        :param switches: list of switch attribute dicts.
        :param switch_machines: dict of switch ip to list of machine
            attribute dicts.
        """
        session = database.current_session()
        # Full replacement; machines are re-attached to their switch.
        session.query(Switch).delete(synchronize_session='fetch')
        session.query(Machine).delete(synchronize_session='fetch')
        for switch_data in switches:
            switch = Switch(**switch_data)
            logging.info('add switch %s', switch)
            session.add(switch)
            for machine_data in switch_machines.get(switch.ip, []):
                machine = Machine(**machine_data)
                logging.info('add machine %s under %s', machine, switch)
                machine.switch = switch
                session.add(machine)


# Make the provider available under NAME 'db' at import time.
config_provider.register_provider(DBProvider)

View File

@ -1,99 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""config provider read config from file.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import json
import logging
from compass.config_management.providers import config_provider
from compass.utils import setting_wrapper as setting
class FileProvider(config_provider.ConfigProvider):
    """Config provider that loads the global config from a local file."""

    NAME = 'file'

    def __init__(self):
        self.config_dir_ = setting.CONFIG_DIR
        self.global_config_filename_ = setting.GLOBAL_CONFIG_FILENAME
        self.config_file_format_ = setting.CONFIG_FILE_FORMAT

    def _global_config_filename(self):
        """Return the full path of the global config file."""
        return '/'.join([self.config_dir_, self.global_config_filename_])

    def _config_format(self):
        """Return the configured config file format."""
        return self.config_file_format_

    @classmethod
    def _config_format_python(cls, config_format):
        """Return True when the config file is python formatted."""
        return config_format == 'python'

    @classmethod
    def _config_format_json(cls, config_format):
        """Return True when the config file is json formatted."""
        return config_format == 'json'

    @classmethod
    def _read_config_from_file(cls, filename, config_format):
        """Load and parse a config file.

        :returns: dict of parsed config; {} on any read/parse failure.
        """
        logging.debug('read config from %s and format is %s',
                      filename, config_format)
        try:
            with open(filename) as file_handler:
                content = file_handler.read()
        except Exception as error:
            logging.error('failed to read file %s', filename)
            logging.exception(error)
            return {}

        config_globals = {}
        config_locals = {}
        if cls._config_format_python(config_format):
            # NOTE: exec of a local, operator-controlled config file;
            # never point this at untrusted input.
            try:
                exec(content, config_globals, config_locals)
            except Exception as error:
                logging.error('failed to exec %s', content)
                logging.exception(error)
                return {}
        elif cls._config_format_json(config_format):
            try:
                config_locals = json.loads(content)
            except Exception as error:
                logging.error('failed to load json data %s', content)
                logging.exception(error)
                return {}

        return config_locals

    def get_global_config(self):
        """Read the global config from the configured file."""
        return self._read_config_from_file(
            self._global_config_filename(), self._config_format())


# Make the provider available under NAME 'file' at import time.
config_provider.register_provider(FileProvider)

View File

@ -1,111 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mix provider which read config from different other providers.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
from compass.config_management.providers import config_provider
from compass.utils import setting_wrapper as setting
class MixProvider(config_provider.ConfigProvider):
    """Provider that composes three other providers.

    Global config comes from one backend, cluster config from another,
    and everything else (host config, adapters, switches, ...) from the
    host backend, each chosen by name from compass settings.
    """

    NAME = 'mix'

    def __init__(self):
        self.global_backend_ = config_provider.get_provider_by_name(
            setting.GLOBAL_CONFIG_PROVIDER)
        self.cluster_backend_ = config_provider.get_provider_by_name(
            setting.CLUSTER_CONFIG_PROVIDER)
        self.host_backend_ = config_provider.get_provider_by_name(
            setting.HOST_CONFIG_PROVIDER)

    def get_global_config(self):
        """Delegate to the global config backend."""
        return self.global_backend_.get_global_config()

    def get_cluster_config(self, clusterid):
        """Delegate to the cluster config backend."""
        return self.cluster_backend_.get_cluster_config(clusterid)

    def get_host_config(self, hostid):
        """Delegate to the host config backend."""
        return self.host_backend_.get_host_config(hostid)

    def update_global_config(self, config):
        """Delegate to the global config backend."""
        self.global_backend_.update_global_config(config)

    def update_cluster_config(self, clusterid, config):
        """Delegate to the cluster config backend."""
        self.cluster_backend_.update_cluster_config(
            clusterid, config)

    def update_host_config(self, hostid, config):
        """Delegate to the host config backend."""
        self.host_backend_.update_host_config(hostid, config)

    def update_adapters(self, adapters, roles_per_target_system):
        """Delegate to the host config backend."""
        self.host_backend_.update_adapters(
            adapters, roles_per_target_system)

    def update_switch_filters(self, switch_filters):
        """Delegate to the host config backend."""
        self.host_backend_.update_switch_filters(switch_filters)

    def clean_host_config(self, hostid):
        """Delegate to the host config backend."""
        self.host_backend_.clean_host_config(hostid)

    def reinstall_host(self, hostid):
        """Delegate to the host config backend."""
        self.host_backend_.reinstall_host(hostid)

    def reinstall_cluster(self, clusterid):
        """Delegate to the host config backend."""
        self.host_backend_.reinstall_cluster(clusterid)

    def clean_host_installing_progress(self, hostid):
        """Delegate to the host config backend."""
        self.host_backend_.clean_host_installing_progress(hostid)

    def clean_cluster_installing_progress(self, clusterid):
        """Delegate to the host config backend."""
        self.host_backend_.clean_cluster_installing_progress(clusterid)

    def clean_cluster_config(self, clusterid):
        """Delegate to the host config backend."""
        self.host_backend_.clean_cluster_config(clusterid)

    def get_cluster_hosts(self, clusterid):
        """Delegate to the host config backend."""
        return self.host_backend_.get_cluster_hosts(clusterid)

    def get_clusters(self):
        """Delegate to the host config backend."""
        return self.host_backend_.get_clusters()

    def get_switch_and_machines(self):
        """Delegate to the host config backend."""
        return self.host_backend_.get_switch_and_machines()

    def update_switch_and_machines(self, switches, switch_machines):
        """Delegate to the host config backend."""
        self.host_backend_.update_switch_and_machines(
            switches, switch_machines)


# Make the provider available under NAME 'mix' at import time.
config_provider.register_provider(MixProvider)

View File

@ -1,13 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -1,154 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to filter configuration when upddating.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging
from compass.config_management.utils import config_reference
class AllowRule(object):
    """Rule deciding whether a config reference may be copied in."""

    def __init__(self, check=None):
        # Optional callback taking (key, ref); falsy means always allow.
        self.check_ = check

    def allow(self, key, ref):
        """Return True when the ref may be added to the filtered config."""
        if not self.check_:
            return True

        return self.check_(key, ref)
class DenyRule(object):
    """Rule deciding whether a config reference must be removed."""

    def __init__(self, check=None):
        # Optional callback taking (key, ref); falsy means always deny.
        self.check_ = check

    def deny(self, key, ref):
        """Return True when the ref should be removed from the config."""
        if not self.check_:
            return True

        return self.check_(key, ref)
class ConfigFilter(object):
    """Config filter based on allow and deny rules.

    Allow rules select which glob paths of the input config are copied
    into the filtered result; deny rules then remove matching paths.
    """

    def __init__(self, allows=None, denies=None):
        """Constructor

        :param allows: dict of glob path and allow rule to copy to the
                       filtered configuration; defaults to allowing
                       everything ({'*': AllowRule()}).
        :type allows: dict of str to AllowRule
        :param denies: dict of glob path and deny rule to remove from
                       the filtered configuration; defaults to {}.
        :type denies: dict of str to DenyRule
        :raises: TypeError when allows/denies have the wrong types.

        .. note::
            Defaults were previously mutable default arguments shared
            across all instances; they are now created per instance.
        """
        if allows is None:
            allows = {'*': AllowRule()}

        if denies is None:
            denies = {}

        self.allows_ = allows
        self.denies_ = denies
        self._is_valid()

    def __repr__(self):
        return '%s[allows=%s,denies=%s]' % (
            self.__class__.__name__, self.allows_, self.denies_)

    def _is_allows_valid(self):
        """Check if allows are a dict of str to AllowRule."""
        if not isinstance(self.allows_, dict):
            raise TypeError(
                'allows type is %s but expected type is dict: %s' % (
                    type(self.allows_), self.allows_))

        for allow_key, allow_rule in self.allows_.items():
            # NOTE: basestring is the Python 2 builtin; this module
            # targets Python 2.
            if not isinstance(allow_key, basestring):
                # Fixed: error message previously reported the rule's
                # type instead of the key's type.
                raise TypeError(
                    'allow_key %s type is %s but expected type '
                    'is str or unicode' % (allow_key, type(allow_key)))

            if not isinstance(allow_rule, AllowRule):
                raise TypeError(
                    'allows[%s] %s type is %s but expected type '
                    'is AllowRule' % (
                        allow_key, allow_rule, type(allow_rule)))

    def _is_denies_valid(self):
        """Check if denies are a dict of str to DenyRule."""
        if not isinstance(self.denies_, dict):
            raise TypeError(
                'denies type is %s but expected type is dict: %s' % (
                    type(self.denies_), self.denies_))

        for deny_key, deny_rule in self.denies_.items():
            if not isinstance(deny_key, basestring):
                # Fixed: error message previously reported the rule's
                # type instead of the key's type.
                raise TypeError(
                    'deny_key %s type is %s but expected type '
                    'is str or unicode' % (deny_key, type(deny_key)))

            if not isinstance(deny_rule, DenyRule):
                raise TypeError(
                    'denies[%s] %s type is %s but expected type '
                    'is DenyRule' % (deny_key, deny_rule, type(deny_rule)))

    def _is_valid(self):
        """Check if config filter is valid."""
        self._is_allows_valid()
        self._is_denies_valid()

    def filter(self, config):
        """Filter config through the allow then deny rules.

        :param config: configuration to filter.
        :type config: dict
        :returns: filtered configuration as dict
        """
        ref = config_reference.ConfigReference(config)
        filtered_ref = config_reference.ConfigReference({})
        self._filter_allows(ref, filtered_ref)
        self._filter_denies(filtered_ref)
        filtered_config = config_reference.get_clean_config(
            filtered_ref.config)
        logging.debug('filter config %s to %s', config, filtered_config)
        return filtered_config

    def _filter_allows(self, ref, filtered_ref):
        """Copy ref config matching the allow rules into filtered_ref."""
        for allow_key, allow_rule in self.allows_.items():
            logging.debug('filter by allow rule %s', allow_key)
            for sub_key, sub_ref in ref.ref_items(allow_key):
                if allow_rule.allow(sub_key, sub_ref):
                    logging.debug('%s is added to filtered config', sub_key)
                    filtered_ref.setdefault(sub_key).update(sub_ref.config)
                else:
                    logging.debug('%s is ignored to add to filtered config',
                                  sub_key)

    def _filter_denies(self, filtered_ref):
        """Remove config matching the deny rules from filtered_ref."""
        for deny_key, deny_rule in self.denies_.items():
            logging.debug('filter by deny rule %s', deny_key)
            for ref_key, ref in filtered_ref.ref_items(deny_key):
                if deny_rule.deny(ref_key, ref):
                    logging.debug('%s is removed from filtered config',
                                  ref_key)
                    del filtered_ref[ref_key]
                else:
                    logging.debug('%s is ignored to del from filtered config',
                                  ref_key)

View File

@ -1,31 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""callback lib for config filter callbacks."""
def allow_if_not_empty(_key, ref):
    """Allow the reference only when it carries a non-empty config."""
    return bool(ref.config)


def deny_if_empty(_key, ref):
    """Deny the reference when its config is empty."""
    return not ref.config

View File

@ -1,687 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to get configs from provider and isntallers and update
them to provider and installers.
.. moduleauthor:: Xiaodong wang ,xiaodongwang@huawei.com>
"""
import functools
import logging
from compass.config_management import installers
from compass.config_management import providers
from compass.config_management.utils.config_merger import ConfigMapping
from compass.config_management.utils.config_merger import ConfigMerger
from compass.config_management.utils import config_merger_callbacks
from compass.config_management.utils.config_reference import ConfigReference
from compass.utils import setting_wrapper as setting
from compass.utils import util
# Declarative merger: derives each host's config from the cluster-level
# config. Each ConfigMapping pulls values from the cluster ("upper") and
# host ("lower") configs and writes the computed value at to_key.
CLUSTER_HOST_MERGER = ConfigMerger(
    mappings=[
        # Assign a concrete IP per interface from the cluster's
        # ip_start/ip_end range.
        ConfigMapping(
            path_list=['/networking/interfaces/*'],
            from_upper_keys={'ip_start': 'ip_start', 'ip_end': 'ip_end'},
            to_key='ip',
            value=config_merger_callbacks.assign_ips
        ),
        # Distribute roles across hosts per the cluster's assign policy;
        # override=True replaces any pre-existing /roles value.
        ConfigMapping(
            path_list=['/role_assign_policy'],
            from_upper_keys={
                'policy_by_host_numbers': 'policy_by_host_numbers',
                'default': 'default'},
            to_key='/roles',
            value=config_merger_callbacks.assign_roles_by_host_numbers,
            override=True
        ),
        # Copied through verbatim from cluster to host.
        ConfigMapping(
            path_list=['/config_mapping']
        ),
        ConfigMapping(
            path_list=['/role_mapping']
        ),
        # Flag/collect which dashboard roles this host actually holds.
        ConfigMapping(
            path_list=['/dashboard_roles'],
            from_lower_keys={'lower_values': '/roles'},
            to_key='/has_dashboard_roles',
            value=config_merger_callbacks.has_intersection
        ),
        ConfigMapping(
            path_list=['/dashboard_roles'],
            from_lower_keys={'lower_values': '/roles'},
            to_key='/dashboard_roles',
            value=config_merger_callbacks.get_intersection
        ),
        # Collect which haproxy roles this host actually holds.
        ConfigMapping(
            path_list=['/haproxy_roles'],
            from_lower_keys={'lower_values': '/roles'},
            to_key='/haproxy_roles',
            value=config_merger_callbacks.get_intersection
        ),
        # Plain pass-through of shared networking/security/partition
        # settings.
        ConfigMapping(
            path_list=[
                '/networking/global/nameservers',
                '/networking/global/gateway',
                '/networking/global/proxy',
                '/networking/global/ntp_server',
                '/networking/global/ha_vip',
                '/networking/interfaces/*/netmask',
                '/networking/interfaces/*/nic',
                '/networking/interfaces/*/promisc',
                '/security/*',
                '/partition',
            ]
        ),
        # Build per-interface DNS aliases from the cluster dns pattern.
        ConfigMapping(
            path_list=['/networking/interfaces/*'],
            from_upper_keys={'pattern': 'dns_pattern',
                             'clusterid': '/clusterid',
                             'search_path': '/networking/global/search_path'},
            from_lower_keys={'hostname': '/hostname'},
            to_key='dns_alias',
            value=functools.partial(
                config_merger_callbacks.assign_from_pattern,
                upper_keys=['search_path', 'clusterid'],
                lower_keys=['hostname'])
        ),
        # Compute the no-proxy list from hostnames, management IPs and
        # the HA vip.
        ConfigMapping(
            path_list=['/networking/global'],
            from_upper_keys={'default': 'default_no_proxy',
                             'clusterid': '/clusterid',
                             'noproxy_pattern': 'noproxy_pattern',
                             'ha_vip': 'ha_vip'},
            from_lower_keys={'hostnames': '/hostname',
                             'ips': '/networking/interfaces/management/ip'},
            to_key='ignore_proxy',
            value=config_merger_callbacks.assign_noproxy
        ),
        # Expand the DNS search path from the cluster pattern.
        ConfigMapping(
            path_list=['/networking/global'],
            from_upper_keys={'pattern': 'search_path_pattern',
                             'search_path': 'search_path',
                             'clusterid': '/clusterid'},
            to_key='search_path',
            value=functools.partial(
                config_merger_callbacks.assign_from_pattern,
                upper_keys=['search_path', 'clusterid']
            )
        ),
        # Only hosts holding haproxy roles (see 'conditions') receive
        # the keepalived/haproxy settings below.
        ConfigMapping(
            path_list=['/networking/global/ha_vip'],
            to_key='/haproxy/router_id',
            value=functools.partial(
                config_merger_callbacks.assign_by_order,
                orders=config_merger_callbacks.generate_order(0, -1)
            ),
            from_upper_keys={'prefix': '/haproxy/router_id_prefix'},
            from_lower_keys={'conditions': '/haproxy_roles'},
        ),
        # Priorities are assigned in reverse order so earlier hosts get
        # higher priority.
        ConfigMapping(
            path_list=['/networking/global/ha_vip'],
            to_key='/haproxy/priority',
            value=functools.partial(
                config_merger_callbacks.assign_by_order,
                orders=config_merger_callbacks.generate_order(0, -1),
                reverse=True
            ),
            from_upper_keys={'prefix': '/haproxy/default_priority'},
            from_lower_keys={'conditions': '/haproxy_roles'},
        ),
        # MASTER/BACKUP-style states taken from the configured list,
        # falling back to the default state.
        ConfigMapping(
            path_list=['/networking/global/ha_vip'],
            to_key='/haproxy/state',
            value=functools.partial(
                config_merger_callbacks.assign_by_order,
                prefix=''
            ),
            from_upper_keys={
                'orders': '/haproxy/states_to_assign',
                'default_order': '/haproxy/default_state',
            },
            from_lower_keys={'conditions': '/haproxy_roles'}
        )])
class ConfigManager(object):
"""Class to get global/clsuter/host configs.
.. note::
The class is used to get global/clsuter/host configs
from provider, os installer, package installer, process them,
and update them to provider, os installer, package installer.
"""
def __init__(self):
    """Wire up the config provider and both installers from settings."""
    self.config_provider_ = providers.get_provider()
    logging.debug('got config provider: %s', self.config_provider_)
    self.package_installer_ = installers.get_package_installer()
    logging.debug('got package installer: %s', self.package_installer_)
    # The os installer needs the package installer to coordinate
    # per-host install settings.
    self.os_installer_ = installers.get_os_installer(
        package_installer=self.package_installer_)
    logging.debug('got os installer: %s', self.os_installer_)
def get_adapters(self):
"""Get adapter information from os installer and package installer.
:returns: list of adapter information.
.. note::
For each adapter, the information is as
{'name': '...', 'os': '...', 'target_system': '...'}
"""
oses = self.os_installer_.get_oses()
logging.debug('got oses %s from %s', oses, self.os_installer_)
target_systems_per_os = self.package_installer_.get_target_systems(
oses)
logging.debug('got target_systems per os from %s: %s',
self.package_installer_, target_systems_per_os)
adapters = []
for os_version, target_systems in target_systems_per_os.items():
for target_system in target_systems:
adapters.append({
'name': '%s/%s' % (os_version, target_system),
'os': os_version,
'target_system': target_system})
logging.debug('got adapters: %s', adapters)
return adapters
def get_roles(self, target_system):
"""Get all roles of the target system from package installer.
:param target_system: the target distributed system to deploy.
:type target_system: str
:returns: list of role information.
.. note::
For each role, the information is as:
{'name': '...', 'description': '...', 'target_system': '...'}
"""
roles = self.package_installer_.get_roles(target_system)
logging.debug('got target system %s roles %s from %s',
target_system, roles, self.package_installer_)
return [
{
'name': role,
'description': description,
'target_system': target_system
} for role, description in roles.items()
]
def update_adapters_from_installers(self):
"""update adapters from installers."""
adapters = self.get_adapters()
target_systems = set()
roles_per_target_system = {}
for adapter in adapters:
target_systems.add(adapter['target_system'])
for target_system in target_systems:
roles_per_target_system[target_system] = self.get_roles(
target_system)
logging.debug('update adapters %s and '
'roles per target system %s to %s',
adapters, roles_per_target_system,
self.config_provider_)
self.config_provider_.update_adapters(
adapters, roles_per_target_system)
def update_switch_filters(self):
    """Push switch filters derived from setting.SWITCHES to the provider.

    Skips silently (with an info log) when SWITCHES is not configured.
    """
    if not hasattr(setting, 'SWITCHES'):
        logging.info('no switch configs to set')
        return

    switch_filters = util.get_switch_filters(setting.SWITCHES)
    logging.debug('update switch filters %s to %s',
                  switch_filters, self.config_provider_)
    self.config_provider_.update_switch_filters(switch_filters)
def get_switch_and_machines(self):
    """Get switches and machines from the config provider.

    :returns: tuple of (switch list, dict of machines per switch).
    """
    switches, machines_per_switch = (
        self.config_provider_.get_switch_and_machines())
    logging.debug('got switches %s from %s',
                  switches, self.config_provider_)
    logging.debug('got machines per switch %s from %s',
                  machines_per_switch, self.config_provider_)
    return (switches, machines_per_switch)
def update_switch_and_machines(
    self, switches, switch_machines
):
    """Update switches and machines in the config provider.

    :param switches: switches to update.
    :param switch_machines: machines of each switch to update.
    """
    logging.debug('update switches %s to %s',
                  switches, self.config_provider_)
    logging.debug('update switch machines %s to %s',
                  switch_machines, self.config_provider_)
    self.config_provider_.update_switch_and_machines(
        switches, switch_machines)
def get_global_config(self, os_version, target_system):
    """Get the merged global config from provider and both installers.

    Provider config is merged with os installer config, then package
    installer config; merge order determines precedence (later wins
    per util.merge_dict semantics — see that helper).
    """
    config = self.config_provider_.get_global_config()
    logging.debug('got global provider config from %s: %s',
                  self.config_provider_, config)
    os_config = self.os_installer_.get_global_config(
        os_version=os_version, target_system=target_system)
    logging.debug('got global os config from %s: %s',
                  self.os_installer_, os_config)
    package_config = self.package_installer_.get_global_config(
        os_version=os_version,
        target_system=target_system)
    logging.debug('got global package config from %s: %s',
                  self.package_installer_, package_config)
    util.merge_dict(config, os_config)
    util.merge_dict(config, package_config)
    return config
def update_global_config(self, config, os_version, target_system):
    """Push the global config to provider and both installers."""
    logging.debug('update global config: %s', config)
    logging.debug('update global config to %s',
                  self.config_provider_)
    self.config_provider_.update_global_config(config)
    logging.debug('update global config to %s',
                  self.os_installer_)
    self.os_installer_.update_global_config(
        config, os_version=os_version, target_system=target_system)
    logging.debug('update global config to %s',
                  self.package_installer_)
    self.package_installer_.update_global_config(
        config, os_version=os_version, target_system=target_system)
    def get_cluster_config(self, clusterid, os_version, target_system):
        """Get cluster config merged from provider and both installers.

        :param clusterid: cluster id.
        :param os_version: os version of the cluster.
        :param target_system: target distributed system name.
        :returns: merged cluster config as dict.
        """
        config = self.config_provider_.get_cluster_config(clusterid)
        logging.debug('got cluster %s config from %s: %s',
                      clusterid, self.config_provider_, config)
        os_config = self.os_installer_.get_cluster_config(
            clusterid, os_version=os_version,
            target_system=target_system)
        logging.debug('got cluster %s config from %s: %s',
                      clusterid, self.os_installer_, os_config)
        package_config = self.package_installer_.get_cluster_config(
            clusterid, os_version=os_version,
            target_system=target_system)
        logging.debug('got cluster %s config from %s: %s',
                      clusterid, self.package_installer_, package_config)
        # merge installer configs into the provider config in place.
        util.merge_dict(config, os_config)
        util.merge_dict(config, package_config)
        return config
    def update_cluster_config(self, clusterid, config,
                              os_version, target_system):
        """Push a cluster config to provider and both installers.

        :param clusterid: cluster id.
        :param config: cluster config dict to store.
        :param os_version: os version of the cluster.
        :param target_system: target distributed system name.
        """
        logging.debug('update cluster %s config: %s', clusterid, config)
        logging.debug('update cluster %s config to %s',
                      clusterid, self.config_provider_)
        self.config_provider_.update_cluster_config(clusterid, config)
        logging.debug('update cluster %s config to %s',
                      clusterid, self.os_installer_)
        self.os_installer_.update_cluster_config(
            clusterid, config, os_version=os_version,
            target_system=target_system)
        logging.debug('update cluster %s config to %s',
                      clusterid, self.package_installer_)
        self.package_installer_.update_cluster_config(
            clusterid, config, os_version=os_version,
            target_system=target_system)
    def get_host_config(self, hostid, os_version, target_system):
        """Get host config merged from provider and both installers.

        :param hostid: host id.
        :param os_version: os version of the host's cluster.
        :param target_system: target distributed system name.
        :returns: merged host config as dict.
        """
        config = self.config_provider_.get_host_config(hostid)
        logging.debug('got host %s config from %s: %s',
                      hostid, self.config_provider_, config)
        os_config = self.os_installer_.get_host_config(
            hostid, os_version=os_version,
            target_system=target_system)
        logging.debug('got host %s config from %s: %s',
                      hostid, self.os_installer_, os_config)
        package_config = self.package_installer_.get_host_config(
            hostid, os_version=os_version,
            target_system=target_system)
        logging.debug('got host %s config from %s: %s',
                      hostid, self.package_installer_, package_config)
        # merge installer configs into the provider config in place.
        util.merge_dict(config, os_config)
        util.merge_dict(config, package_config)
        return config
def get_host_configs(self, hostids, os_version, target_system):
"""get hosts' configs."""
host_configs = {}
for hostid in hostids:
host_configs[hostid] = self.get_host_config(
hostid, os_version, target_system)
return host_configs
    def clean_host_config(self, hostid, os_version, target_system):
        """Clean a host's config in provider and both installers.

        The config is fetched first so the installers can locate what
        to clean from it.

        :param hostid: host id.
        :param os_version: os version of the host's cluster.
        :param target_system: target distributed system name.
        """
        config = self.config_provider_.get_host_config(hostid)
        logging.debug('got host %s config from %s: %s',
                      hostid, self.config_provider_, config)
        logging.debug('clean host %s config in %s',
                      hostid, self.config_provider_)
        self.config_provider_.clean_host_config(hostid)
        logging.debug('clean host %s config in %s',
                      hostid, self.os_installer_)
        self.os_installer_.clean_host_config(
            hostid, config, os_version=os_version,
            target_system=target_system)
        logging.debug('clean host %s config in %s',
                      hostid, self.package_installer_)
        self.package_installer_.clean_host_config(
            hostid, config, os_version=os_version,
            target_system=target_system)
def clean_host_configs(self, hostids, os_version, target_system):
"""clean hosts' configs."""
for hostid in hostids:
self.clean_host_config(hostid, os_version, target_system)
    def reinstall_host(self, hostid, os_version, target_system):
        """Trigger host reinstall in provider and both installers.

        :param hostid: host id.
        :param os_version: os version of the host's cluster.
        :param target_system: target distributed system name.
        """
        config = self.config_provider_.get_host_config(hostid)
        logging.debug('got host %s config from %s: %s',
                      hostid, self.config_provider_, config)
        logging.debug('reinstall host %s in %s',
                      hostid, self.config_provider_)
        self.config_provider_.reinstall_host(hostid)
        logging.debug('reinstall host %s in %s',
                      hostid, self.os_installer_)
        self.os_installer_.reinstall_host(
            hostid, config, os_version=os_version,
            target_system=target_system)
        logging.debug('reinstall host %s in %s',
                      hostid, self.package_installer_)
        self.package_installer_.reinstall_host(
            hostid, config, os_version=os_version,
            target_system=target_system)
    def reinstall_cluster(self, clusterid, os_version, target_system):
        """Trigger cluster reinstall in provider and both installers.

        :param clusterid: cluster id.
        :param os_version: os version of the cluster.
        :param target_system: target distributed system name.
        """
        config = self.config_provider_.get_cluster_config(clusterid)
        logging.debug('got cluster %s config from %s: %s',
                      clusterid, self.config_provider_, config)
        logging.debug('reinstall cluster %s in %s',
                      clusterid, self.config_provider_)
        self.config_provider_.reinstall_cluster(clusterid)
        logging.debug('reinstall cluster %s in %s',
                      clusterid, self.os_installer_)
        self.os_installer_.reinstall_cluster(
            clusterid, config, os_version=os_version,
            target_system=target_system)
        logging.debug('reinstall cluster %s in %s',
                      clusterid, self.package_installer_)
        self.package_installer_.reinstall_cluster(
            clusterid, config, os_version=os_version,
            target_system=target_system)
def reinstall_hosts(self, hostids, os_version, target_system):
"""reinstall hosts."""
for hostid in hostids:
self.reinstall_host(hostid, os_version, target_system)
    def clean_host_installing_progress(self, hostid,
                                       os_version, target_system):
        """Clean a host's installing progress everywhere.

        The host config itself is left untouched.

        :param hostid: host id.
        :param os_version: os version of the host's cluster.
        :param target_system: target distributed system name.
        """
        config = self.config_provider_.get_host_config(hostid)
        logging.debug('got host %s config from %s: %s',
                      hostid, self.config_provider_, config)
        logging.debug('clean host %s installing progress in %s',
                      hostid, self.config_provider_)
        self.config_provider_.clean_host_installing_progress(hostid)
        logging.debug('clean host %s installing progress in %s',
                      hostid, self.os_installer_)
        self.os_installer_.clean_host_installing_progress(
            hostid, config, os_version=os_version,
            target_system=target_system)
        logging.debug('clean host %s installing progress in %s',
                      hostid, self.package_installer_)
        self.package_installer_.clean_host_installing_progress(
            hostid, config, os_version=os_version,
            target_system=target_system)
def clean_hosts_installing_progress(self, hostids,
os_version, target_system):
"""clean hosts installing progress."""
for hostid in hostids:
self.clean_host_installing_progress(
hostid, os_version, target_system)
def clean_cluster_installing_progress(self, clusterid,
os_version, target_system):
"""clean cluster installing progress."""
config = self.config_provider_.get_cluster_config(clusterid)
logging.debug('got host %s config from %s: %s',
clusterid, self.config_provider_, config)
logging.debug('clean cluster %s installing progress in %s',
clusterid, self.config_provider_)
self.config_provider_.clean_cluster_installing_progress(clusterid)
logging.debug('clean cluster %s installing progress in %s',
clusterid, self.os_installer_)
self.os_installer_.clean_cluster_installing_progress(
clusterid, config, os_version=os_version,
target_system=target_system)
logging.debug('clean cluster %s installing progress in %s',
clusterid, self.package_installer_)
self.package_installer_.clean_cluster_installing_progress(
clusterid, config, os_version=os_version,
target_system=target_system)
    def clean_cluster_config(self, clusterid,
                             os_version, target_system):
        """Clean a cluster's config in provider and both installers.

        :param clusterid: cluster id.
        :param os_version: os version of the cluster.
        :param target_system: target distributed system name.
        """
        config = self.config_provider_.get_cluster_config(clusterid)
        logging.debug('got cluster %s config from %s: %s',
                      clusterid, self.config_provider_, config)
        logging.debug('clean cluster %s config in %s',
                      clusterid, self.config_provider_)
        self.config_provider_.clean_cluster_config(clusterid)
        logging.debug('clean cluster %s config in %s',
                      clusterid, self.os_installer_)
        self.os_installer_.clean_cluster_config(
            clusterid, config, os_version=os_version,
            target_system=target_system)
        logging.debug('clean cluster %s config in %s',
                      clusterid, self.package_installer_)
        self.package_installer_.clean_cluster_config(
            clusterid, config, os_version=os_version,
            target_system=target_system)
    def update_host_config(self, hostid, config,
                           os_version, target_system):
        """Push a host config to provider and both installers.

        :param hostid: host id.
        :param config: host config dict to store.
        :param os_version: os version of the host's cluster.
        :param target_system: target distributed system name.
        """
        logging.debug('update host %s config: %s', hostid, config)
        logging.debug('update host %s config to %s',
                      hostid, self.config_provider_)
        self.config_provider_.update_host_config(hostid, config)
        logging.debug('update host %s config to %s',
                      hostid, self.os_installer_)
        self.os_installer_.update_host_config(
            hostid, config, os_version=os_version,
            target_system=target_system)
        logging.debug('update host %s config to %s',
                      hostid, self.package_installer_)
        self.package_installer_.update_host_config(
            hostid, config, os_version=os_version,
            target_system=target_system)
def update_host_configs(self, host_configs, os_version, target_system):
"""update host configs."""
for hostid, host_config in host_configs.items():
self.update_host_config(
hostid, host_config, os_version, target_system)
def get_cluster_hosts(self, clusterid):
"""get cluster hosts."""
hostids = self.config_provider_.get_cluster_hosts(clusterid)
logging.debug('got hosts of cluster %s from %s: %s',
clusterid, self.config_provider_, hostids)
return hostids
def get_clusters(self):
"""get clusters."""
clusters = self.config_provider_.get_clusters()
logging.debug('got clusters from %s: %s',
self.config_provider_, clusters)
return clusters
    def filter_cluster_and_hosts(self, cluster_hosts,
                                 os_versions, target_systems,
                                 cluster_properties_match,
                                 cluster_properties_name,
                                 host_properties_match,
                                 host_properties_name):
        """Get filtered cluster and host config properties.

        :param cluster_hosts: dict of cluster id to list of host ids.
        :param os_versions: dict of cluster id to os version.
        :param target_systems: dict of cluster id to target system name.
        :param cluster_properties_match: match spec applied to cluster refs.
        :param cluster_properties_name: property names kept per cluster.
        :param host_properties_match: match spec applied to host refs.
        :param host_properties_name: property names kept per host.
        :returns: tuple (clusters_properties, cluster_hosts_properties).
        """
        logging.debug('filter cluster_hosts: %s', cluster_hosts)
        clusters_properties = []
        cluster_hosts_properties = {}
        for clusterid, hostids in cluster_hosts.items():
            cluster_config = self.get_cluster_config(
                clusterid, os_version=os_versions[clusterid],
                target_system=target_systems[clusterid])
            cluster_ref = ConfigReference(cluster_config)
            # only clusters whose config matches contribute properties.
            if cluster_ref.match(cluster_properties_match):
                clusters_properties.append(
                    cluster_ref.filter(cluster_properties_name))
            host_configs = self.get_host_configs(
                hostids, os_version=os_versions[clusterid],
                target_system=target_systems[clusterid])
            cluster_hosts_properties[clusterid] = []
            for _, host_config in host_configs.items():
                host_ref = ConfigReference(host_config)
                if host_ref.match(host_properties_match):
                    cluster_hosts_properties[clusterid].append(
                        host_ref.filter(host_properties_name))
        logging.debug('got clsuter properties: %s',
                      clusters_properties)
        logging.debug('got cluster hosts properties: %s',
                      cluster_hosts_properties)
        return (clusters_properties, cluster_hosts_properties)
def reinstall_cluster_and_hosts(self,
cluster_hosts,
os_versions,
target_systems):
"""reinstall clusters and hosts of each cluster."""
logging.debug('reinstall cluster_hosts: %s', cluster_hosts)
for clusterid, hostids in cluster_hosts.items():
self.reinstall_hosts(
hostids,
os_version=os_versions[clusterid],
target_system=target_systems[clusterid])
self.reinstall_cluster(clusterid,
os_version=os_versions[clusterid],
target_system=target_systems[clusterid])
    def clean_cluster_and_hosts(self, cluster_hosts,
                                os_versions, target_systems):
        """Clean the given hosts of each cluster.

        If every host of a cluster is being cleaned, the cluster
        config is cleaned too; otherwise only the cluster's installing
        progress is cleared.

        :param cluster_hosts: dict of cluster id to list of host ids.
        :param os_versions: dict of cluster id to os version.
        :param target_systems: dict of cluster id to target system name.
        """
        logging.debug('clean cluster_hosts: %s', cluster_hosts)
        for clusterid, hostids in cluster_hosts.items():
            all_hostids = self.get_cluster_hosts(clusterid)
            self.clean_host_configs(hostids,
                                    os_version=os_versions[clusterid],
                                    target_system=target_systems[clusterid])
            # only wipe the whole cluster config when all hosts are cleaned.
            if set(all_hostids) == set(hostids):
                self.clean_cluster_config(
                    clusterid,
                    os_version=os_versions[clusterid],
                    target_system=target_systems[clusterid])
            else:
                self.clean_cluster_installing_progress(
                    clusterid, os_version=os_versions[clusterid],
                    target_system=target_systems[clusterid])
def clean_cluster_and_hosts_installing_progress(
self, cluster_hosts, os_versions, target_systems
):
"""Clean clusters and hosts of each cluster intalling progress."""
logging.debug('clean cluster_hosts installing progress: %s',
cluster_hosts)
for clusterid, hostids in cluster_hosts.items():
self.clean_hosts_installing_progress(
hostids, os_version=os_versions[clusterid],
target_system=target_systems[clusterid])
self.clean_cluster_installing_progress(
clusterid, os_version=os_versions[clusterid],
target_system=target_systems[clusterid])
    def install_cluster_and_hosts(self,
                                  cluster_hosts,
                                  os_versions,
                                  target_systems):
        """Update configs and trigger install for clusters and hosts.

        For each cluster: refresh the global config, merge it into the
        cluster config, merge the cluster config down into all host
        configs, then update and reinstall only the requested hosts
        plus the cluster itself.

        :param cluster_hosts: dict of cluster id to list of host ids.
        :param os_versions: dict of cluster id to os version.
        :param target_systems: dict of cluster id to target system name.
        """
        logging.debug('update cluster_hosts: %s', cluster_hosts)
        for clusterid, hostids in cluster_hosts.items():
            global_config = self.get_global_config(
                os_version=os_versions[clusterid],
                target_system=target_systems[clusterid])
            self.update_global_config(global_config,
                                      os_version=os_versions[clusterid],
                                      target_system=target_systems[clusterid])
            cluster_config = self.get_cluster_config(
                clusterid, os_version=os_versions[clusterid],
                target_system=target_systems[clusterid])
            # NOTE(review): third arg False presumably means "do not
            # override keys already set in cluster_config" - confirm
            # against util.merge_dict signature.
            util.merge_dict(cluster_config, global_config, False)
            self.update_cluster_config(
                clusterid, cluster_config,
                os_version=os_versions[clusterid],
                target_system=target_systems[clusterid])
            all_hostids = self.get_cluster_hosts(clusterid)
            host_configs = self.get_host_configs(
                all_hostids, os_version=os_versions[clusterid],
                target_system=target_systems[clusterid])
            # merge cluster-level config into every host config so shared
            # keys propagate; module-level merger rules decide the details.
            CLUSTER_HOST_MERGER.merge(cluster_config, host_configs)
            # only the requested subset of hosts is updated/reinstalled.
            update_host_configs = dict(
                [(hostid, host_config)
                 for hostid, host_config in host_configs.items()
                 if hostid in hostids])
            self.update_host_configs(
                update_host_configs,
                os_version=os_versions[clusterid],
                target_system=target_systems[clusterid])
            self.reinstall_hosts(
                update_host_configs.keys(),
                os_version=os_versions[clusterid],
                target_system=target_systems[clusterid])
            self.reinstall_cluster(clusterid,
                                   os_version=os_versions[clusterid],
                                   target_system=target_systems[clusterid])
def sync(self):
"""sync os installer and package installer."""
logging.info('config manager sync')
logging.debug('sync %s', self.config_provider_)
self.config_provider_.sync()
logging.debug('sync %s', self.os_installer_)
self.os_installer_.sync()
logging.debug('sync %s', self.package_installer_)
self.package_installer_.sync()

View File

@ -1,351 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to set the hosts configs from cluster config.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import copy
import logging
from compass.config_management.utils import config_reference
from compass.utils import util
class ConfigMapping(object):
    """Class to merge cluster config ref to host config ref by path list."""

    def __init__(self, path_list, from_upper_keys=None,
                 from_lower_keys=None, to_key='.',
                 override=False, override_conditions=None,
                 value=None):
        """Constructor

        :param path_list: list of path to merge from cluster ref to host refs
        :type path_list: list of str
        :param from_upper_keys: kwargs from cluster ref for value callback.
        :type from_upper_keys: dict of kwargs name to path in cluster ref
        :param from_lower_keys: kwargs from host refs for value callback.
        :type from_lower_keys: dict of kwargs name to path in host refs.
        :param to_key: the path in host refs to be merged to.
        :type to_key: str
        :param override: if the path in host ref can be overridden.
        :type override: callback or bool
        :param override_conditions: kwargs from host ref for override callback
        :type override_conditions: dict of kwargs name to path in host ref
        :param value: the value to be set in host refs.
        :type value: callback or any type
        """
        self.path_list_ = path_list
        # None defaults instead of mutable {} defaults so instances never
        # share one dict object.
        self.from_upper_keys_ = (
            from_upper_keys if from_upper_keys is not None else {})
        self.from_lower_keys_ = (
            from_lower_keys if from_lower_keys is not None else {})
        self.to_key_ = to_key
        self.override_ = override
        self.override_conditions_ = (
            override_conditions if override_conditions is not None else {})
        self.value_ = value
        self._is_valid()

    def __repr__(self):
        return (
            '%s[path_list=%s,from_upper_keys=%s,'
            'from_lower_keys=%s,to_key=%s,override=%s,'
            'override_conditions=%s,value=%s]'
        ) % (
            self.__class__.__name__,
            self.path_list_, self.from_upper_keys_,
            self.from_lower_keys_, self.to_key_,
            self.override_, self.override_conditions_,
            self.value_)

    def _is_valid_path_list(self):
        """Check path_list are valid."""
        if not isinstance(self.path_list_, list):
            raise TypeError(
                'path_list %s type is %s while expected type is list' % (
                    self.path_list_, type(self.path_list_)))
        for i, path in enumerate(self.path_list_):
            if not isinstance(path, basestring):
                raise TypeError(
                    'path_list[%d] type is %s while '
                    'expected type is str or unicode: %s' % (
                        i, type(path), path))

    def _is_valid_from_upper_keys(self):
        """Check from_upper_keys are valid."""
        if not isinstance(self.from_upper_keys_, dict):
            raise TypeError(
                'from_upper_keys type is %s while expected is dict',
                type(self.from_upper_keys_))
        for mapping_key, from_upper_key in self.from_upper_keys_.items():
            if not isinstance(mapping_key, basestring):
                raise TypeError(
                    'key %s in from_upper_keys type is %s'
                    'while expected type is str or unicode' % (
                        mapping_key, type(mapping_key)))
            if not isinstance(from_upper_key, basestring):
                raise TypeError(
                    'from_upper_keys[%s] type is %s'
                    'while expected type is str or unicode: %s' % (
                        mapping_key, type(from_upper_key), from_upper_key))
            if '*' in from_upper_key:
                raise KeyError(
                    'from_upper_keys[%s] %s contains *' % (
                        mapping_key, from_upper_key))

    def _is_valid_from_lower_keys(self):
        """Check from_lower_keys are valid."""
        if not isinstance(self.from_lower_keys_, dict):
            raise TypeError(
                'from_lower_keys type is %s while expected type is dict',
                type(self.from_lower_keys_))
        for mapping_key, from_lower_key in self.from_lower_keys_.items():
            if not isinstance(mapping_key, basestring):
                raise TypeError(
                    'key %s in from_lower_keys type is %s'
                    'while expected type is str or unicode: %s' % (
                        mapping_key, type(mapping_key)))
            if not isinstance(from_lower_key, basestring):
                raise TypeError(
                    'from_lower_keys[%s] type'
                    'is %s while expected type is str or unicode: %s' % (
                        mapping_key, type(from_lower_key), from_lower_key))
            if '*' in from_lower_key:
                raise KeyError(
                    'from_lower_keys[%s] %s contains *' % (
                        mapping_key, from_lower_key))

    def _is_valid_from_keys(self):
        """Check from keys are valid."""
        self._is_valid_from_upper_keys()
        self._is_valid_from_lower_keys()
        upper_keys = set(self.from_upper_keys_.keys())
        lower_keys = set(self.from_lower_keys_.keys())
        intersection = upper_keys.intersection(lower_keys)
        if intersection:
            raise KeyError(
                'there is intersection between from_upper_keys %s'
                ' and from_lower_keys %s: %s' % (
                    upper_keys, lower_keys, intersection))

    def _is_valid_to_key(self):
        """Check to_key is valid."""
        if not isinstance(self.to_key_, basestring):
            raise TypeError(
                'to_key %s type is %s '
                'while expected type is [str, unicode]' % (
                    self.to_key_, type(self.to_key_)))
        if '*' in self.to_key_:
            raise KeyError('to_key %s contains *' % self.to_key_)

    def _is_valid_override_conditions(self):
        """Check override conditions are valid."""
        if not isinstance(self.override_conditions_, dict):
            raise TypeError(
                'override_conditions type is %s while expected type is dict',
                type(self.override_conditions_))
        override_items = self.override_conditions_.items()
        for mapping_key, override_condition in override_items:
            if not isinstance(mapping_key, basestring):
                raise TypeError(
                    'overrid_conditions key %s type is %s '
                    'while expected type is [str, unicode]' % (
                        mapping_key, type(mapping_key)))
            if not isinstance(override_condition, basestring):
                raise TypeError(
                    'override_conditions[%s] type is %s '
                    'while expected type is [str, unicode]: %s' % (
                        mapping_key, type(override_condition),
                        override_condition))
            if '*' in override_condition:
                raise KeyError(
                    'override_conditions[%s] %s contains *' % (
                        mapping_key, override_condition))

    def _is_valid(self):
        """Check ConfigMapping instance is valid."""
        self._is_valid_path_list()
        self._is_valid_from_keys()
        self._is_valid_to_key()
        self._is_valid_override_conditions()

    def _get_upper_sub_refs(self, upper_ref):
        """get sub_refs from upper_ref."""
        upper_refs = []
        for path in self.path_list_:
            upper_refs.extend(upper_ref.ref_items(path))
        return upper_refs

    def _get_mapping_from_upper_keys(self, ref_key, sub_ref):
        """Get upper config mapping from from_upper_keys."""
        sub_configs = {}
        for mapping_key, from_upper_key in self.from_upper_keys_.items():
            if from_upper_key in sub_ref:
                sub_configs[mapping_key] = sub_ref[from_upper_key]
            else:
                logging.info('%s ignore from_upper_key %s in %s',
                             self, from_upper_key, ref_key)
        return sub_configs

    def _get_mapping_from_lower_keys(self, ref_key, lower_sub_refs):
        """Get lower config mapping from from_lower_keys."""
        sub_configs = {}
        # ensure every mapping key exists even when no lower ref has it
        # (the previous loop bound an unused from_lower_key variable).
        for mapping_key in self.from_lower_keys_:
            sub_configs[mapping_key] = {}
        for lower_key, lower_sub_ref in lower_sub_refs.items():
            for mapping_key, from_lower_key in self.from_lower_keys_.items():
                if from_lower_key in lower_sub_ref:
                    sub_configs[mapping_key][lower_key] = (
                        lower_sub_ref[from_lower_key])
                else:
                    logging.error(
                        '%s ignore from_lower_key %s in %s lower_key %s',
                        self, from_lower_key, ref_key, lower_key)
        return sub_configs

    def _get_values(self, ref_key, sub_ref, lower_sub_refs, sub_configs):
        """Get values to set to lower configs."""
        if self.value_ is None:
            lower_values = {}
            for lower_key in lower_sub_refs.keys():
                lower_values[lower_key] = copy.deepcopy(sub_ref.config)
            return lower_values
        if not callable(self.value_):
            lower_values = {}
            for lower_key in lower_sub_refs.keys():
                lower_values[lower_key] = copy.deepcopy(self.value_)
            return lower_values
        return self.value_(sub_ref, ref_key, lower_sub_refs, self.to_key_,
                           **sub_configs)

    def _get_override(self, ref_key, sub_ref, to_key, lower_to_ref):
        """Get override from ref_key, ref from ref_key."""
        if not callable(self.override_):
            return bool(self.override_)
        override_condition_configs = {}
        override_items = self.override_conditions_.items()
        for mapping_key, override_condition in override_items:
            if override_condition in sub_ref:
                override_condition_configs[mapping_key] = (
                    sub_ref[override_condition])
            else:
                logging.info('%s no override condition %s in %s',
                             self, override_condition, ref_key)
        return self.override_(sub_ref, ref_key, lower_to_ref, to_key,
                              **override_condition_configs)

    def merge(self, upper_ref, lower_refs):
        """merge upper config to lower configs."""
        upper_sub_refs = self._get_upper_sub_refs(upper_ref)
        for ref_key, sub_ref in upper_sub_refs:
            sub_configs = self._get_mapping_from_upper_keys(ref_key, sub_ref)
            lower_sub_refs = {}
            for lower_key, lower_ref in lower_refs.items():
                lower_sub_refs[lower_key] = lower_ref.setdefault(ref_key)
            lower_sub_configs = self._get_mapping_from_lower_keys(
                ref_key, lower_sub_refs)
            util.merge_dict(sub_configs, lower_sub_configs)
            values = self._get_values(
                ref_key, sub_ref, lower_sub_refs, sub_configs)
            logging.debug('%s set values %s to %s',
                          ref_key, self.to_key_, values)
            for lower_key, lower_sub_ref in lower_sub_refs.items():
                if lower_key not in values:
                    logging.error('no key %s in %s', lower_key, values)
                    continue
                value = values[lower_key]
                lower_to_ref = lower_sub_ref.setdefault(self.to_key_)
                override = self._get_override(
                    ref_key, sub_ref, self.to_key_, lower_to_ref)
                lower_to_ref.update(value, override)
class ConfigMerger(object):
    """Merge a cluster config into per-host configs via mapping rules."""

    def __init__(self, mappings):
        """Constructor

        :param mappings: list of :class:`ConfigMapping` instance
        """
        self.mappings_ = mappings
        self._is_valid()

    def __repr__(self):
        return '%s[mappings=%s]' % (self.__class__.__name__, self.mappings_)

    def _is_valid(self):
        """Validate that mappings is a list of ConfigMapping instances."""
        if not isinstance(self.mappings_, list):
            raise TypeError(
                '%s mapping type is %s while expect type is list: %s' % (
                    self.__class__.__name__, type(self.mappings_),
                    self.mappings_))
        for index, rule in enumerate(self.mappings_):
            if not isinstance(rule, ConfigMapping):
                raise TypeError(
                    '%s mappings[%s] type is %s '
                    'while expected type is ConfigMapping' % (
                        self.__class__.__name__, index, type(rule)))

    def merge(self, upper_config, lower_configs):
        """Merge cluster config to host configs.

        :param upper_config: cluster configuration to merge from.
        :type upper_config: dict
        :param lower_configs: host configurations to merge to.
        :type lower_configs: dict of host id to host config as dict
        """
        upper_ref = config_reference.ConfigReference(upper_config)
        lower_refs = dict(
            (host_id, config_reference.ConfigReference(host_config))
            for host_id, host_config in lower_configs.items())
        for rule in self.mappings_:
            logging.debug('apply merging from the rule %s', rule)
            rule.merge(upper_ref, lower_refs)
        for host_id in lower_configs.keys():
            lower_configs[host_id] = config_reference.get_clean_config(
                lower_configs[host_id])
        logging.debug('merged upper config\n%s\nto lower configs:\n%s',
                      upper_config, lower_configs)

View File

@ -1,607 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ConfigMerger Callbacks module.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import copy
import itertools
import logging
import netaddr
import re
from compass.utils import util
def _get_role_bundle_mapping(roles, bundles):
"""Get role bundles.
"""
bundle_mapping = {}
for role in roles:
bundle_mapping[role] = role
for bundle in bundles:
bundled_role = None
for role in bundle:
if role not in roles:
continue
while role != bundle_mapping[role]:
role = bundle_mapping[role]
if not bundled_role:
bundled_role = role
else:
bundle_mapping[role] = bundled_role
role_bundles = {}
for role in roles:
bundled_role = role
while bundled_role != bundle_mapping[bundled_role]:
bundled_role = bundle_mapping[bundled_role]
bundle_mapping[role] = bundled_role
role_bundles.setdefault(bundled_role, set()).add(role)
logging.debug('bundle_mapping is %s', bundle_mapping)
logging.debug('role_bundles is %s', role_bundles)
return bundle_mapping, role_bundles
def _get_bundled_exclusives(exclusives, bundle_mapping):
"""Get bundled exclusives."""
bundled_exclusives = set()
for exclusive in exclusives:
if exclusive not in bundle_mapping:
logging.error(
'exclusive role %s did not found in roles %s',
exclusive, bundle_mapping.keys())
continue
bundled_exclusives.add(bundle_mapping[exclusive])
logging.debug('bundled exclusives: %s', bundled_exclusives)
return bundled_exclusives
def _get_max(lhs, rhs):
"""Get max value."""
if lhs < 0 and rhs < 0:
return min(lhs, rhs)
if lhs < 0:
return lhs
if rhs < 0:
return rhs
return max(lhs, rhs)
def _get_min(lhs, rhs):
"""Get min value."""
if lhs < 0:
return max(rhs, 0)
if rhs < 0:
return max(lhs, 0)
return min(lhs, rhs)
def _dec_max_min(value):
"""dec max and min value."""
if value > 0:
return value - 1
else:
return value
def _get_bundled_max_mins(maxs, mins, default_max, default_min, role_bundles):
    """Get max and mins for each bundled role.

    :param maxs: per-role max limits; a 'default' key overrides default_max.
    :param mins: per-role min limits; a 'default' key overrides default_min.
    :param default_max: fallback max limit; negative means unbounded.
    :param default_min: fallback min limit.
    :param role_bundles: dict of bundle representative to set of roles.
    :returns: tuple (bundled_maxs, bundled_mins) keyed by bundle
        representative.
    """
    bundled_maxs = {}
    bundled_mins = {}
    for bundled_role, roles in role_bundles.items():
        bundled_min = None
        bundled_max = None
        for role in roles:
            new_max = maxs.get(role, maxs.get('default', default_max))
            new_min = mins.get(role, mins.get('default', default_min))
            # clamp the role's max so it is never below its min
            # (negative limits keep their unbounded meaning).
            new_max = _get_max(new_max, new_min)
            if bundled_min is None:
                bundled_min = new_min
            else:
                bundled_min = _get_min(bundled_min, new_min)
            if bundled_max is None:
                bundled_max = new_max
            else:
                bundled_max = _get_min(
                    bundled_max, _get_max(new_max, bundled_min))
        # fall back to the defaults when the bundle has no roles.
        if bundled_min is None:
            bundled_min = default_min
        if bundled_max is None:
            bundled_max = _get_max(default_max, bundled_min)
        bundled_mins[bundled_role] = bundled_min
        bundled_maxs[bundled_role] = bundled_max
    logging.debug('bundled_maxs are %s', bundled_maxs)
    logging.debug('bundled_mins are %s', bundled_mins)
    return bundled_maxs, bundled_mins
def _update_assigned_roles(lower_refs, to_key, bundle_mapping,
                           role_bundles, bundled_maxs, bundled_mins):
    """Update bundled maxs/mins and assign roles to each unassigned host.

    Mutates bundled_maxs and bundled_mins in place, decrementing the
    counters for every bundle already satisfied by a host.

    :param lower_refs: dict of host key to host config ref.
    :param to_key: path in each host ref where roles are stored.
    :param bundle_mapping: dict of role to its bundle representative.
    :param role_bundles: dict of bundle representative to set of roles.
    :param bundled_maxs: per-bundle max counters (mutated in place).
    :param bundled_mins: per-bundle min counters (mutated in place).
    :returns: tuple (lower_roles, unassigned_hosts).
    """
    lower_roles = {}
    unassigned_hosts = []
    for lower_key, lower_ref in lower_refs.items():
        roles_per_host = lower_ref.get(to_key, [])
        roles = set()
        bundled_roles = set()
        for role in roles_per_host:
            if role in bundle_mapping:
                # expand an assigned role to its whole bundle.
                bundled_role = bundle_mapping[role]
                bundled_roles.add(bundled_role)
                roles |= set(role_bundles[bundled_role])
            else:
                roles.add(role)
        for bundled_role in bundled_roles:
            bundled_maxs[bundled_role] = _dec_max_min(
                bundled_maxs[bundled_role])
            bundled_mins[bundled_role] = _dec_max_min(
                bundled_mins[bundled_role])
        lower_roles[lower_key] = list(roles)
        if not roles:
            unassigned_hosts.append(lower_key)
    logging.debug('assigned roles: %s', lower_roles)
    logging.debug('unassigned_hosts: %s', unassigned_hosts)
    logging.debug('bundled maxs for unassigned hosts: %s', bundled_maxs)
    logging.debug('bundled mins for unassigned hosts: %s', bundled_mins)
    return lower_roles, unassigned_hosts
def _update_exclusive_roles(bundled_exclusives, lower_roles,
                            unassigned_hosts, bundled_maxs,
                            bundled_mins, role_bundles):
    """Assign exclusive roles to dedicated hosts.

    Each exclusive bundle consumes hosts from unassigned_hosts until its
    min counter is satisfied; the bundle is then removed from
    role_bundles so later assignment passes skip it. All counter dicts
    are mutated in place.

    :raises ValueError: when there are not enough unassigned hosts.
    """
    for bundled_exclusive in bundled_exclusives:
        while bundled_mins[bundled_exclusive] > 0:
            if not unassigned_hosts:
                # fixed: the message is now actually interpolated instead
                # of passing the role as a spare ValueError argument.
                raise ValueError(
                    'no enough unassigned hosts for exclusive %s' % (
                        bundled_exclusive))
            host = unassigned_hosts.pop(0)
            bundled_mins[bundled_exclusive] = _dec_max_min(
                bundled_mins[bundled_exclusive])
            bundled_maxs[bundled_exclusive] = _dec_max_min(
                bundled_maxs[bundled_exclusive])
            lower_roles[host] = list(role_bundles[bundled_exclusive])
        del role_bundles[bundled_exclusive]
    logging.debug('assigned roles after assigning exclusives: %s', lower_roles)
    logging.debug('unassigned_hosts after assigning exclusives: %s',
                  unassigned_hosts)
    logging.debug('bundled maxs after assigning exclusives: %s', bundled_maxs)
    logging.debug('bundled mins after assigning exclusives: %s', bundled_mins)
def _assign_roles_by_mins(role_bundles, lower_roles, unassigned_hosts,
                          bundled_maxs, bundled_mins):
    """Assign roles to hosts until every bundle's min is satisfied.

    Hosts are consumed round-robin from a rotating copy of
    unassigned_hosts, so a host may receive several bundles; assigned
    hosts are removed from unassigned_hosts. Counter dicts are mutated
    in place.

    :raises ValueError: when hosts run out or a host would get a role
        it already has.
    """
    available_hosts = copy.deepcopy(unassigned_hosts)
    for bundled_role, roles in role_bundles.items():
        while bundled_mins[bundled_role] > 0:
            if not available_hosts:
                # fixed: interpolate the role into the message instead of
                # passing it as a spare ValueError argument.
                raise ValueError(
                    'no enough available hosts to assign to %s' % (
                        bundled_role))
            # rotate: take from the front, put back at the end.
            host = available_hosts.pop(0)
            available_hosts.append(host)
            if host in unassigned_hosts:
                unassigned_hosts.remove(host)
            bundled_mins[bundled_role] = _dec_max_min(
                bundled_mins[bundled_role])
            bundled_maxs[bundled_role] = _dec_max_min(
                bundled_maxs[bundled_role])
            if host not in lower_roles:
                lower_roles[host] = list(roles)
            elif set(lower_roles[host]) & roles:
                duplicated_roles = set(lower_roles[host]) & roles
                raise ValueError(
                    'duplicated roles %s on %s' % (duplicated_roles, host))
            else:
                lower_roles[host].extend(list(roles))
    logging.debug('assigned roles after assigning mins: %s', lower_roles)
    logging.debug('unassigned_hosts after assigning mins: %s',
                  unassigned_hosts)
    logging.debug('bundled maxs after assigning mins: %s', bundled_maxs)
def _assign_roles_by_maxs(role_bundles, lower_roles, unassigned_hosts,
                          bundled_maxs):
    """Assign roles to the remaining hosts by max restriction.

    Bundles with a positive remaining max get hosts first; leftover
    hosts cycle through the bundles with negative (unbounded) maxs,
    weighted by their magnitude. lower_roles and unassigned_hosts are
    mutated in place.
    """
    available_lists = []
    default_roles_lists = []
    for bundled_role in role_bundles.keys():
        if bundled_maxs[bundled_role] > 0:
            available_lists.append(
                [bundled_role] * bundled_maxs[bundled_role])
        elif bundled_maxs[bundled_role] < 0:
            default_roles_lists.append(
                [bundled_role] * (-bundled_maxs[bundled_role]))
    available_list = util.flat_lists_with_possibility(available_lists)
    for bundled_role in available_list:
        if not unassigned_hosts:
            break
        host = unassigned_hosts.pop(0)
        lower_roles[host] = list(role_bundles[bundled_role])
    logging.debug('assigned roles after assigning max: %s', lower_roles)
    logging.debug('unassigned_hosts after assigning maxs: %s',
                  unassigned_hosts)
    default_roles = util.flat_lists_with_possibility(
        default_roles_lists)
    if default_roles:
        default_iter = itertools.cycle(default_roles)
        while unassigned_hosts:
            host = unassigned_hosts.pop(0)
            # next() builtin instead of the Python-2-only .next() method.
            bundled_role = next(default_iter)
            lower_roles[host] = list(role_bundles[bundled_role])
    logging.debug('assigned roles are %s', lower_roles)
    logging.debug('unassigned hosts: %s', unassigned_hosts)
def _sort_roles(lower_roles, roles):
    """Sort roles with the same order as in all roles.

    Roles known to `roles` come first in that canonical order; any
    extra assigned roles keep their relative order at the end.
    """
    for host, assigned in lower_roles.items():
        # canonical part: roles listed in `roles`, in that order.
        ordered = [role for role in roles if role in assigned]
        # tail part: assigned roles unknown to `roles`.
        for role in assigned:
            if role not in ordered:
                logging.debug('found role %s not in roles %s', role, roles)
                ordered.append(role)
        lower_roles[host] = ordered
    logging.debug('sorted roles are %s', lower_roles)
def _update_dependencies(lower_roles, default_dependencies, dependencies):
    """update dependencies to lower roles.

    Each role's dependencies are inserted (deduplicated) before the
    role itself; roles without an entry fall back to the 'default'
    entry of `dependencies`, then to `default_dependencies`.
    """
    # `dependencies` is not mutated below, so the fallback is invariant.
    fallback = dependencies.get('default', default_dependencies)
    for host, roles in lower_roles.items():
        expanded = []
        for role in roles:
            for dependency in dependencies.get(role, fallback):
                if dependency not in expanded:
                    expanded.append(dependency)
            if role not in expanded:
                expanded.append(role)
        lower_roles[host] = expanded
    logging.debug(
        'roles after adding dependencies %s default dependencies %s are: %s',
        dependencies, default_dependencies, lower_roles)
def _update_post_roles(lower_roles, default_post_roles, post_roles):
    """update post roles to lower roles.

    Mirror image of dependency expansion: each role's post roles are
    inserted (deduplicated) after the role, working from the back of
    the list so later roles win deduplication.
    """
    # `post_roles` is not mutated below, so the fallback is invariant.
    fallback = post_roles.get('default', default_post_roles)
    for host, roles in lower_roles.items():
        collected = []
        for role in reversed(roles):
            for post_role in reversed(post_roles.get(role, fallback)):
                if post_role not in collected:
                    collected.append(post_role)
            if role not in collected:
                collected.append(role)
        # built back-to-front; restore forward order.
        collected.reverse()
        lower_roles[host] = collected
    logging.debug(
        'roles after adding post roles %s default %s are: %s',
        post_roles, default_post_roles, lower_roles)
def assign_roles(_upper_ref, _from_key, lower_refs, to_key,
                 roles=[], maxs={}, mins={}, default_max=-1,
                 default_min=0, exclusives=[], bundles=[],
                 default_dependencies=[], dependencies={},
                 default_post_roles=[], post_roles={}, **_kwargs):
    """Assign roles to lower configs.

    Pipeline: bundle roles, honor already-assigned roles, place
    exclusive bundles, satisfy per-bundle minimums, fill to maximums,
    then sort and expand dependencies / post roles.

    :param lower_refs: dict of host key to its config reference.
    :param to_key: config key under which roles are stored per host.
    :param roles: all known roles, in canonical order.
    :param maxs/mins: per-role max/min host counts.
    :param default_max/default_min: fallbacks for roles missing from
        maxs/mins (default_max=-1 means unlimited).
    :param exclusives: roles that must live alone on a host.
    :param bundles: groups of roles always assigned together.
    :param dependencies/post_roles: per-role expansion tables, with
        'default' fallbacks from the matching default_* params.
    :returns: dict mapping host key to its ordered role list.

    NOTE(review): the mutable default arguments are assumed to be
    read-only inside the helpers — confirm before mutating them.
    """
    logging.debug(
        'assignRoles with roles=%s, maxs=%s, mins=%s, '
        'default_max=%s, default_min=%s, exclusives=%s, bundles=%s'
        'default_dependencies=%s, dependencies=%s'
        'default_post_roles=%s, post_roles=%s',
        roles, maxs, mins, default_max,
        default_min, exclusives, bundles,
        default_dependencies, dependencies,
        default_post_roles, post_roles)
    # collapse bundled roles into single assignable units.
    bundle_mapping, role_bundles = _get_role_bundle_mapping(roles, bundles)
    bundled_exclusives = _get_bundled_exclusives(exclusives, bundle_mapping)
    bundled_maxs, bundled_mins = _get_bundled_max_mins(
        maxs, mins, default_max, default_min, role_bundles)
    # respect roles hosts already have; collect hosts still empty.
    lower_roles, unassigned_hosts = _update_assigned_roles(
        lower_refs, to_key, bundle_mapping, role_bundles,
        bundled_maxs, bundled_mins)
    if not unassigned_hosts:
        logging.debug(
            'there is not unassigned hosts, assigned roles by host is: %s',
            lower_roles)
    else:
        # exclusive bundles first, then min quotas, then max fill.
        _update_exclusive_roles(
            bundled_exclusives, lower_roles, unassigned_hosts,
            bundled_maxs, bundled_mins, role_bundles)
        _assign_roles_by_mins(
            role_bundles, lower_roles, unassigned_hosts,
            bundled_maxs, bundled_mins)
        _assign_roles_by_maxs(
            role_bundles, lower_roles, unassigned_hosts,
            bundled_maxs)
    # normalize ordering and expand implied roles.
    _sort_roles(lower_roles, roles)
    _update_dependencies(lower_roles, default_dependencies, dependencies)
    _update_post_roles(lower_roles, default_post_roles, post_roles)
    return lower_roles
def assign_roles_by_host_numbers(upper_ref, from_key, lower_refs, to_key,
                                 policy_by_host_numbers={}, default={},
                                 **kwargs):
    """Assign roles by role assign policy.

    Picks role-assignment kwargs keyed by the cluster size: merges
    `default` policy into the incoming kwargs, then overlays the policy
    registered for this exact host count (keys are strings), and
    delegates to :func:`assign_roles`.

    :param policy_by_host_numbers: dict of str(host count) to policy
        kwargs overriding the defaults.
    :param default: policy kwargs applied for every host count.
    :returns: result of :func:`assign_roles` with the merged policy.
    """
    host_numbers = str(len(lower_refs))
    # deepcopy so the caller's kwargs are not mutated by merge_dict.
    policy_kwargs = copy.deepcopy(kwargs)
    util.merge_dict(policy_kwargs, default)
    if host_numbers in policy_by_host_numbers:
        util.merge_dict(policy_kwargs, policy_by_host_numbers[host_numbers])
    else:
        logging.debug('didnot find policy %s by host numbers %s',
                      policy_by_host_numbers, host_numbers)
    return assign_roles(upper_ref, from_key, lower_refs,
                        to_key, **policy_kwargs)
def has_intersection(upper_ref, from_key, _lower_refs, _to_key,
                     lower_values={}, **_kwargs):
    """Check if upper config has intersection with lower values.

    :param lower_values: dict of lower key to an iterable of values.
    :returns: dict mapping each lower key to True/False depending on
        whether it shares any value with upper_ref.config.
    """
    upper_values = set(upper_ref.config)
    has = {}
    for lower_key, lower_value in lower_values.items():
        values = set(lower_value)
        intersection = values & upper_values
        logging.debug(
            'lower_key %s values %s intersection'
            'with from_key %s value %s: %s',
            lower_key, values, from_key, upper_ref.config, intersection)
        has[lower_key] = bool(intersection)
    return has
def get_intersection(upper_ref, from_key, _lower_refs, _to_key,
                     lower_values={}, **_kwargs):
    """Get intersection of upper config and lower values.

    :param lower_values: dict of lower key to an iterable of values.
    :returns: dict mapping each lower key with a non-empty overlap to
        the list of shared values; keys without overlap are omitted.
    """
    upper_values = set(upper_ref.config)
    intersections = {}
    for lower_key, lower_value in lower_values.items():
        values = set(lower_value)
        intersection = values & upper_values
        logging.debug(
            'lower_key %s values %s intersection'
            'with from_key %s value %s: %s',
            lower_key, values, from_key, upper_ref.config, intersection)
        if intersection:
            intersections[lower_key] = list(intersection)
    return intersections
def assign_ips(_upper_ref, _from_key, lower_refs, to_key,
               ip_start='192.168.0.1', ip_end='192.168.0.254',
               **_kwargs):
    """Assign ips to hosts' configurations.

    Hosts that already carry an ip under `to_key` keep it (and it is
    removed from the assignable pool); the rest receive ips from the
    [ip_start, ip_end] range.

    :param lower_refs: dict of host key to config reference.
    :param to_key: config key holding the host's ip, if any.
    :param ip_start: first assignable ip (inclusive).
    :param ip_end: last assignable ip (inclusive).
    :returns: dict mapping host key to its ip string.
    :raises: ValueError on malformed range or when the pool is too
        small for the unassigned hosts.
    """
    if not ip_start or not ip_end:
        raise ValueError(
            'ip_start %s or ip_end %s is empty' % (ip_start, ip_end))
    # shape check only; out-of-range octets are rejected by netaddr
    # when the IPRange is built below.
    if not re.match(r'^\d+\.\d+\.\d+\.\d+$', ip_start):
        raise ValueError(
            'ip_start %s formmat is not correct' % ip_start)
    if not re.match(r'^\d+\.\d+\.\d+\.\d+$', ip_end):
        raise ValueError(
            'ip_end %s format is not correct' % ip_end)
    host_ips = {}
    unassigned_hosts = []
    try:
        ips = netaddr.IPSet(netaddr.IPRange(ip_start, ip_end))
    except Exception:
        raise ValueError(
            'failed to create ip block [%s, %s]' % (ip_start, ip_end))
    # keep pre-assigned ips and take them out of the pool.
    for lower_key, lower_ref in lower_refs.items():
        ip_addr = lower_ref.get(to_key, '')
        if ip_addr:
            host_ips[lower_key] = ip_addr
            ips.remove(ip_addr)
        else:
            unassigned_hosts.append(lower_key)
    # hand out remaining pool ips in the IPSet's iteration order.
    for ip_addr in ips:
        if not unassigned_hosts:
            break
        host = unassigned_hosts.pop(0)
        host_ips[host] = str(ip_addr)
    if unassigned_hosts:
        raise ValueError(
            'there is no enough ips to assign to %s: [%s-%s]' % (
                unassigned_hosts, ip_start, ip_end))
    logging.debug('assign %s: %s', to_key, host_ips)
    return host_ips
def generate_order(start=0, end=-1):
    """generate order num.

    Yields consecutive integers from `start` up to (but excluding)
    `end`; a negative `end` means the sequence never stops.
    """
    current = start
    while end < 0 or current < end:
        yield current
        current += 1
def assign_by_order(_upper_ref, _from_key, lower_refs, _to_key,
                    prefix='',
                    orders=[], default_order=0, reverse=False,
                    conditions={}, **kwargs):
    """assign to_key by order.

    Hosts whose condition is truthy consume the next value from
    `orders`; once `orders` is exhausted, `default_order` is used.

    :param lower_refs: dict of lower key to config reference.
    :param prefix: prefix prepended to the stringified order.
    :param orders: iterable of order values handed out in sequence.
    :param default_order: fallback once `orders` runs out.
    :param reverse: iterate the lower keys in reverse order if True.
    :param conditions: dict of lower key to truthy flag; only keys
        with a truthy condition get an order assigned.
    :returns: dict mapping lower key to prefix + order.
    """
    host_values = {}
    orders = iter(orders)
    # materialize the keys so they can be reversed on both python 2
    # and python 3 (dict views are not reversible before 3.8).
    lower_keys = list(lower_refs.keys())
    if reverse:
        lower_keys.reverse()
    for lower_key in lower_keys:
        if lower_key in conditions and conditions[lower_key]:
            try:
                # builtin next() works on python 2.6+ and python 3,
                # unlike the python2-only iterator .next() method.
                order = next(orders)
            except StopIteration:
                order = default_order
            # cast the order to the prefix's type so concatenation
            # works for both str and unicode prefixes.
            host_values[lower_key] = prefix + type(prefix)(order)
    logging.debug('assign orders: %s', host_values)
    return host_values
def assign_from_pattern(_upper_ref, _from_key, lower_refs, to_key,
                        upper_keys=[], lower_keys=[], pattern='', **kwargs):
    """assign to_key by pattern.

    Formats `pattern` once per host with a mapping built from
    cluster-wide params (`upper_keys`) plus per-host params
    (`lower_keys`, each a dict keyed by host).
    """
    if set(upper_keys) & set(lower_keys):
        raise KeyError(
            'overlap between upper_keys %s and lower_keys %s' % (
                upper_keys, lower_keys))
    # cluster-wide values shared by every host's format mapping.
    shared = {}
    for key in upper_keys:
        if key not in kwargs:
            raise KeyError(
                'param %s is missing' % key)
        shared[key] = kwargs[key]
    host_values = {}
    for lower_key in lower_refs:
        group = copy.deepcopy(shared)
        for key in lower_keys:
            if key not in kwargs:
                raise KeyError('param %s is missing' % key)
            if not isinstance(kwargs[key], dict):
                raise KeyError(
                    'param %s type is %s while expected type is dict' % (
                        kwargs[key], type(kwargs[key])))
            group[key] = kwargs[key][lower_key]
        try:
            host_values[lower_key] = pattern % group
        except KeyError as error:
            logging.error('failed to assign %s[%s] = %s %% %s',
                          lower_key, to_key, pattern, group)
            raise error
    return host_values
def assign_noproxy(_upper_ref, _from_key, lower_refs,
                   to_key, default=[], clusterid=None,
                   noproxy_pattern='',
                   hostnames={}, ips={}, **kwargs):
    """Assign no proxy to hosts.

    Builds one comma-joined no_proxy string from the defaults, any
    truthy extra kwargs, and one formatted entry per host, then maps
    every host key to that same string.
    """
    entries = copy.deepcopy(default)
    entries.extend(value for value in kwargs.values() if value)
    if not clusterid:
        raise KeyError(
            'clusterid %s is empty' % clusterid)
    for lower_key in lower_refs:
        if lower_key not in hostnames:
            raise KeyError(
                'lower_key %s is not in hostnames %s' % (
                    lower_key, hostnames))
        if lower_key not in ips:
            raise KeyError(
                'lower_key %s is not in ips %s' % (
                    lower_key, ips))
        mapping = {
            'clusterid': clusterid,
            'hostname': hostnames[lower_key],
            'ip': ips[lower_key]
        }
        try:
            entries.append(noproxy_pattern % mapping)
        except KeyError as error:
            logging.error('failed to assign %s[%s] = %s %% %s',
                          lower_key, to_key, noproxy_pattern, mapping)
            raise error
    # drop empty entries and share the joined string across hosts.
    no_proxy = ','.join(entry for entry in entries if entry)
    return dict((lower_key, no_proxy) for lower_key in lower_refs)
def override_if_empty(_upper_ref, _ref_key, lower_ref, _to_key):
    """Override if the configuration value is empty.

    :returns: True when lower_ref.config is falsy (None, empty dict,
        empty list, ...), False otherwise.
    """
    return not lower_ref.config

View File

@ -1,343 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to provide util class to access item in nested dict easily.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import copy
import fnmatch
import os.path
import re
from compass.utils import util
def get_clean_config(config):
    """Get cleaned config from original config.

    Recursively prunes dict entries whose cleaned value is None, so
    empty sub-dicts collapse to None and disappear from their parent.

    :param config: configuration to be cleaned.

    :returns: clean configuration without key referring to None or empty dict.
    """
    if config is None:
        return None
    if not isinstance(config, dict):
        # scalars and non-dict containers pass through untouched.
        return config
    cleaned = {}
    for key, value in config.items():
        sub_config = get_clean_config(value)
        if sub_config is not None:
            cleaned[key] = sub_config
    return cleaned if cleaned else None
class ConfigReference(object):
    """Helper class to access items in a nested dict.

    Every nested dict value is wrapped in its own ConfigReference.
    References are navigated with filesystem-like paths: '/' is the
    root reference, '.' the reference itself and '..' its parent.
    """
    def __init__(self, config, parent=None, parent_key=None):
        """Construct ConfigReference from configuration.
        :param config: configuration to build the ConfigRerence instance.
        :type config: dict
        :param parent: parent ConfigReference instance.
        :param parent_key: the key refers to the config in parent.
        :type parent_key: str
        :raises: TypeError
        """
        if parent and not isinstance(parent, ConfigReference):
            raise TypeError('parent %s type should be %s'
                            % (parent, ConfigReference))
        if parent_key and not isinstance(parent_key, basestring):
            raise TypeError('parent_key %s type should be [str, unicode]'
                            % parent_key)
        self.config = config
        # refs_ maps child keys plus the special parts '.', '..', '/'
        # to ConfigReference instances.
        self.refs_ = {'.': self}
        self.parent_ = parent
        self.parent_key_ = parent_key
        if parent is not None:
            self.refs_['..'] = parent
            self.refs_['/'] = parent.refs_['/']
            parent.refs_[parent_key] = self
            # force the parent config into a dict so this child's
            # config can be attached under parent_key.
            if parent.config is None or not isinstance(parent.config, dict):
                parent.__init__({}, parent=parent.parent_,
                                parent_key=parent.parent_key_)
            parent.config[parent_key] = self.config
        else:
            # root reference: '..' and '/' both point to itself.
            self.refs_['..'] = self
            self.refs_['/'] = self
        if config and isinstance(config, dict):
            # recursively wrap nested values as child references.
            for key, value in config.items():
                if not isinstance(key, basestring):
                    msg = 'key type is %s while expected is [str, unicode]: %s'
                    raise TypeError(msg % (type(key), key))
                ConfigReference(value, self, key)
    def items(self, prefix=''):
        """Return key value pair of all nested items.
        :param prefix: iterate key value pair under prefix.
        :type prefix: str
        :returns: list of (key, value)
        """
        to_list = []
        for key, ref in self.refs_.items():
            if not self._special_path(key):
                key_prefix = os.path.join(prefix, key)
                to_list.append((key_prefix, ref.config))
                # depth-first: include the child's nested items too.
                to_list.extend(ref.items(key_prefix))
        return to_list
    def keys(self):
        """Return keys of :func:`ConfigReference.items`."""
        return [key for key, _ in self.items()]
    def values(self):
        """Return values of :func:`ConfigReference.items`."""
        return [ref for _, ref in self.items()]
    def __nonzero__(self):
        # python 2 truth protocol: truthy iff the wrapped config is.
        return bool(self.config)
    def __iter__(self):
        return iter(self.keys())
    def __len__(self):
        return len(self.keys())
    @classmethod
    def _special_path(cls, path):
        """Check if path is special."""
        return path in ['/', '.', '..']
    def ref_items(self, path):
        """Return the refs matching the path glob.
        :param path: glob pattern to match the path to the ref.
        :type path: str
        :returns: dict of key to :class:`ConfigReference` instance.
        :raises: KeyError
        """
        if not path:
            raise KeyError('key %s is empty' % path)
        parts = []
        if isinstance(path, basestring):
            parts = path.split('/')
        else:
            parts = path
        if not parts[0]:
            # a leading '/' makes the lookup start at the root ref.
            parts = parts[1:]
            refs = [('/', self.refs_['/'])]
        else:
            refs = [('', self)]
        for part in parts:
            if not part:
                continue
            next_refs = []
            for prefix, ref in refs:
                if self._special_path(part):
                    sub_prefix = os.path.join(prefix, part)
                    next_refs.append((sub_prefix, ref.refs_[part]))
                    continue
                # expand glob patterns against non-special child keys.
                for sub_key, sub_ref in ref.refs_.items():
                    if self._special_path(sub_key):
                        continue
                    matched = fnmatch.fnmatch(sub_key, part)
                    if not matched:
                        continue
                    sub_prefix = os.path.join(prefix, sub_key)
                    next_refs.append((sub_prefix, sub_ref))
            refs = next_refs
        return refs
    def ref_keys(self, path):
        """Return keys of :func:`ConfigReference.ref_items`."""
        return [key for key, _ in self.ref_items(path)]
    def ref_values(self, path):
        """Return values of :func:`ConfigReference.ref_items`."""
        return [ref for _, ref in self.ref_items(path)]
    def ref(self, path, create_if_not_exist=False):
        """Get ref of the path.
        :param path: str. The path to the ref.
        :type path: str
        :param create_if_not_exists: create ref if does not exist on path.
        :type create_if_not_exist: bool
        :returns: :class:`ConfigReference` instance to the path.
        :raises: KeyError, TypeError
        """
        if not path:
            raise KeyError('key %s is empty' % path)
        if '*' in path or '?' in path:
            raise TypeError('key %s should not contain *')
        parts = []
        if isinstance(path, list):
            parts = path
        else:
            parts = path.split('/')
        if not parts[0]:
            ref = self.refs_['/']
            parts = parts[1:]
        else:
            ref = self
        for part in parts:
            if not part:
                continue
            if part in ref.refs_:
                ref = ref.refs_[part]
            elif create_if_not_exist:
                # lazily create empty child refs along the path.
                ref = ConfigReference(None, ref, part)
            else:
                raise KeyError('key %s is not exist' % path)
        return ref
    def __repr__(self):
        return '<ConfigReference: config=%r, refs[%s], parent=%s>' % (
            self.config, self.refs_.keys(), self.parent_)
    def __getitem__(self, path):
        return self.ref(path).config
    def __contains__(self, path):
        try:
            self.ref(path)
            return True
        except KeyError:
            return False
    def __setitem__(self, path, value):
        ref = self.ref(path, True)
        # re-init rewires the ref (and its parent) around the value.
        ref.__init__(value, ref.parent_, ref.parent_key_)
        return ref.config
    def __delitem__(self, path):
        ref = self.ref(path)
        if ref.parent_:
            del ref.parent_.refs_[ref.parent_key_]
            del ref.parent_.config[ref.parent_key_]
        ref.__init__(None)
    def update(self, config, override=True):
        """Update with config.
        :param config: config to update.
        :param override: if the instance config should be overrided
        :type override: bool
        """
        if (self.config is not None and
                isinstance(self.config, dict) and
                isinstance(config, dict)):
            util.merge_dict(self.config, config, override)
        elif self.config is None or override:
            self.config = copy.deepcopy(config)
        else:
            return
        # rebuild child refs so they track the merged config.
        self.__init__(self.config, self.parent_, self.parent_key_)
    def get(self, path, default=None):
        """Get config of the path or default if does not exist.
        :param path: path to the item
        :type path: str
        :param default: default value to return
        :returns: item in path or default.
        """
        try:
            return self[path]
        except KeyError:
            return default
    def setdefault(self, path, value=None):
        """Set default value to path.
        :param path: path to the item.
        :type path: str
        :param value: the default value to set to the path.
        :returns: the :class:`ConfigReference` to path
        """
        ref = self.ref(path, True)
        if ref.config is None:
            ref.__init__(value, ref.parent_, ref.parent_key_)
        return ref
    def match(self, properties_match):
        """Check if config match the given properties.

        Each property value is used as a regex matched against the
        stringified config value; for list values one matching item
        is enough.
        """
        for property_name, property_value in properties_match.items():
            config_value = self.get(property_name)
            if config_value is None:
                return False
            if isinstance(config_value, list):
                found = False
                for config_value_item in config_value:
                    if re.match(property_value, str(config_value_item)):
                        found = True
                if not found:
                    return False
            else:
                if not re.match(property_value, str(config_value)):
                    return False
        return True
    def filter(self, properties_name):
        """filter config by properties name.

        :returns: dict of property name to config value, omitting
            properties that are missing (None).
        """
        filtered_properties = {}
        for property_name in properties_name:
            config_value = self.get(property_name)
            if config_value is None:
                continue
            filtered_properties[property_name] = config_value
        return filtered_properties

View File

@ -1,329 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config Translator module to translate orign config to dest config.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging
from compass.config_management.utils import config_reference
class KeyTranslator(object):
    """Class to translate origin ref to dest ref.

    Validates its configuration eagerly at construction time, then
    :meth:`translate` copies/derives values from an origin
    ConfigReference into a destination ConfigReference.
    """
    def __init__(self, translated_keys=[], from_keys={}, translated_value=None,
                 from_values={}, override=False, override_conditions={}):
        """Constructor
        :param translated_keys: keys in dest ref to be translated to.
        :type translated_keys: callable or list of (str or callable)
        :param from_keys: extra kwargs parsed to translated key callback.
        :type from_keys: dict mapping name of kwargs to path in origin ref
        :param translated_value: value or callback to get translated value.
        :type translated_value: callback or any type
        :param from_values: extra kwargs parsed to translated value callback.
        :type from_values: dict mapping name of kwargs to path in origin ref.
        :param override: if the translated value can be overridden.
        :type override: callback or bool
        :param override_conditions: extra kwargs parsed to override callback.
        :type override_conditions: dict of kwargs name to origin ref path.
        """
        self.translated_keys_ = translated_keys
        self.from_keys_ = from_keys
        self.translated_value_ = translated_value
        self.from_values_ = from_values
        self.override_ = override
        self.override_conditions_ = override_conditions
        # fail fast on misconfigured translators.
        self._is_valid()
    def __repr__(self):
        return (
            '%s[translated_keys=%s,from_keys=%s,translated_value=%s,'
            'from_values=%s,override=%s,override_conditions=%s]'
        ) % (
            self.__class__.__name__, self.translated_keys_,
            self.from_keys_, self.translated_value_, self.from_values_,
            self.override_, self.override_conditions_
        )
    def _is_valid_translated_keys(self):
        """Check translated keys are valid."""
        if callable(self.translated_keys_):
            return
        if not isinstance(self.translated_keys_, list):
            raise TypeError(
                'translated_keys %s type is %s while expected type is '
                'list or callable' % (
                    self.translated_keys_, type(self.translated_keys_)))
        for i, translated_key in enumerate(self.translated_keys_):
            if isinstance(translated_key, basestring):
                # dest keys must be literal paths, not glob patterns.
                if '*' in translated_key:
                    raise KeyError(
                        'transalted_keys[%d] %s should not contain *' % (
                            i, translated_key))
            elif not callable(translated_key):
                raise TypeError(
                    'translated_keys[%d] type is %s while expected '
                    'types are str or callable: %s' % (
                        i, type(translated_key), translated_key))
    def _is_valid_from_keys(self):
        """Check from keys are valid."""
        if not isinstance(self.from_keys_, dict):
            raise TypeError(
                'from_keys %s type is %s while expected type is dict' % (
                    self.from_keys_, type(self.from_keys_)))
        for mapping_key, from_key in self.from_keys_.items():
            if not isinstance(mapping_key, basestring):
                raise TypeError(
                    'from_keys key %s type is %s while '
                    'expected type is [str, unicode]' % (
                        mapping_key, type(mapping_key)))
            if not isinstance(from_key, basestring):
                raise TypeError(
                    'from_keys[%s] type is %s while '
                    'expected type is [str, unicode]: %s' % (
                        mapping_key, type(from_key), from_key))
            if '*' in from_key:
                raise KeyError(
                    'from_keys[%s] %s contains *' % (
                        mapping_key, from_key))
    def _is_valid_from_values(self):
        """Check from values are valid."""
        if not isinstance(self.from_values_, dict):
            raise TypeError(
                'from_values %s type is %s while expected type is dict' % (
                    self.from_values_, type(self.from_values_)))
        for mapping_key, from_value in self.from_values_.items():
            if not isinstance(mapping_key, basestring):
                raise TypeError(
                    'from_values key %s type is %s while '
                    'expected type is [str, unicode]' % (
                        mapping_key, type(mapping_key)))
            if not isinstance(from_value, basestring):
                raise TypeError(
                    'from_values[%s] type is %s while '
                    'expected type is [str, unicode]: %s' % (
                        mapping_key, type(from_value), from_value))
            if '*' in from_value:
                raise KeyError(
                    'from_values[%s] %s contains *' % (
                        mapping_key, from_value))
    def _is_valid_override_conditions(self):
        """Check override conditions are valid."""
        if not isinstance(self.override_conditions_, dict):
            raise TypeError(
                'override_conditions %s type is %s '
                'while expected type is dict' % (
                    self.override_conditions_,
                    type(self.override_conditions_)))
        override_items = self.override_conditions_.items()
        for mapping_key, override_condition in override_items:
            if not isinstance(mapping_key, basestring):
                raise TypeError(
                    'override_conditions key %s type is %s while '
                    'expected type is [str, unicode]' % (
                        mapping_key, type(mapping_key)))
            if not isinstance(override_condition, basestring):
                raise TypeError(
                    'override_conditions[%s] type is %s '
                    'while expected type is [str, unicode]: %s' % (
                        mapping_key, type(override_condition),
                        override_condition))
            if '*' in override_condition:
                raise KeyError(
                    'override_conditions[%s] %s contains *' % (
                        mapping_key, override_condition))
    def _is_valid(self):
        """Check key translator is valid."""
        self._is_valid_translated_keys()
        self._is_valid_from_keys()
        self._is_valid_from_values()
        self._is_valid_override_conditions()
    def _get_translated_keys(self, ref_key, sub_ref):
        """Get translated keys.

        Resolves from_keys_ paths against sub_ref and passes them as
        kwargs to any callable key; non-callable keys pass through.
        """
        key_configs = {}
        for mapping_key, from_key in self.from_keys_.items():
            if from_key in sub_ref:
                key_configs[mapping_key] = sub_ref[from_key]
            else:
                logging.error('%s from_key %s missing in %s',
                              self, from_key, sub_ref)
        if callable(self.translated_keys_):
            translated_keys = self.translated_keys_(
                sub_ref, ref_key, **key_configs)
            return translated_keys
        translated_keys = []
        for translated_key in self.translated_keys_:
            if callable(translated_key):
                translated_key = translated_key(
                    sub_ref, ref_key, **key_configs)
            if not translated_key:
                logging.debug('%s ignore empty translated key', self)
                continue
            if not isinstance(translated_key, basestring):
                logging.error(
                    '%s translated key %s should be [str, unicode]',
                    self, translated_key)
                continue
            translated_keys.append(translated_key)
        return translated_keys
    def _get_translated_value(self, ref_key, sub_ref,
                              translated_key, translated_sub_ref):
        """Get translated value.

        Plain values pass through; callables receive the origin ref,
        the destination ref and kwargs resolved from from_values_.
        """
        if self.translated_value_ is None:
            # default: copy the origin config verbatim.
            return sub_ref.config
        elif not callable(self.translated_value_):
            return self.translated_value_
        value_configs = {}
        for mapping_key, from_value in self.from_values_.items():
            if from_value in sub_ref:
                value_configs[mapping_key] = sub_ref[from_value]
            else:
                logging.info('%s ignore from value %s for key %s',
                             self, from_value, ref_key)
        return self.translated_value_(
            sub_ref, ref_key, translated_sub_ref,
            translated_key, **value_configs)
    def _get_override(self, ref_key, sub_ref,
                      translated_key, translated_sub_ref):
        """Get override.

        Static override flags are coerced to bool; callables receive
        kwargs resolved from override_conditions_.
        """
        if not callable(self.override_):
            return bool(self.override_)
        override_condition_configs = {}
        override_items = self.override_conditions_.items()
        for mapping_key, override_condition in override_items:
            if override_condition in sub_ref:
                override_condition_configs[mapping_key] = (
                    sub_ref[override_condition])
            else:
                logging.error('%s no override condition %s in %s',
                              self, override_condition, ref_key)
        return self.override_(sub_ref, ref_key,
                              translated_sub_ref,
                              translated_key,
                              **override_condition_configs)
    def translate(self, ref, key, translated_ref):
        """Translate content in ref[key] to translated_ref.

        For every origin ref matching `key` (glob), computes the dest
        keys, derives a value for each, and updates the dest ref —
        skipping keys whose derived value is None.
        """
        logging.debug('translate %s', key)
        for ref_key, sub_ref in ref.ref_items(key):
            translated_keys = self._get_translated_keys(ref_key, sub_ref)
            for translated_key in translated_keys:
                translated_sub_ref = translated_ref.setdefault(
                    translated_key)
                translated_value = self._get_translated_value(
                    ref_key, sub_ref, translated_key, translated_sub_ref)
                if translated_value is None:
                    logging.debug(
                        'translated key %s will be ignored '
                        'since translated value is None', translated_key)
                    continue
                override = self._get_override(
                    ref_key, sub_ref, translated_key, translated_sub_ref)
                logging.debug('%s translate to %s value %s', ref_key,
                              translated_key, translated_value)
                translated_sub_ref.update(translated_value, override)
class ConfigTranslator(object):
    """Class to translate origin config to expected dest config.

    Holds a mapping of origin config paths to lists of
    :class:`KeyTranslator` and applies them all in :meth:`translate`.
    """
    def __init__(self, mapping):
        """Constructor
        :param mapping: dict of config path to :class:`KeyTranslator` instance
        """
        self.mapping_ = mapping
        # fail fast on a malformed mapping.
        self._is_valid()
    def __repr__(self):
        return '%s[mapping=%s]' % (self.__class__.__name__, self.mapping_)
    def _is_valid(self):
        """Check if ConfigTranslator is valid."""
        if not isinstance(self.mapping_, dict):
            raise TypeError(
                'mapping type is %s while expected type is dict: %s' % (
                    type(self.mapping_), self.mapping_))
        for key, values in self.mapping_.items():
            if not isinstance(key, basestring):
                raise TypeError(
                    'mapping key %s type is %s while expected '
                    'is str or unicode' % (key, type(key)))
            if not isinstance(values, list):
                raise TypeError(
                    'mapping[%s] type is %s '
                    'while expected type is list: %s' % (
                        key, type(values), values))
            for i, value in enumerate(values):
                if not isinstance(value, KeyTranslator):
                    raise TypeError(
                        'mapping[%s][%d] type is %s '
                        'while expected type is KeyTranslator: %s' % (
                            key, i, type(value), value))
    def translate(self, config):
        """Translate config.
        :param config: configuration to translate.
        :returns: the translated configuration.
        """
        ref = config_reference.ConfigReference(config)
        translated_ref = config_reference.ConfigReference({})
        # apply every key translator registered for every path.
        for key, values in self.mapping_.items():
            for value in values:
                value.translate(ref, key, translated_ref)
        # prune empty/None branches produced by the translators.
        translated_config = config_reference.get_clean_config(
            translated_ref.config)
        logging.debug('translate config\n%s\nto\n%s',
                      config, translated_config)
        return translated_config

View File

@ -1,245 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""callback lib for config translator callbacks."""
import crypt
import logging
import re
from compass.utils import util
def get_key_from_pattern(
    _ref, path, from_pattern='.*',
    to_pattern='', **kwargs
):
    """Get translated key from pattern.

    Matches `from_pattern` (with named groups) against the origin
    path and formats `to_pattern` with the captured groups merged
    with any extra kwargs.

    :param path: origin ref path being translated.
    :param from_pattern: regex with named groups matched against path.
    :param to_pattern: %-format string producing the translated key.
    :returns: the translated key, or None when the path does not match.
    :raises: KeyError when to_pattern references a missing group.
    """
    match = re.match(from_pattern, path)
    if not match:
        return None
    group = match.groupdict()
    # extra kwargs may override/extend the captured groups.
    util.merge_dict(group, kwargs)
    try:
        translated_key = to_pattern % group
    except KeyError as error:
        logging.error('failed to get translated key from %s %% %s',
                      to_pattern, group)
        raise error
    logging.debug('got translated key %s for %s', translated_key, path)
    return translated_key
def get_keys_from_config_mapping(ref, _path, **kwargs):
    """get translated keys from config.

    The origin ref's config is itself a mapping of translated key to
    value spec; the translated keys are simply its keys.
    """
    mapping = ref.config
    translated_keys = mapping.keys()
    logging.debug('got translated_keys %s from config mapping %s',
                  translated_keys, mapping)
    return translated_keys
def get_keys_from_role_mapping(ref, _path, mapping={}, **_kwargs):
    """get translated keys from roles.

    Collects the translated keys contributed by every role in the
    origin config that has an entry in `mapping`.
    """
    roles = ref.config
    translated_keys = []
    for role in roles:
        if role in mapping:
            translated_keys.extend(mapping[role].keys())
    logging.debug('got translated_keys %s from roles %s and mapping %s',
                  translated_keys, roles, mapping)
    return translated_keys
def get_value_from_config_mapping(
    ref, _path, _translated_ref, translated_path, **kwargs
):
    """get translated_value from config and translated_path.

    Looks up translated_path in the origin config; the stored value is
    either one origin-ref path (string) or a list of candidate paths
    tried in order until one resolves to a non-None value.

    :returns: the resolved value, or None when translated_path is
        absent, no candidate resolves, or the spec type is unexpected.
    """
    # basestring does not exist on python 3; fall back to str there.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    config = ref.config
    if translated_path not in config:
        return None
    value = config[translated_path]
    # initialize so an empty candidate list cannot leave the name
    # unbound (previously raised UnboundLocalError).
    translated_value = None
    if isinstance(value, string_types):
        translated_value = ref.get(value)
        logging.debug('got translated_value %s from %s',
                      translated_value, value)
    elif isinstance(value, list):
        for value_in_list in value:
            translated_value = ref.get(value_in_list)
            logging.debug('got translated_value %s from %s',
                          translated_value, value_in_list)
            if translated_value is not None:
                break
    else:
        logging.error('unexpected type %s: %s',
                      type(value), value)
        translated_value = None
    logging.debug('got translated_value %s from translated_path %s',
                  translated_value, translated_path)
    return translated_value
def get_value_from_role_mapping(
    ref, _path, _translated_ref, translated_path,
    mapping={}, **_kwargs
):
    """get translated value from roles and translated_path.

    Walks the roles in the origin config; the first role whose
    mapping entry contains translated_path determines the result.
    The entry value is either one origin-ref path (string) or a list
    of candidate paths tried in order.

    :returns: the resolved value for the first matching role, or None
        when no role matches (or the candidate list is empty).
    """
    # basestring does not exist on python 3; fall back to str there.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    roles = ref.config
    for role in roles:
        if role not in mapping:
            continue
        if translated_path not in mapping[role]:
            continue
        value = mapping[role][translated_path]
        # initialize so an empty candidate list cannot leave the name
        # unbound (previously raised UnboundLocalError).
        translated_value = None
        if isinstance(value, string_types):
            translated_value = ref.get(value)
            logging.debug('got translated_value %s from %s',
                          translated_value, value)
        elif isinstance(value, list):
            for value_in_list in value:
                translated_value = ref.get(value_in_list)
                logging.debug('got translated_value %s from %s',
                              translated_value, value_in_list)
                if translated_value is not None:
                    break
        else:
            logging.error('unexpected type %s: %s',
                          type(value), value)
            translated_value = None
        logging.debug('got translated_value %s from roles %s '
                      'and translated_path %s',
                      translated_value, roles, translated_path)
        # first matching role wins.
        return translated_value
    return None
def get_encrypted_value(ref, _path, _translated_ref, _translated_path,
                        crypt_method=None, **_kwargs):
    """Get encrypted value.

    Hashes the plaintext in ref.config with crypt(3), defaulting to
    an md5 salt when no crypt_method is supplied.

    :param crypt_method: salt/method string passed to crypt.crypt.
    :returns: the crypt hash string of ref.config.
    """
    # NOTE(review): the stdlib crypt module is unix-only and was
    # removed in python 3.13 — confirm target runtime before reuse.
    if not crypt_method:
        if hasattr(crypt, 'METHOD_MD5'):
            crypt_method = crypt.METHOD_MD5
        else:
            # for python2.7, copy python2.6 METHOD_MD5 logic here.
            from random import choice
            import string
            _saltchars = string.ascii_letters + string.digits + './'
            def _mksalt():
                """generate salt."""
                # '$1$' selects the md5 scheme; 8 random salt chars.
                salt = '$1$'
                salt += ''.join(choice(_saltchars) for _ in range(8))
                return salt
            crypt_method = _mksalt()
    return crypt.crypt(ref.config, crypt_method)
def set_value(ref, _path, _translated_ref,
              _translated_path,
              return_value_callback=None, **kwargs):
    """Set value into translated config.

    The origin config is used only when every extra kwarg is truthy;
    otherwise None is produced. An optional callback post-processes
    the result.
    """
    condition = all(kwargs.values())
    translated_value = ref.config if condition else None
    if return_value_callback:
        return return_value_callback(translated_value)
    return translated_value
def add_value(ref, _path, translated_ref,
              translated_path,
              get_value_callback=None,
              check_value_callback=None,
              add_value_callback=None,
              return_value_callback=None, **kwargs):
    """Append ref.config to the translated config's value list.

    :param get_value_callback: extracts the list from the translated config.
    :param check_value_callback: tests membership of ref.config in the list.
    :param add_value_callback: inserts ref.config into the list.
    :param return_value_callback: post-processes the resulting list.
    :raises TypeError: if the extracted value is not a list.

    Any falsy extra keyword argument disables the append; the (unchanged)
    list is still returned.
    """
    if not translated_ref.config:
        value_list = []
    else:
        if not get_value_callback:
            value_list = translated_ref.config
        else:
            value_list = get_value_callback(translated_ref.config)
    logging.debug('%s value list is %s', translated_path, value_list)
    if not isinstance(value_list, list):
        raise TypeError(
            '%s value %s type %s but expected type is list' % (
                translated_path, value_list, type(value_list)))
    condition = all(kwargs.values())
    logging.debug('%s add_value condition is %s', translated_path, condition)
    if condition:
        if not check_value_callback:
            value_in_list = ref.config in value_list
        else:
            value_in_list = check_value_callback(ref.config, value_list)
        if value_in_list:
            # BUG FIX: log arguments were swapped (the whole list was
            # logged as the found value and the boolean as the container).
            logging.debug('%s found value %s in %s',
                          translated_path, ref.config, value_list)
        if not value_in_list:
            if not add_value_callback:
                value_list.append(ref.config)
            else:
                add_value_callback(ref.config, value_list)
    logging.debug('%s value %s after added', translated_path, value_list)
    if not return_value_callback:
        return value_list
    else:
        return return_value_callback(value_list)
def override_if_any(_ref, _path, _translated_ref, _translated_path, **kwargs):
    """Override when at least one keyword argument is truthy."""
    for flag in kwargs.values():
        if flag:
            return True
    return False
def override_if_all(_ref, _path, _translated_ref, _translated_path, **kwargs):
    """Override only when every keyword argument is truthy."""
    for flag in kwargs.values():
        if not flag:
            return False
    return True
def override_path_has(_ref, path, _translated_ref, _translated_path,
                      should_exist='', **_kwargs):
    """Override when should_exist is one of the '/'-separated path parts."""
    parts = path.split('/')
    return should_exist in parts

View File

@ -11,34 +11,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common database query."""
from compass.db.models import BASE
def model_query(session, model, *args, **kwargs):
    """Start a query on *model* inside a subtransaction.

    :param session: database session.
    :param model: a mapped class; must be a subclass of BASE.
    :raises Exception: if model is not a subclass of BASE.
    :returns: a sqlalchemy query object over model.
    """
    if not issubclass(model, BASE):
        # Keep the broad Exception type for caller compatibility,
        # but fix the misspelled message ("sublass").
        raise Exception("model should be subclass of BASE!")
    with session.begin(subtransactions=True):
        query = session.query(model)
    return query
def model_filter(query, model, filters, legal_keys):
    """Narrow *query* by the entries of *filters* whose keys are legal.

    List values become an IN filter; scalars become an equality filter.
    Keys not present in legal_keys are ignored.
    """
    for key, value in filters.items():
        if key not in legal_keys:
            continue
        column = getattr(model, key)
        if isinstance(value, list):
            query = query.filter(column.in_(value))
        else:
            query = query.filter(column == value)
    return query

View File

@ -12,147 +12,227 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adapter database operations."""
"""Adapter related database operations."""
import logging
import re
from compass.db import api
from compass.db.api import database
from compass.db.api.utils import wrap_to_dict
from compass.db.exception import RecordNotExists
from compass.db.models import Adapter
from compass.db.models import OSConfigMetadata
# from compass.db.models import PackageConfigMetadata
from compass.db.api import utils
from compass.db import exception
from compass.db import models
from compass.utils import setting_wrapper as setting
from compass.utils import util
SUPPORTED_FILTERS = ['name']
ERROR_MSG = {
'findNoAdapter': 'Cannot find the Adapter, ID is %d',
'findNoOs': 'Cannot find OS, ID is %d'
}
def _copy_adapters_from_parent(session, model, parent, system_name):
    """Recursively create adapters for children lacking their own.

    For every child system without adapters, clone each leaf adapter
    (one without children) of the parent, naming the clone
    '<child>(<installer>)'.
    """
    for child in parent.children:
        if not child.adapters:
            for adapter in parent.adapters:
                # Skip non-leaf adapters; only concrete ones are cloned.
                if adapter.children:
                    continue
                # NOTE(review): the child *object* is passed under the
                # literal keyword 'system_name' while the system_name
                # string parameter is unused here -- confirm this should
                # not be **{system_name: child}.
                utils.add_db_object(
                    session, model,
                    True,
                    '%s(%s)' % (child.name, adapter.installer_name),
                    system_name=child, parent=adapter
                )
        _copy_adapters_from_parent(session, model, child, system_name)
@wrap_to_dict()
def get_adapter(adapter_id, return_roles=False):
    """Return the adapter as a dict, or only its role names.

    :param adapter_id: id of the adapter to fetch.
    :param return_roles: when True, return a list of role names instead
        of the adapter dict.
    """
    with database.session() as session:
        adapter = _get_adapter(session, adapter_id)
        info = adapter.to_dict()
        if return_roles:
            roles = adapter.roles
            # NOTE(review): the dict built above is discarded and only
            # the role-name list is returned -- confirm callers expect
            # this replacement rather than an added key.
            info = [role.name for role in roles]
        return info
def _complement_os_adapters(session):
    """Fill in missing OS adapters for descendants of every root OS.

    Roots are operating systems with no parent; adapters are copied down
    the tree by _copy_adapters_from_parent.
    """
    with session.begin(subtransactions=True):
        root_oses = utils.list_db_objects(
            session, models.OperatingSystem,
            parent_id=None
        )
        for root_os in root_oses:
            _copy_adapters_from_parent(
                session, models.OSAdapter, root_os, 'os'
            )
@wrap_to_dict()
def get_adapter_config_schema(adapter_id, os_id):
def _complement_distributed_system_adapters(session):
    """Fill in missing package adapters under every root distributed system.

    Mirrors _complement_os_adapters but walks DistributedSystem roots and
    copies PackageAdapter rows.
    """
    with session.begin(subtransactions=True):
        root_dses = utils.list_db_objects(
            session, models.DistributedSystem,
            parent_id=None
        )
        for root_ds in root_dses:
            _copy_adapters_from_parent(
                session, models.PackageAdapter, root_ds, 'distributed_system'
            )
with database.session() as session:
adapter = _get_adapter(session, adapter_id)
os_list = []
if not os_id:
os_list = [os.id for os in adapter.support_os]
def _add_system(session, model, configs):
parents = {}
for config in configs:
object = utils.add_db_object(
session, model,
True, config['NAME'],
deployable=config.get('DEPLOYABLE', False)
)
parents[config['NAME']] = (
object, config.get('PARENT', None)
)
for name, (object, parent_name) in parents.items():
if parent_name:
parent, _ = parents[parent_name]
else:
os_list = [os_id]
schema = _get_adapter_config_schema(session, adapter_id, os_list)
return schema
parent = None
utils.update_db_object(session, object, parent=parent)
@wrap_to_dict()
def list_adapters(filters=None):
    """List all adapters, optionally filtered by some fields."""
    with database.session() as session:
        adapters = _list_adapters(session, filters)
        adapters_list = [adapter.to_dict() for adapter in adapters]
        return adapters_list
def _get_adapter(session, adapter_id):
"""Get the adapter by ID."""
def add_oses_internal(session):
configs = util.load_configs(setting.OS_DIR)
with session.begin(subtransactions=True):
adapter = api.model_query(session, Adapter).first()
if not adapter:
err_msg = ERROR_MSG['findNoAdapter'] % adapter_id
raise RecordNotExists(err_msg)
return adapter
_add_system(session, models.OperatingSystem, configs)
def _list_adapters(session, filters=None):
"""Get all adapters, optionally filtered by some fields."""
filters = filters or {}
def add_distributed_systems_internal(session):
configs = util.load_configs(setting.DISTRIBUTED_SYSTEM_DIR)
with session.begin(subtransactions=True):
query = api.model_query(session, Adapter)
adapters = api.model_filter(query, Adapter,
filters, SUPPORTED_FILTERS).all()
return adapters
_add_system(session, models.DistributedSystem, configs)
#TODO(Grace): TMP method
def _get_adapter_config_schema(session, adapter_id, os_list):
output_dict = {}
def add_os_adapters_internal(session):
parents = {}
configs = util.load_configs(setting.OS_ADAPTER_DIR)
with session.begin(subtransactions=True):
os_root = session.query(OSConfigMetadata).filter_by(name="os_config")\
.first()
# pk_root = session.query(PackageConfigMetadata\
# .filter_by(name="os_config").first()
for config in configs:
if 'OS' in config:
os = utils.get_db_object(
session, models.OperatingSystem,
name=config['OS']
)
else:
os = None
if 'INSTALLER' in config:
installer = utils.get_db_object(
session, models.OSInstaller,
name=config['INSTALLER']
)
else:
installer = None
object = utils.add_db_object(
session, models.OSAdapter,
True, config['NAME'], os=os, installer=installer
)
parents[config['NAME']] = (object, config.get('PARENT', None))
for name, (object, parent_name) in parents.items():
if parent_name:
parent, _ = parents[parent_name]
else:
parent = None
utils.update_db_object(
session, object, parent=parent
)
os_config_list = []
for os_id in os_list:
os_config_dict = {"_name": "os_config"}
output_dict = {}
output_dict["os_config"] = os_config_dict
_get_adapter_config_helper(os_root, os_config_dict,
output_dict, "os_id", os_id)
result = {"os_id": os_id}
result.update(output_dict)
os_config_list.append(result)
"""
package_config_dict = {"_name": "package_config"}
output_dict = {}
output_dict["package_config"] = package_config_dict
_get_adapter_config_internal(pk_root, package_config_dict,
output_dict, "adapter_id", adapter_id)
"""
output_dict = {}
output_dict["os_config"] = os_config_list
return output_dict
_complement_os_adapters(session)
# A recursive function
# This assumes that only leaf nodes have field entry and that
# an intermediate node in config_metadata table does not have field entries
def _get_adapter_config_helper(node, current_dict, parent_dict,
id_name, id_value):
children = node.children
def add_package_adapters_internal(session):
parents = {}
configs = util.load_configs(setting.PACKAGE_ADAPTER_DIR)
with session.begin(subtransactions=True):
for config in configs:
if 'DISTRIBUTED_SYSTEM' in config:
distributed_system = utils.get_db_object(
session, models.DistributedSystem,
name=config['DISTRIBUTED_SYSTEM']
)
else:
distributed_system = None
if 'INSTALLER' in config:
installer = utils.get_db_object(
session, models.PackageInstaller,
name=config['INSTALLER']
)
else:
installer = None
object = utils.add_db_object(
session, models.PackageAdapter,
True,
config['NAME'],
distributed_system=distributed_system,
installer=installer,
supported_os_patterns=config.get('SUPPORTED_OS_PATTERNS', [])
)
parents[config['NAME']] = (object, config.get('PARENT', None))
for name, (object, parent_name) in parents.items():
if parent_name:
parent, _ = parents[parent_name]
else:
parent = None
utils.update_db_object(session, object, parent=parent)
if children:
for c in children:
col_value = getattr(c, id_name)
if col_value is None or col_value == id_value:
child_dict = {"_name": c.name}
current_dict[c.name] = child_dict
_get_adapter_config_helper(c, child_dict, current_dict,
id_name, id_value)
del current_dict["_name"]
else:
fields = node.fields
fields_dict = {}
_complement_distributed_system_adapters(session)
for field in fields:
info = field.to_dict()
name = info['field']
del info['field']
fields_dict[name] = info
parent_dict[current_dict["_name"]] = fields_dict
def add_roles_internal(session):
    """Load role config files and register roles on package adapters.

    Each config names a package adapter (ADAPTER_NAME) and carries a
    ROLES list of {role, description, optional} dicts.
    """
    configs = util.load_configs(setting.PACKAGE_ROLE_DIR)
    with session.begin(subtransactions=True):
        for config in configs:
            package_adapter = utils.get_db_object(
                session, models.PackageAdapter,
                name=config['ADAPTER_NAME']
            )
            for role_dict in config['ROLES']:
                utils.add_db_object(
                    session, models.PackageAdapterRole,
                    True, role_dict['role'], package_adapter.id,
                    description=role_dict['description'],
                    optional=role_dict.get('optional', False)
                )
def add_adapters_internal(session):
    """Create Adapter rows combining deployable OS and package adapters.

    Adds one adapter per deployable OS adapter (os only), one per
    deployable package adapter (package only), and one per (os, package)
    pair whose OS name matches one of the package adapter's supported OS
    patterns.
    """
    with session.begin(subtransactions=True):
        package_adapters = [
            package_adapter
            for package_adapter in utils.list_db_objects(
                session, models.PackageAdapter
            )
            if package_adapter.deployable
        ]
        os_adapters = [
            os_adapter
            for os_adapter in utils.list_db_objects(
                session, models.OSAdapter
            )
            if os_adapter.deployable
        ]
        adapters = []
        for os_adapter in os_adapters:
            adapters.append(utils.add_db_object(
                session, models.Adapter, True,
                os_adapter.id, None
            ))
        for package_adapter in package_adapters:
            adapters.append(utils.add_db_object(
                session, models.Adapter, True,
                None, package_adapter.id
            ))
            for os_adapter in os_adapters:
                # NOTE(review): reads adapter_supported_os_patterns while
                # creation stored supported_os_patterns -- presumably a
                # model-level alias; confirm the attribute exists.
                for os_pattern in (
                    package_adapter.adapter_supported_os_patterns
                ):
                    if re.match(os_pattern, os_adapter.name):
                        adapters.append(utils.add_db_object(
                            session, models.Adapter, True,
                            os_adapter.id, package_adapter.id
                        ))
                        # One combined adapter per (os, package) pair.
                        break
        return adapters
def get_adapters_internal(session):
    """Return a mapping from adapter id to its dict representation."""
    with session.begin(subtransactions=True):
        return {
            adapter.id: adapter.to_dict()
            for adapter in utils.list_db_objects(session, models.Adapter)
        }

View File

@ -0,0 +1,121 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adapter related object holder."""
from compass.db.api import adapter as adapter_api
from compass.db.api import database
from compass.db.api import permission
from compass.db.api import user as user_api
from compass.db.api import utils
from compass.db import exception
SUPPORTED_FIELDS = [
'name', 'os', 'distributed_system', 'os_installer', 'package_installer'
]
OS_FIELD_MAPPING = {
'os': 'os_name',
'os_installer': 'installer_type'
}
PACKAGE_FIELD_MAPPING = {
'distributed_system': 'distributed_system_name',
'package_installer': 'installer_type'
}
def load_adapters():
    """Load all adapter dicts from the database, keyed by adapter id.

    Called once at import time to populate the module-level
    ADAPTER_MAPPING cache.
    """
    with database.session() as session:
        return adapter_api.get_adapters_internal(session)
ADAPTER_MAPPING = load_adapters()
def _filter_adapters(adapter_config, filter_name, filter_value):
if filter_name not in adapter_config:
return False
if isinstance(filter_value, list):
return bool(
adapter_config[filter_name] in filter_value
)
elif isinstance(filter_value, dict):
return all([
_filter_adapters(
adapter_config[filter_name],
sub_filter_key, sub_filter_value
)
for sub_filter_key, sub_filter_value in filter_value.items()
])
else:
return adapter_config[filter_name] == filter_value
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
def list_adapters(lister, **filters):
    """List cached adapters visible to lister, filtered by **filters.

    OS- and package-related filter keys are translated into nested
    filters against the adapter dicts held in ADAPTER_MAPPING.
    """
    translated_filters = {}
    # BUG FIX: iterate (key, value) pairs; iterating the dict directly
    # yielded bare keys and crashed on tuple unpacking.
    for filter_name, filter_value in filters.items():
        if filter_name in OS_FIELD_MAPPING:
            translated_filters.setdefault('os_adapter', {})[
                OS_FIELD_MAPPING[filter_name]
            ] = filter_value
        elif filter_name in PACKAGE_FIELD_MAPPING:
            # BUG FIX: adapter dicts use the 'package_adapter' key
            # (see get_adapter_roles); 'package-adapter' never matched.
            translated_filters.setdefault('package_adapter', {})[
                PACKAGE_FIELD_MAPPING[filter_name]
            ] = filter_value
        else:
            translated_filters[filter_name] = filter_value
    with database.session() as session:
        # Session only needed for the permission check; the listing
        # itself works off the in-memory cache.
        user_api.check_user_permission_internal(
            session, lister, permission.PERMISSION_LIST_ADAPTERS)
        filtered_adapter_dicts = []
        adapter_dicts = ADAPTER_MAPPING.values()
        for adapter_dict in adapter_dicts:
            if all([
                _filter_adapters(adapter_dict, filter_name, filter_value)
                for filter_name, filter_value in translated_filters.items()
            ]):
                filtered_adapter_dicts.append(adapter_dict)
        return filtered_adapter_dicts
@utils.supported_filters([])
def get_adapter(getter, adapter_id, **kwargs):
    """Get one adapter dict from the cache.

    :raises exception.RecordNotExists: if adapter_id is unknown.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_ADAPTERS)
        if adapter_id not in ADAPTER_MAPPING:
            # BUG FIX: corrected 'adpater' typo in the error message.
            raise exception.RecordNotExists(
                'adapter %s does not exist' % adapter_id
            )
        return ADAPTER_MAPPING[adapter_id]
@utils.supported_filters([])
def get_adapter_roles(getter, adapter_id, **kwargs):
    """Get the role list of an adapter's package adapter.

    :raises exception.RecordNotExists: if the adapter is unknown or has
        no package adapter part.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_ADAPTERS)
        if adapter_id not in ADAPTER_MAPPING:
            # BUG FIX: corrected 'adpater' typo in the error message.
            raise exception.RecordNotExists(
                'adapter %s does not exist' % adapter_id
            )
        adapter_dict = ADAPTER_MAPPING[adapter_id]
        if 'package_adapter' not in adapter_dict:
            raise exception.RecordNotExists(
                'adapter %s does not contain package_adapter' % adapter_id
            )
        return ADAPTER_MAPPING[adapter_id]['package_adapter']['roles']

View File

@ -13,136 +13,785 @@
# limitations under the License.
"""Cluster database operations."""
import logging
import simplejson as json
from compass.db import api
from compass.db.api import database
from compass.db.api.utils import merge_dict
from compass.db.api.utils import wrap_to_dict
from compass.db.exception import InvalidParameter
from compass.db.exception import RecordNotExists
from compass.db.config_validation import default_validator
# from compass.db.config_validation import extension
from compass.db.models import Cluster
from compass.db.api import metadata_holder as metadata_api
from compass.db.api import permission
from compass.db.api import user as user_api
from compass.db.api import utils
from compass.db import exception
from compass.db import models
from compass.utils import util
SUPPORTED_FILTERS = ['name', 'adapter', 'owner']
ERROR_MSG = {
'findNoCluster': 'Cannot find the Cluster, ID is %d',
}
SUPPORTED_FIELDS = [
'name', 'os_name', 'distributed_system_name', 'owner', 'adapter_id'
]
SUPPORTED_CLUSTERHOST_FIELDS = []
RESP_FIELDS = [
'id', 'name', 'os_name', 'reinstall_distributed_system',
'distributed_system_name', 'distributed_system_installed',
'owner', 'adapter_id',
'created_at', 'updated_at'
]
RESP_CLUSTERHOST_FIELDS = [
'id', 'host_id', 'machine_id', 'name', 'cluster_id',
'mac', 'os_installed', 'distributed_system_installed',
'os_name', 'distributed_system_name',
'reinstall_os', 'reinstall_distributed_system',
'owner', 'cluster_id',
'created_at', 'updated_at'
]
RESP_CONFIG_FIELDS = [
'os_config',
'package_config',
'config_step',
'config_validated',
'created_at',
'updated_at'
]
RESP_CLUSTERHOST_CONFIG_FIELDS = [
'package_config',
'config_step',
'config_validated',
'created_at',
'updated_at'
]
RESP_STATE_FIELDS = [
'id', 'state', 'progress', 'message',
'created_at', 'updated_at'
]
RESP_CLUSTERHOST_STATE_FIELDS = [
'id', 'state', 'progress', 'message',
'created_at', 'updated_at'
]
RESP_REVIEW_FIELDS = [
'cluster', 'hosts'
]
RESP_ACTION_FIELDS = [
'status', 'details'
]
ADDED_FIELDS = ['name', 'adapter_id']
UPDATED_FIELDS = ['name', 'reinstall_distributed_system']
ADDED_CLUSTERHOST_FIELDS = ['machine_id']
UPDATED_CLUSTERHOST_FIELDS = ['name', 'reinstall_os']
UPDATED_HOST_FIELDS = ['name', 'reinstall_os']
UPDATED_CONFIG_FIELDS = [
'put_os_config', 'put_package_config', 'config_step'
]
PATCHED_CONFIG_FIELDS = [
'patched_os_config', 'patched_package_config', 'config_step'
]
UPDATED_CLUSTERHOST_CONFIG_FIELDS = [
'put_package_config'
]
PATCHED_CLUSTERHOST_CONFIG_FIELDS = [
'patched_package_config'
]
UPDATED_CLUSTERHOST_STATE_FIELDS = [
'state', 'progress', 'message'
]
@wrap_to_dict()
def get_cluster(cluster_id):
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
def list_clusters(lister, **filters):
"""List clusters."""
with database.session() as session:
cluster = _get_cluster(session, cluster_id)
info = cluster.to_dict()
return info
user_api.check_user_permission_internal(
session, lister, permission.PERMISSION_LIST_CLUSTERS)
return [
cluster.to_dict()
for cluster in utils.list_db_objects(
session, models.Cluster, **filters
)
]
@wrap_to_dict()
def list_clusters(filters=None):
"""List all users, optionally filtered by some fields."""
filters = filters or {}
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters([])
def get_cluster(getter, cluster_id, **kwargs):
"""Get cluster info."""
with database.session() as session:
clusters = _list_clusters(session, filters)
clusters_info = [cluster.to_dict() for cluster in clusters]
return clusters_info
user_api.check_user_permission_internal(
session, getter, permission.PERMISSION_LIST_CLUSTERS)
return utils.get_db_object(
session, models.Cluster, id=cluster_id
).to_dict()
@wrap_to_dict()
def get_cluster_config(cluster_id):
"""Get configuration info for a specified cluster."""
with database.session() as session:
config = _get_cluster_config(session, cluster_id)
return config
def _conditional_exception(cluster, exception_when_not_editable):
if exception_when_not_editable:
raise exception.Forbidden(
'cluster %s is not editable' % cluster.name
)
else:
return False
def _get_cluster_config(session, cluster_id):
def is_cluster_editable(
session, cluster, user,
reinstall_distributed_system_set=False,
exception_when_not_editable=True
):
with session.begin(subtransactions=True):
cluster = _get_cluster(cluster_id)
config = cluster.config
return config
if reinstall_distributed_system_set:
if cluster.state.state == 'INSTALLING':
return _conditional_exception(
cluster, exception_when_not_editable
)
elif not cluster.reinstall_distributed_system:
return _conditional_exception(
cluster, exception_when_not_editable
)
if not user.is_admin and cluster.creator_id != user.id:
return _conditional_exception(
cluster, exception_when_not_editable
)
return True
def _get_cluster(session, cluster_id):
"""Get the adapter by ID."""
with session.begin(subtransactions=True):
cluster = session.query(Cluster).filter_by(id=cluster_id).first()
if not cluster:
err_msg = ERROR_MSG['findNoCluster'] % cluster_id
raise RecordNotExists(err_msg)
return cluster
def _list_clusters(session, filters=None):
"""Get all clusters, optionally filtered by some fields."""
filters = filters or {}
with session.begin(subtransactions=True):
query = api.model_query(session, Cluster)
clusters = api.model_filter(query, Cluster,
filters, SUPPORTED_FILTERS).all()
return clusters
def update_cluster_config(cluster_id, root_elem, config, patch=True):
result = None
if root_elem not in ["os_config", "package_config"]:
raise InvalidParameter("Invalid parameter %s" % root_elem)
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters(ADDED_FIELDS)
def add_cluster(creator, name, adapter_id, **kwargs):
"""Create a cluster."""
with database.session() as session:
cluster = _get_cluster(session, cluster_id)
user_api.check_user_permission_internal(
session, creator, permission.PERMISSION_ADD_CLUSTER)
cluster = utils.add_db_object(
session, models.Cluster, True,
name, adapter_id=adapter_id, creator_id=creator.id, **kwargs
)
cluster_dict = cluster.to_dict()
return cluster_dict
id_name = None
id_value = None
if root_elem == "os_config":
id_name = "os_id"
id_value = getattr(cluster, "os_id")
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters(optional_support_keys=UPDATED_FIELDS)
def update_cluster(updater, cluster_id, **kwargs):
    """Update a cluster's updatable fields and return it as a dict.

    :param updater: user performing the update; used for the permission
        and editability checks.
    :param cluster_id: id of the cluster to update.
    """
    with database.session() as session:
        # NOTE(review): checks PERMISSION_ADD_CLUSTER rather than a
        # dedicated update permission -- confirm this is intended.
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_CLUSTER)
        cluster = utils.get_db_object(
            session, models.Cluster, id=cluster_id
        )
        # Editability rules differ when the caller flips
        # reinstall_distributed_system, so pass that flag through.
        is_cluster_editable(
            session, cluster, updater,
            reinstall_distributed_system_set=(
                kwargs.get('reinstall_distributed_system', False)
            )
        )
        utils.update_db_object(session, cluster, **kwargs)
        return cluster.to_dict()
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters([])
def del_cluster(deleter, cluster_id, **kwargs):
    """Delete a cluster and return its last state as a dict."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, deleter, permission.PERMISSION_DEL_CLUSTER)
        cluster = utils.get_db_object(
            session, models.Cluster, id=cluster_id
        )
        # Only editable clusters may be removed.
        is_cluster_editable(session, cluster, deleter)
        utils.del_db_object(session, cluster)
        return cluster.to_dict()
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
@utils.supported_filters([])
def get_cluster_config(getter, cluster_id, **kwargs):
    """Get a cluster's config fields (os/package config, validation state)."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_CLUSTER_CONFIG)
        # wrap_to_dict trims the full dict down to RESP_CONFIG_FIELDS.
        return utils.get_db_object(
            session, models.Cluster, id=cluster_id
        ).to_dict()
def update_cluster_config_internal(session, updater, cluster, **kwargs):
    """Update a cluster config inside an existing session.

    Marks the config as not validated, applies **kwargs, then
    re-validates whatever os/package config is now present against the
    adapter metadata.
    """
    with session.begin(subtransactions=True):
        is_cluster_editable(session, cluster, updater)
        utils.update_db_object(
            session, cluster, config_validated=False, **kwargs
        )
        os_config = cluster.os_config
        if os_config:
            metadata_api.validate_os_config(
                os_config, cluster.adapter_id
            )
        package_config = cluster.package_config
        if package_config:
            metadata_api.validate_package_config(
                package_config, cluster.adapter_id
            )
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
@utils.supported_filters(optional_support_keys=UPDATED_CONFIG_FIELDS)
def update_cluster_config(updater, cluster_id, **kwargs):
    """Replace cluster config fields (accepts the put_* key set)."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_CLUSTER_CONFIG)
        cluster = utils.get_db_object(
            session, models.Cluster, id=cluster_id
        )
        update_cluster_config_internal(
            session, updater, cluster, **kwargs
        )
        return cluster.to_dict()
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
@utils.supported_filters(optional_support_keys=PATCHED_CONFIG_FIELDS)
def patch_cluster_config(updater, cluster_id, **kwargs):
    """Merge into cluster config fields (accepts the patched_* key set).

    Same flow as update_cluster_config; only the accepted keys differ.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_CLUSTER_CONFIG)
        cluster = utils.get_db_object(
            session, models.Cluster, id=cluster_id
        )
        update_cluster_config_internal(
            session, updater, cluster, **kwargs
        )
        return cluster.to_dict()
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
@utils.supported_filters([])
def del_cluster_config(deleter, cluster_id):
    """Reset a cluster's os/package config to empty.

    The cluster row itself is kept; only its config fields are cleared
    and marked not validated.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, deleter, permission.PERMISSION_DEL_CLUSTER_CONFIG)
        cluster = utils.get_db_object(
            session, models.Cluster, id=cluster_id
        )
        is_cluster_editable(session, cluster, deleter)
        utils.update_db_object(
            session, cluster, os_config={},
            package_config={}, config_validated=False
        )
        return cluster.to_dict()
@utils.supported_filters(
ADDED_CLUSTERHOST_FIELDS,
optional_support_keys=UPDATED_CLUSTERHOST_FIELDS
)
def add_clusterhost_internal(
session, cluster,
exception_when_existing=False,
machine_id=None, **kwargs
):
from compass.db.api import host as host_api
host_dict = {}
clusterhost_dict = {}
for key, value in kwargs.items():
if key in UPDATED_HOST_FIELDS:
host_dict[key] = value
else:
id_name = "adapter_id"
id_value = getattr(cluster, "adapter_id")
# Validate config format and values
is_valid, message = default_validator.validate_config(session,
config, id_name,
id_value, patch)
if not is_valid:
raise InvalidParameter(message)
# For addtional validation, you can define functions in extension,
# for example:
# os_name = get_os(cluster.os_id)['name']
# if getattr(extension, os_name):
# func = getattr(getattr(extension, os_name), 'validate_config')
# if not func(session, os_id, config, patch):
# return False
if root_elem == 'os_config':
os_config = cluster.os_global_config
os_config = json.loads(json.dumps(os_config))
merge_dict(os_config, config)
cluster.os_global_config = os_config
result = cluster.os_global_config
clusterhost_dict[key] = value
with session.begin(subtransactions=True):
host = utils.get_db_object(
session, models.Host, False, id=machine_id
)
if host:
if host_api.is_host_editable(
session, host, cluster.creator,
reinstall_os_set=host_dict.get('reinstall_os', False),
exception_when_not_editable=False
):
utils.update_db_object(
session, host, adapter=cluster.adapter.os_adapter,
**host_dict
)
else:
logging.info('host %s is not editable', host.name)
else:
package_config = cluster.package_global_config
package_config = json.loads(json.dumps(os_config))
merge_dict(package_config, config)
cluster.package_global_config = package_config
result = cluster.package_global_config
utils.add_db_object(
session, models.Host, False, machine_id,
os=cluster.os,
adapter=cluster.adapter.os_adapter,
creator=cluster.creator,
**host_dict
)
return utils.add_db_object(
session, models.ClusterHost, exception_when_existing,
cluster.id, machine_id, **clusterhost_dict
)
return result
def _add_clusterhosts(session, cluster, machine_dicts):
    """Add one clusterhost per machine dict inside a subtransaction."""
    with session.begin(subtransactions=True):
        for machine_dict in machine_dicts:
            add_clusterhost_internal(
                session, cluster,
                **machine_dict
            )
def _remove_clusterhosts(session, cluster, host_ids):
    """Delete the cluster's clusterhost rows for the given host ids."""
    with session.begin(subtransactions=True):
        utils.del_db_objects(
            session, models.ClusterHost,
            cluster_id=cluster.id, host_id=host_ids
        )
def _set_clusterhosts(session, cluster, machine_dicts):
    """Replace all of the cluster's hosts with the given machine dicts.

    Existing clusterhost rows are removed first, then each machine is
    re-added (exception_when_existing=True for the re-adds).
    """
    with session.begin(subtransactions=True):
        utils.del_db_objects(
            session, models.ClusterHost,
            cluster_id=cluster.id
        )
        for machine_dict in machine_dicts:
            add_clusterhost_internal(
                session, cluster,
                True, **machine_dict
            )
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
@utils.supported_filters(optional_support_keys=SUPPORTED_CLUSTERHOST_FIELDS)
def list_cluster_hosts(lister, cluster_id, **filters):
    """List clusterhosts that belong to one cluster."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, lister, permission.PERMISSION_LIST_CLUSTERHOSTS)
        return [
            clusterhost.to_dict()
            for clusterhost in utils.list_db_objects(
                session, models.ClusterHost, cluster_id=cluster_id,
                **filters
            )
        ]
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
@utils.supported_filters(optional_support_keys=SUPPORTED_CLUSTERHOST_FIELDS)
def list_clusterhosts(lister, **filters):
    """List clusterhosts across all clusters."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, lister, permission.PERMISSION_LIST_CLUSTERHOSTS)
        return [
            clusterhost.to_dict()
            for clusterhost in utils.list_db_objects(
                session, models.ClusterHost,
                **filters
            )
        ]
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
@utils.supported_filters([])
def get_cluster_host(getter, cluster_id, host_id, **kwargs):
    """Get one clusterhost addressed by (cluster_id, host_id)."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_CLUSTERHOSTS)
        return utils.get_db_object(
            session, models.ClusterHost,
            cluster_id=cluster_id, host_id=host_id
        ).to_dict()
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
@utils.supported_filters([])
def get_clusterhost(getter, clusterhost_id, **kwargs):
    """Get one clusterhost addressed by its own id."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_CLUSTERHOSTS)
        return utils.get_db_object(
            session, models.ClusterHost, id=clusterhost_id
        ).to_dict()
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
@utils.supported_filters(
    ADDED_CLUSTERHOST_FIELDS,
    optional_support_keys=UPDATED_CLUSTERHOST_FIELDS
)
def add_cluster_host(creator, cluster_id, machine_id, **kwargs):
    """Add a machine to a cluster as a clusterhost.

    Raises if the (cluster, machine) pair already exists
    (exception_when_existing=True in the internal helper).
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, creator, permission.PERMISSION_UPDATE_CLUSTER_HOSTS)
        cluster = utils.get_db_object(
            session, models.Cluster, id=cluster_id
        )
        clusterhost = add_clusterhost_internal(
            session, cluster, True,
            machine_id=machine_id, **kwargs
        )
        return clusterhost.to_dict()
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
@utils.supported_filters([])
def del_cluster_host(deleter, cluster_id, host_id, **kwargs):
    """Remove the clusterhost addressed by (cluster_id, host_id).

    NOTE(review): unlike del_cluster, no is_cluster_editable check is
    made here -- confirm hosts may be removed from installing clusters.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, deleter, permission.PERMISSION_DEL_CLUSTER_HOST)
        clusterhost = utils.get_db_object(
            session, models.ClusterHost,
            cluster_id=cluster_id, host_id=host_id
        )
        utils.del_db_object(
            session, clusterhost
        )
        return clusterhost.to_dict()
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
@utils.supported_filters([])
def del_clusterhost(deleter, clusterhost_id, **kwargs):
    """Remove the clusterhost addressed by its own id."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, deleter, permission.PERMISSION_DEL_CLUSTER_HOST)
        clusterhost = utils.get_db_object(
            session, models.ClusterHost,
            id=clusterhost_id
        )
        utils.del_db_object(
            session, clusterhost
        )
        return clusterhost.to_dict()
@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
@utils.supported_filters([])
def get_cluster_host_config(getter, cluster_id, host_id, **kwargs):
    """Get config fields of the clusterhost at (cluster_id, host_id)."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_CLUSTERHOST_CONFIG)
        # wrap_to_dict trims to RESP_CLUSTERHOST_CONFIG_FIELDS.
        return utils.get_db_object(
            session, models.ClusterHost,
            cluster_id=cluster_id, host_id=host_id
        ).to_dict()
@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
@utils.supported_filters([])
def get_clusterhost_config(getter, clusterhost_id, **kwargs):
    """Get config fields of the clusterhost addressed by its own id."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_CLUSTERHOST_CONFIG)
        return utils.get_db_object(
            session, models.ClusterHost, id=clusterhost_id
        ).to_dict()
def update_clusterhost_config_internal(
    session, updater, clusterhost, **kwargs
):
    """Update clusterhost config internal.

    Shared worker for the update/patch clusterhost-config entry points.
    Requires the owning cluster to be editable by updater, applies kwargs
    to the clusterhost with config_validated reset, then re-validates the
    resulting package_config (if any) against the cluster's adapter
    metadata.
    """
    with session.begin(subtransactions=True):
        # raises Forbidden when the cluster may not be modified by updater.
        is_cluster_editable(session, clusterhost.cluster, updater)
        utils.update_db_object(
            session, clusterhost, config_validated=False, **kwargs
        )
        package_config = clusterhost.package_config
        if package_config:
            metadata_api.validate_package_config(
                package_config, clusterhost.cluster.adapter_id
            )
@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_CONFIG_FIELDS
)
def update_cluster_host_config(updater, cluster_id, host_id, **kwargs):
    """Replace config of the clusterhost keyed by (cluster_id, host_id)."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_CLUSTERHOST_CONFIG)
        target = utils.get_db_object(
            session, models.ClusterHost,
            cluster_id=cluster_id, host_id=host_id
        )
        update_clusterhost_config_internal(
            session, updater, target, **kwargs
        )
        return target.to_dict()
@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_CONFIG_FIELDS
)
def update_clusterhost_config(updater, clusterhost_id, **kwargs):
    """Replace config of a clusterhost addressed by its own id."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_CLUSTERHOST_CONFIG)
        target = utils.get_db_object(
            session, models.ClusterHost, id=clusterhost_id
        )
        update_clusterhost_config_internal(
            session, updater, target, **kwargs
        )
        return target.to_dict()
@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
@utils.supported_filters(PATCHED_CLUSTERHOST_CONFIG_FIELDS)
def patch_cluster_host_config(updater, cluster_id, host_id, **kwargs):
    """Partially update config of the clusterhost (cluster_id, host_id)."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_CLUSTERHOST_CONFIG)
        target = utils.get_db_object(
            session, models.ClusterHost,
            cluster_id=cluster_id, host_id=host_id
        )
        update_clusterhost_config_internal(
            session, updater, target, **kwargs
        )
        return target.to_dict()
@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
@utils.supported_filters(PATCHED_CLUSTERHOST_CONFIG_FIELDS)
def patch_clusterhost_config(updater, clusterhost_id, **kwargs):
    """Partially update config of a clusterhost addressed by its id."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_CLUSTERHOST_CONFIG)
        target = utils.get_db_object(
            session, models.ClusterHost, id=clusterhost_id
        )
        update_clusterhost_config_internal(
            session, updater, target, **kwargs
        )
        return target.to_dict()
@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
@utils.supported_filters([])
def delete_cluster_host_config(deleter, cluster_id, host_id):
    """Delete a clusterhost config.

    Resets package_config to {} and clears config_validated; the
    clusterhost row itself is preserved.

    :raises: Forbidden (via is_cluster_editable) when the owning cluster
        may not be modified by deleter.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, deleter, permission.PERMISSION_DEL_CLUSTERHOST_CONFIG)
        clusterhost = utils.get_db_object(
            session, models.ClusterHost,
            # BUGFIX: filter key was misspelled `hsot_id`, so the lookup
            # never matched on the host.
            cluster_id=cluster_id, host_id=host_id
        )
        is_cluster_editable(session, clusterhost.cluster, deleter)
        utils.update_db_object(
            session, clusterhost, package_config={}, config_validated=False
        )
        return clusterhost.to_dict()
@utils.wrap_to_dict(RESP_CLUSTERHOST_CONFIG_FIELDS)
@utils.supported_filters([])
def delete_clusterhost_config(deleter, clusterhost_id):
    """Delete config of a clusterhost addressed by its own id.

    Empties package_config and clears the validated flag; the row stays.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, deleter, permission.PERMISSION_DEL_CLUSTERHOST_CONFIG)
        target = utils.get_db_object(
            session, models.ClusterHost, id=clusterhost_id
        )
        is_cluster_editable(session, target.cluster, deleter)
        utils.update_db_object(
            session, target, package_config={}, config_validated=False
        )
        return target.to_dict()
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
@utils.supported_filters(
    optional_support_keys=['add_hosts', 'remove_hosts', 'set_hosts']
)
def update_cluster_hosts(
    updater, cluster_id, add_hosts=None, set_hosts=None,
    remove_hosts=None
):
    """Update cluster hosts.

    Removes remove_hosts, adds add_hosts and, when set_hosts is given,
    replaces the membership entirely.

    NOTE: add_hosts/remove_hosts previously defaulted to a shared mutable
    list (`=[]`); they now default to None, which the truthiness checks
    below treat identically.

    :returns: list of clusterhost dicts after the update.
    :raises: Forbidden when the cluster is not editable by updater.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_UPDATE_CLUSTER_HOSTS)
        cluster = utils.get_db_object(
            session, models.Cluster, id=cluster_id
        )
        is_cluster_editable(session, cluster, updater)
        if remove_hosts:
            _remove_clusterhosts(session, cluster, remove_hosts)
        if add_hosts:
            _add_clusterhosts(session, cluster, add_hosts)
        if set_hosts is not None:
            _set_clusterhosts(session, cluster, set_hosts)
        return [host.to_dict() for host in cluster.clusterhosts]
@utils.wrap_to_dict(RESP_REVIEW_FIELDS)
@utils.supported_filters([])
def review_cluster(reviewer, cluster_id):
    """review cluster.

    Validates the cluster-level os and package configs, merges them into
    each member host / clusterhost, validates the merged results against
    the adapter metadata and marks everything config_validated.

    Fixes over the previous version:
    * `util.mrege_dict` typo -> `util.merge_dict`.
    * deployed package config was validated with validate_os_config;
      it is now validated with validate_package_config.
    * `host_api.is_host_editable(session, host, reviewer, False)` bound
      False to reinstall_os_set (already the default) so the
      "skip non-editable host" branch was unreachable; the flag is now
      passed by keyword.
    * the return moved inside the session scope so .to_dict() runs on
      attached ORM objects.

    :raises: Forbidden when the cluster is not editable by reviewer.
    """
    # imported lazily to avoid a circular import between cluster and host.
    from compass.db.api import host as host_api
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, reviewer, permission.PERMISSION_REVIEW_CLUSTER)
        cluster = utils.get_db_object(
            session, models.Cluster, id=cluster_id
        )
        is_cluster_editable(session, cluster, reviewer)
        os_config = cluster.os_config
        if os_config:
            metadata_api.validate_os_config(
                os_config, cluster.adapter_id, True
            )
            for clusterhost in cluster.clusterhosts:
                host = clusterhost.host
                if not host_api.is_host_editable(
                    session, host, reviewer,
                    exception_when_not_editable=False
                ):
                    logging.info(
                        'ignore update host %s config '
                        'since it is not editable' % host.name
                    )
                    continue
                host_os_config = host.os_config
                deployed_os_config = util.merge_dict(
                    os_config, host_os_config
                )
                metadata_api.validate_os_config(
                    deployed_os_config, host.adapter_id, True
                )
                host.deployed_os_config = deployed_os_config
                host.config_validated = True
        package_config = cluster.package_config
        if package_config:
            metadata_api.validate_package_config(
                package_config, cluster.adapter_id, True
            )
            for clusterhost in cluster.clusterhosts:
                clusterhost_package_config = clusterhost.package_config
                deployed_package_config = util.merge_dict(
                    package_config, clusterhost_package_config
                )
                metadata_api.validate_package_config(
                    deployed_package_config,
                    cluster.adapter_id, True
                )
                clusterhost.deployed_package_config = deployed_package_config
                clusterhost.config_validated = True
        cluster.config_validated = True
        return {
            'cluster': cluster.to_dict(),
            'clusterhosts': [
                clusterhost.to_dict()
                for clusterhost in cluster.clusterhosts
            ]
        }
@utils.wrap_to_dict(RESP_ACTION_FIELDS)
@utils.supported_filters(optional_support_keys=['clusterhosts'])
def deploy_cluster(deployer, cluster_id, clusterhosts=None, **kwargs):
    """deploy cluster.

    Fires the async `compass.tasks.deploy` celery task for the cluster;
    does not wait for the deployment itself.

    NOTE: clusterhosts previously defaulted to a shared mutable list
    (`=[]`); it now defaults to None and is normalized when the task
    payload is built.

    :raises: Forbidden when the cluster is not editable by deployer.
    """
    # imported lazily to avoid pulling celery in at module import time.
    from compass.tasks import client as celery_client
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, deployer, permission.PERMISSION_DEPLOY_CLUSTER)
        cluster = utils.get_db_object(
            session, models.Cluster, id=cluster_id
        )
        is_cluster_editable(session, cluster, deployer)
        celery_client.celery.send_task(
            'compass.tasks.deploy',
            (cluster_id, clusterhosts or [])
        )
    return {
        'status': 'deploy action sent',
        'details': {
        }
    }
@utils.wrap_to_dict(RESP_STATE_FIELDS)
@utils.supported_filters([])
def get_cluster_state(getter, cluster_id, **kwargs):
    """Return the state info of a cluster as a dict."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_GET_CLUSTER_STATE)
        cluster = utils.get_db_object(
            session, models.Cluster, id=cluster_id
        )
        return cluster.state_dict()
@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
@utils.supported_filters([])
def get_cluster_host_state(getter, cluster_id, host_id, **kwargs):
    """Return state info of the clusterhost keyed by (cluster_id, host_id)."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_GET_CLUSTERHOST_STATE)
        clusterhost = utils.get_db_object(
            session, models.ClusterHost,
            cluster_id=cluster_id, host_id=host_id
        )
        return clusterhost.state_dict()
@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
@utils.supported_filters([])
def get_clusterhost_state(getter, clusterhost_id, **kwargs):
    """Return state info of a clusterhost addressed by its own id."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_GET_CLUSTERHOST_STATE)
        clusterhost = utils.get_db_object(
            session, models.ClusterHost, id=clusterhost_id
        )
        return clusterhost.state_dict()
@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_STATE_FIELDS
)
def update_cluster_host_state(updater, cluster_id, host_id, **kwargs):
    """Update a clusterhost state.

    Applies state/progress/message fields (UPDATED_CLUSTERHOST_STATE_FIELDS)
    to the state row of the clusterhost keyed by (cluster_id, host_id).
    Note: unlike the config mutators, no editability check is performed;
    presumably because installers report progress while the cluster is
    locked — confirm.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_UPDATE_CLUSTERHOST_STATE)
        clusterhost = utils.get_db_object(
            session, models.ClusterHost,
            cluster_id=cluster_id, host_id=host_id
        )
        # kwargs target the associated state object, not the clusterhost row.
        utils.update_db_object(session, clusterhost.state, **kwargs)
        return clusterhost.state_dict()
@utils.wrap_to_dict(RESP_CLUSTERHOST_STATE_FIELDS)
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_STATE_FIELDS
)
def update_clusterhost_state(updater, clusterhost_id, **kwargs):
    """Update a clusterhost state.

    Same operation as update_cluster_host_state but addressed by the
    clusterhost's own id.  BUGFIX: the previous body never applied
    **kwargs (it only ran is_cluster_editable), so state updates were
    silently dropped; it now persists them like its sibling does.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_UPDATE_CLUSTERHOST_STATE)
        clusterhost = utils.get_db_object(
            session, models.ClusterHost, id=clusterhost_id
        )
        # mirror update_cluster_host_state: write onto the state object.
        utils.update_db_object(session, clusterhost.state, **kwargs)
        return clusterhost.state_dict()

View File

@ -14,6 +14,7 @@
"""Provider interface to manipulate database."""
import logging
import netaddr
from contextlib import contextmanager
from sqlalchemy import create_engine
@ -21,94 +22,16 @@ from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from threading import local
from compass.db import exception
from compass.db import models
# from compass.utils import setting_wrapper as setting
from compass.utils import setting_wrapper as setting
SQLALCHEMY_DATABASE_URI = "sqlite:////tmp/app.db"
ENGINE = create_engine(SQLALCHEMY_DATABASE_URI, convert_unicode=True)
SESSION = sessionmaker()
SESSION.configure(bind=ENGINE)
SCOPED_SESSION = scoped_session(SESSION)
ENGINE = None
SESSION = sessionmaker(autocommit=False, autoflush=False)
SCOPED_SESSION = None
SESSION_HOLDER = local()
models.BASE.query = SCOPED_SESSION.query_property()
# Default permissions for Permission table
DEFAULT_PERMS = [
{"name": "create_user", "alias": "create a user"},
{"name": "delete_user", "alias": "delete a user"},
{"name": "change_permission", "alias": "change permissions of a user"},
{"name": "delete_cluster", "alias": "delete a cluster"}
]
# Adapter
ADAPTERS = ['openstack', 'ceph', 'centos', 'ubuntu']
# OS
OS = ['CentOS', 'Ubuntu']
# adapter_os (adater_id, os_id)
ADAPTER_OS_DEF = {
1: [1, 2],
2: [1],
3: [1],
4: [2]
}
# adapter roles
ROLES = [
{"name": "compute", "adapter_id": 1},
{"name": "controller", "adapter_id": 1},
{"name": "metering", "adapter_id": 1},
{"name": "network", "adapter_id": 1},
{"name": "storage", "adapter_id": 1}
]
# OS config metatdata
OS_CONFIG_META_DEF = [
{"name": "os_config", "p_id": None, 'os_id': None},
{"name": "general", "p_id": 1, 'os_id': None},
{"name": "network", "p_id": 1, 'os_id': None},
{"name": "$interface", "p_id": 3, 'os_id': None},
{"name": "ext_example_meta", "p_id": 1, 'os_id': 2},
{"name": "server_credentials", "p_id": 1, 'os_id': None}
]
# OS config field
OS_CONFIG_FIELD_DEF = [
{"name": "language", "validator": None, 'is_required': True,
'ftype': 'str'},
{"name": "timezone", "validator": None, 'is_required': True,
'ftype': 'str'},
{"name": "ip", "validator": 'is_valid_ip', 'is_required': True,
'ftype': 'str'},
{"name": "netmask", "validator": 'is_valid_netmask', 'is_required': True,
'ftype': 'str'},
{"name": "gateway", "validator": 'is_valid_gateway', 'is_required': True,
'ftype': 'str'},
{"name": "ext_example_field", "validator": None, 'is_required': True,
'ftype': 'str'},
{"name": "username", "validator": None, 'is_required': True,
'ftype': 'str'},
{"name": "password", "validator": None, 'is_required': True,
'ftype': 'str'}
]
# OS config metadata field (metadata_id, field_id)
OS_CONFIG_META_FIELD_DEF = {
2: [1, 2],
4: [3, 4, 5],
5: [6],
6: [7, 8]
}
# Cluster: Demo purpose
CLUSTER = {
"name": "demo",
"adapter_id": 1,
"os_id": 2,
"created_by": 1
}
def init(database_url):
"""Initialize database.
@ -123,6 +46,9 @@ def init(database_url):
models.BASE.query = SCOPED_SESSION.query_property()
init(setting.SQLALCHEMY_DATABASE_URI)
def in_session():
"""check if in database session scope."""
if hasattr(SESSION_HOLDER, 'session'):
@ -138,25 +64,29 @@ def session():
.. note::
To operate database, it should be called in database session.
"""
import traceback
if hasattr(SESSION_HOLDER, 'session'):
logging.error('we are already in session')
raise Exception('session already exist')
raise exception.DatabaseException('session already exist')
else:
new_session = SCOPED_SESSION()
SESSION_HOLDER.session = new_session
setattr(SESSION_HOLDER, 'session', new_session)
try:
yield new_session
new_session.commit()
except Exception as error:
new_session.rollback()
#logging.error('failed to commit session')
#logging.exception(error)
raise error
logging.error('failed to commit session')
logging.exception(error)
if isinstance(error, exception.DatabaseException):
raise error
else:
raise exception.DatabaseException(str(error))
finally:
new_session.close()
SCOPED_SESSION.remove()
del SESSION_HOLDER.session
delattr(SESSION_HOLDER, 'session')
def current_session():
@ -169,87 +99,155 @@ def current_session():
except Exception as error:
logging.error('It is not in the session scope')
logging.exception(error)
raise error
if isinstance(error, exception.DatabaseException):
raise error
else:
raise exception.DatabaseException(str(error))
# --- seed helpers -----------------------------------------------------------
# One helper per table (or table family), used by create_db()/create_table()
# to insert default rows.  Each performs a lazy `from compass.db.api import
# ...` inside the function body to avoid circular imports with this module.
def _setup_user_table(user_session):
    """Initialize default user."""
    logging.info('setup user table')
    from compass.db.api import user
    user.add_user_internal(
        user_session,
        setting.COMPASS_ADMIN_EMAIL,
        setting.COMPASS_ADMIN_PASSWORD,
        is_admin=True
    )
def _setup_permission_table(permission_session):
    """Initialize permission table."""
    logging.info('setup permission table.')
    from compass.db.api import permission
    permission.add_permissions_internal(
        permission_session
    )
def _setup_switch_table(switch_session):
    """Initialize switch table."""
    logging.info('setup switch table')
    from compass.db.api import switch
    switch.add_switch_internal(
        switch_session, long(netaddr.IPAddress(setting.DEFAULT_SWITCH_IP))
    )
def _setup_os_installers(installer_session):
    """Initialize os_installer table."""
    logging.info('setup os installer table')
    from compass.db.api import installer
    installer.add_os_installers_internal(
        installer_session
    )
def _setup_package_installers(installer_session):
    """Initialize package_installer table."""
    logging.info('setup package installer table')
    from compass.db.api import installer
    installer.add_package_installers_internal(
        installer_session
    )
def _setup_oses(os_session):
    """Initialize os table."""
    logging.info('setup os table')
    from compass.db.api import adapter
    adapter.add_oses_internal(
        os_session
    )
def _setup_distributed_systems(distributed_system_session):
    """Initialize distributed system table."""
    logging.info('setup distributed system table')
    from compass.db.api import adapter
    adapter.add_distributed_systems_internal(
        distributed_system_session
    )
def _setup_os_adapters(adapter_session):
    """Initialize os adapter table."""
    logging.info('setup os adapter table')
    from compass.db.api import adapter
    adapter.add_os_adapters_internal(
        adapter_session)
def _setup_package_adapters(adapter_session):
    """Initialize package adapter table."""
    logging.info('setup package adapter table')
    from compass.db.api import adapter
    adapter.add_package_adapters_internal(
        adapter_session)
def _setup_adapters(adapter_session):
    """Initialize adapter table."""
    logging.info('setup adapter table')
    from compass.db.api import adapter
    adapter.add_adapters_internal(adapter_session)
def _setup_os_fields(field_session):
    """Initialize os field table."""
    logging.info('setup os field table')
    from compass.db.api import metadata
    metadata.add_os_field_internal(field_session)
def _setup_package_fields(field_session):
    """Initialize package field table."""
    logging.info('setup package field table')
    from compass.db.api import metadata
    metadata.add_package_field_internal(field_session)
def _setup_os_metadatas(metadata_session):
    """Initialize os metadata table."""
    logging.info('setup os metadata table')
    from compass.db.api import metadata
    metadata.add_os_metadata_internal(metadata_session)
def _setup_package_metadatas(metadata_session):
    """Initialize package metadata table."""
    logging.info('setup package metadata table')
    from compass.db.api import metadata
    metadata.add_package_metadata_internal(metadata_session)
def _setup_package_adapter_roles(role_session):
    """Initialize package adapter role table."""
    logging.info('setup package adapter role table')
    from compass.db.api import adapter
    adapter.add_roles_internal(role_session)
def create_db():
"""Create database."""
try:
models.BASE.metadata.create_all(bind=ENGINE)
except Exception as e:
print e
with session() as _session:
# Initialize default user
user = models.User(email='admin@abc.com',
password='admin', is_admin=True)
_session.add(user)
print "Checking .....\n"
# Initialize default permissions
permissions = []
for perm in DEFAULT_PERMS:
permissions.append(models.Permission(**perm))
_session.add_all(permissions)
# Populate adapter table
adapters = []
for name in ADAPTERS:
adapters.append(models.Adapter(name=name))
_session.add_all(adapters)
# Populate adapter roles
roles = []
for entry in ROLES:
roles.append(models.AdapterRole(**entry))
_session.add_all(roles)
# Populate os table
oses = []
for name in OS:
oses.append(models.OperatingSystem(name=name))
_session.add_all(oses)
# Populate adapter_os table
for key in ADAPTER_OS_DEF:
adapter = adapters[key - 1]
for os_id in ADAPTER_OS_DEF[key]:
os = oses[os_id - 1]
adapter.support_os.append(os)
# Populate OS config metatdata
os_meta = []
for key in OS_CONFIG_META_DEF:
if key['p_id'] is None:
meta = models.OSConfigMetadata(name=key['name'],
os_id=key['os_id'])
else:
parent = os_meta[key['p_id'] - 1]
meta = models.OSConfigMetadata(name=key['name'],
os_id=key['os_id'],
parent=parent)
os_meta.append(meta)
_session.add_all(os_meta)
# Populate OS config field
os_fields = []
for field in OS_CONFIG_FIELD_DEF:
os_fields.append(models.OSConfigField(
field=field['name'], validator=field['validator'],
is_required=field['is_required'], ftype=field['ftype']))
_session.add_all(os_fields)
# Populate OS config metatdata field
for meta_id in OS_CONFIG_META_FIELD_DEF:
meta = os_meta[meta_id - 1]
for field_id in OS_CONFIG_META_FIELD_DEF[meta_id]:
field = os_fields[field_id - 1]
meta.fields.append(field)
# Populate one cluster -- DEMO PURPOSE
cluster = models.Cluster(**CLUSTER)
_session.add(cluster)
models.BASE.metadata.create_all(bind=ENGINE)
with session() as my_session:
_setup_permission_table(my_session)
_setup_user_table(my_session)
_setup_switch_table(my_session)
_setup_os_installers(my_session)
_setup_package_installers(my_session)
_setup_oses(my_session)
_setup_distributed_systems(my_session)
_setup_os_adapters(my_session)
_setup_package_adapters(my_session)
_setup_package_adapter_roles(my_session)
_setup_adapters(my_session)
_setup_os_fields(my_session)
_setup_package_fields(my_session)
_setup_os_metadatas(my_session)
_setup_package_metadatas(my_session)
def drop_db():
@ -263,6 +261,44 @@ def create_table(table):
:param table: Class of the Table defined in the model.
"""
table.__table__.create(bind=ENGINE, checkfirst=True)
with session() as my_session:
if table == models.User:
_setup_user_table(my_session)
elif table == models.Permission:
_setup_permission_table(my_session)
elif table == models.Switch:
_setup_switch_table(my_session)
elif table in [
models.OSInstaller,
models.PackageInstaller,
models.OperatingSystem,
models.DistributedSystems,
models.OSAdapter,
models.PackageAdapter,
models.Adapter
]:
_setup_os_installers(my_session)
_setup_package_installers(my_session)
_setup_os_adapters(my_session)
_setup_package_adapters(my_session)
_setup_package_adapter_roles(my_session)
_setup_adapters(my_session)
_setup_os_fields(my_session)
_setup_os_metadatas(my_session)
_setup_package_fields(my_session)
_setup_package_metadatas(my_session)
elif table == models.PackageAdapterRole:
_setup_package_adapter_roles(my_session)
elif table in [
models.OSConfigField,
models.PackageConfigField,
models.OSConfigMetadata,
models.PackageConfigMetadata
]:
_setup_os_fields(my_session)
_setup_os_metadatas(my_session)
_setup_package_fields(my_session)
_setup_package_metadatas(my_session)
def drop_table(table):

400
compass/db/api/host.py Normal file
View File

@ -0,0 +1,400 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Host database operations."""
import logging
from compass.db.api import database
from compass.db.api import metadata_holder as metadata_api
from compass.db.api import permission
from compass.db.api import user as user_api
from compass.db.api import utils
from compass.db import exception
from compass.db import models
SUPPORTED_FIELDS = ['name', 'os_name', 'owner', 'mac']
SUPPORTED_NETOWORK_FIELDS = [
'interface', 'ip', 'subnet', 'is_mgmt', 'is_promiscuous'
]
RESP_FIELDS = [
'id', 'name', 'os_name', 'owner', 'mac',
'reinstall_os', 'os_installed', 'tag', 'location',
'created_at', 'updated_at'
]
RESP_CLUSTER_FIELDS = [
'id', 'name', 'os_name', 'reinstall_distributed_system',
'distributed_system_name', 'owner', 'adapter_id',
'distributed_system_installed',
'adapter_id', 'created_at', 'updated_at'
]
RESP_NETWORK_FIELDS = [
'id', 'ip', 'interface', 'netmask', 'is_mgmt', 'is_promiscuous'
]
RESP_CONFIG_FIELDS = [
'os_config',
]
UPDATED_FIELDS = ['name', 'reinstall_os']
UPDATED_CONFIG_FIELDS = [
'put_os_config'
]
PATCHED_CONFIG_FIELDS = [
'patched_os_config'
]
ADDED_NETWORK_FIELDS = [
'interface', 'ip', 'subnet_id'
]
OPTIONAL_ADDED_NETWORK_FIELDS = ['is_mgmt', 'is_promiscuous']
UPDATED_NETWORK_FIELDS = [
'interface', 'ip', 'subnet_id', 'subnet', 'is_mgmt',
'is_promiscuous'
]
RESP_STATE_FIELDS = [
'id', 'state', 'progress', 'message'
]
UPDATED_STATE_FIELDS = [
'id', 'state', 'progress', 'message'
]
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
def list_hosts(lister, **filters):
    """Return every host matching the given filters as a list of dicts."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, lister, permission.PERMISSION_LIST_HOSTS)
        matched = utils.list_db_objects(
            session, models.Host, **filters
        )
        return [each.to_dict() for each in matched]
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters([])
def get_host(getter, host_id, **kwargs):
    """Return the host with the given id as a dict."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_HOSTS)
        host = utils.get_db_object(
            session, models.Host, id=host_id
        )
        return host.to_dict()
@utils.wrap_to_dict(RESP_CLUSTER_FIELDS)
@utils.supported_filters([])
def get_host_clusters(getter, host_id, **kwargs):
    """Return the clusters the host belongs to, as a list of dicts."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_HOST_CLUSTERS)
        host = utils.get_db_object(
            session, models.Host, id=host_id
        )
        return [
            membership.cluster.to_dict()
            for membership in host.clusterhosts
        ]
def _conditional_exception(host, exception_when_not_editable):
    """Report that `host` is not editable.

    Raises Forbidden when exception_when_not_editable is truthy,
    otherwise returns False so callers can branch on the result.
    """
    if not exception_when_not_editable:
        return False
    raise exception.Forbidden(
        'host %s is not editable' % host.name
    )
def is_host_editable(
    session, host, user,
    reinstall_os_set=False, exception_when_not_editable=True
):
    """Check whether `user` is allowed to modify `host`.

    Rules (in order):
    * when the pending change sets reinstall_os, an INSTALLING host is
      not editable;
    * otherwise a host whose reinstall_os flag is off is not editable;
    * only the host's creator or an admin may edit.

    :param reinstall_os_set: True when the pending update turns
        reinstall_os on.
    :param exception_when_not_editable: when True (default) a failed
        check raises exception.Forbidden; when False it returns False.
    :returns: True when editable, False only when
        exception_when_not_editable is False.
    """
    with session.begin(subtransactions=True):
        if reinstall_os_set:
            # reinstall request conflicts with an in-flight installation.
            if host.state.state == 'INSTALLING':
                return _conditional_exception(
                    host, exception_when_not_editable
                )
        elif not host.reinstall_os:
            # already installed and not flagged for reinstall: frozen.
            return _conditional_exception(
                host, exception_when_not_editable
            )
        if not user.is_admin and host.creator_id != user.id:
            # ownership check: creator or admin only.
            return _conditional_exception(
                host, exception_when_not_editable
            )
        return True
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters(UPDATED_FIELDS)
def update_host(updater, host_id, **kwargs):
    """Apply updates (name / reinstall_os) to the host with the given id."""
    reinstall_requested = kwargs.get('reinstall_os', False)
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_UPDATE_HOST)
        host_obj = utils.get_db_object(
            session, models.Host, id=host_id
        )
        is_host_editable(
            session, host_obj, updater,
            reinstall_os_set=reinstall_requested
        )
        utils.update_db_object(session, host_obj, **kwargs)
        return host_obj.to_dict()
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters([])
def del_host(deleter, host_id, **kwargs):
    """Delete the host with the given id; returns its last snapshot."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, deleter, permission.PERMISSION_DEL_HOST)
        host_obj = utils.get_db_object(
            session, models.Host, id=host_id
        )
        is_host_editable(session, host_obj, deleter)
        utils.del_db_object(session, host_obj)
        return host_obj.to_dict()
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
@utils.supported_filters([])
def get_host_config(getter, host_id, **kwargs):
    """Return the os_config of the host with the given id."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_HOST_CONFIG)
        host_obj = utils.get_db_object(
            session, models.Host, id=host_id
        )
        return host_obj.to_dict()
def _update_host_config(updater, host_id, **kwargs):
    """Update host config.

    Shared worker for update_host_config (put_os_config) and
    patch_host_config (patched_os_config).  Resets config_validated,
    applies kwargs, then re-validates the resulting os_config — if any —
    against the adapter metadata.  kwargs are presumably translated to
    os_config by the model's setters — confirm against models.Host.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_HOST_CONFIG)
        host = utils.get_db_object(
            session, models.Host, id=host_id
        )
        # raises Forbidden when updater may not modify this host.
        is_host_editable(session, host, updater)
        utils.update_db_object(session, host, config_validated=False, **kwargs)
        os_config = host.os_config
        if os_config:
            metadata_api.validate_os_config(
                os_config, host.adapter_id
            )
        return host.to_dict()
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
@utils.supported_filters(UPDATED_CONFIG_FIELDS)
def update_host_config(updater, host_id, **kwargs):
    """Replace a host's os config (PUT semantics; expects put_os_config)."""
    return _update_host_config(updater, host_id, **kwargs)
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
@utils.supported_filters(PATCHED_CONFIG_FIELDS)
def patch_host_config(updater, host_id, **kwargs):
    """Partially update a host's os config (PATCH; expects patched_os_config)."""
    return _update_host_config(updater, host_id, **kwargs)
@utils.wrap_to_dict(RESP_CONFIG_FIELDS)
@utils.supported_filters([])
def del_host_config(deleter, host_id):
    """Clear a host's os config (reset to {}) and drop the validated flag."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, deleter, permission.PERMISSION_DEL_HOST_CONFIG)
        host_obj = utils.get_db_object(
            session, models.Host, id=host_id
        )
        is_host_editable(session, host_obj, deleter)
        utils.update_db_object(
            session, host_obj, os_config={}, config_validated=False
        )
        return host_obj.to_dict()
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
@utils.supported_filters(
    optional_support_keys=SUPPORTED_NETOWORK_FIELDS
)
def list_host_networks(lister, host_id, **filters):
    """List networks of one host, optionally narrowed by filters."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, lister, permission.PERMISSION_LIST_HOST_NETWORKS)
        networks = utils.list_db_objects(
            session, models.HostNetwork,
            host_id=host_id, **filters
        )
        return [network.to_dict() for network in networks]
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
@utils.supported_filters(
    optional_support_keys=SUPPORTED_NETOWORK_FIELDS
)
def list_hostnetworks(lister, **filters):
    """List host networks across all hosts, narrowed by filters."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, lister, permission.PERMISSION_LIST_HOST_NETWORKS)
        networks = utils.list_db_objects(
            session, models.HostNetwork, **filters
        )
        return [network.to_dict() for network in networks]
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
@utils.supported_filters([])
def get_host_network(getter, host_id, subnet_id, **kwargs):
    """Return the host network keyed by (host_id, subnet_id)."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_HOST_NETWORKS)
        network = utils.get_db_object(
            session, models.HostNetwork,
            host_id=host_id, subnet_id=subnet_id
        )
        return network.to_dict()
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
@utils.supported_filters([])
def get_hostnetwork(getter, host_network_id, **kwargs):
    """Return a host network addressed by its own id."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_HOST_NETWORKS)
        network = utils.get_db_object(
            session, models.HostNetwork,
            id=host_network_id
        )
        return network.to_dict()
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
@utils.supported_filters(
    ADDED_NETWORK_FIELDS, optional_support_keys=OPTIONAL_ADDED_NETWORK_FIELDS
)
def add_host_network(creator, host_id, **kwargs):
    """Create a host network.

    Required kwargs: interface, ip, subnet_id (ADDED_NETWORK_FIELDS);
    optional: is_mgmt, is_promiscuous.

    :raises: Forbidden (via is_host_editable) when creator may not
        modify the host.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, creator, permission.PERMISSION_ADD_HOST_NETWORK)
        host = utils.get_db_object(
            session, models.Host, id=host_id
        )
        is_host_editable(session, host, creator)
        # host_id is passed positionally after the boolean flag; the True
        # presumably means "raise when the row already exists" — confirm
        # against utils.add_db_object.
        host_network = utils.add_db_object(
            session, models.HostNetwork, True,
            host_id, **kwargs
        )
        return host_network.to_dict()
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
@utils.supported_filters(
    optional_support_keys=UPDATED_NETWORK_FIELDS
)
def update_host_network(updater, host_id, subnet_id, **kwargs):
    """Update the host network keyed by (host_id, subnet_id)."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_HOST_NETWORK)
        network = utils.get_db_object(
            session, models.HostNetwork,
            host_id=host_id, subnet_id=subnet_id
        )
        is_host_editable(session, network.host, updater)
        utils.update_db_object(session, network, **kwargs)
        return network.to_dict()
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
@utils.supported_filters(
    optional_support_keys=UPDATED_NETWORK_FIELDS
)
def update_hostnetwork(updater, host_network_id, **kwargs):
    """Update a host network addressed by its own id.

    Consistency fix: UPDATED_NETWORK_FIELDS were declared as required
    filter keys here while the (host_id, subnet_id) sibling treats them
    as optional; both now accept partial updates.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_HOST_NETWORK)
        host_network = utils.get_db_object(
            session, models.HostNetwork, id=host_network_id
        )
        is_host_editable(session, host_network.host, updater)
        utils.update_db_object(session, host_network, **kwargs)
        return host_network.to_dict()
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
@utils.supported_filters([])
def del_host_network(deleter, host_id, subnet_id, **kwargs):
    """Delete the host network keyed by (host_id, subnet_id)."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, deleter, permission.PERMISSION_DEL_HOST_NETWORK)
        network = utils.get_db_object(
            session, models.HostNetwork,
            host_id=host_id, subnet_id=subnet_id
        )
        is_host_editable(session, network.host, deleter)
        utils.del_db_object(session, network)
        return network.to_dict()
@utils.wrap_to_dict(RESP_NETWORK_FIELDS)
@utils.supported_filters([])
def del_hostnetwork(deleter, host_network_id, **kwargs):
    """Delete a host network addressed by its own id."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, deleter, permission.PERMISSION_DEL_HOST_NETWORK)
        network = utils.get_db_object(
            session, models.HostNetwork, id=host_network_id
        )
        is_host_editable(session, network.host, deleter)
        utils.del_db_object(session, network)
        return network.to_dict()
@utils.wrap_to_dict(RESP_STATE_FIELDS)
@utils.supported_filters([])
def get_host_state(getter, host_id, **kwargs):
    """Return the state info of a host as a dict."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_GET_HOST_STATE)
        host_obj = utils.get_db_object(
            session, models.Host, id=host_id
        )
        return host_obj.state_dict()
@utils.wrap_to_dict(RESP_STATE_FIELDS)
@utils.supported_filters(UPDATED_STATE_FIELDS)
def update_host_state(updater, host_id, **kwargs):
    """Update the state of a host and return the new state dict."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_UPDATE_HOST_STATE
        )
        host = utils.get_db_object(session, models.Host, id=host_id)
        # the state row hangs off the host; update it in place.
        utils.update_db_object(session, host.state, **kwargs)
        return host.state_dict()

View File

@ -0,0 +1,49 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adapter database operations."""
import logging
import os
from compass.db.api import database
from compass.db.api import utils
from compass.db import exception
from compass.db import models
from compass.utils import setting_wrapper as setting
from compass.utils import util
def _add_installers(session, model, configs):
    """Create one installer row of `model` per loaded config dict."""
    return [
        utils.add_db_object(
            session, model,
            True, config['NAME'],
            installer_type=config['TYPE'],
            config=config['CONFIG']
        )
        for config in configs
    ]
def add_os_installers_internal(session):
    """Load os installer configs from disk and persist them."""
    installer_configs = util.load_configs(setting.OS_INSTALLER_DIR)
    with session.begin(subtransactions=True):
        return _add_installers(
            session, models.OSInstaller, installer_configs
        )
def add_package_installers_internal(session):
    """Load package installer configs from disk and persist them."""
    installer_configs = util.load_configs(setting.PACKAGE_INSTALLER_DIR)
    with session.begin(subtransactions=True):
        return _add_installers(
            session, models.PackageInstaller, installer_configs
        )

155
compass/db/api/machine.py Normal file
View File

@ -0,0 +1,155 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Switch database operations."""
import logging
from compass.db.api import database
from compass.db.api import permission
from compass.db.api import user as user_api
from compass.db.api import utils
from compass.db import exception
from compass.db import models
from compass.utils import setting_wrapper as setting
from compass.utils import util
# Filter keys accepted by list_machines.
SUPPORTED_FIELDS = ['mac', 'tag']
# Fields accepted by update_machine (full replacement of each field).
UPDATED_FIELDS = ['ipmi_credentials', 'tag', 'location']
# Fields accepted by patch_machine (partial merge into each field).
PATCHED_FIELDS = [
    'patched_ipmi_credentials', 'patched_tag',
    'patched_location'
]
# Fields exposed in machine API responses.
RESP_FIELDS = [
    'id', 'mac', 'ipmi_credentials',
    'tag', 'location', 'created_at', 'updated_at'
]
def _check_ipmi_credentials_ip(ip):
    """Validate the 'ip' field of ipmi credentials via the shared ip check."""
    utils.check_ip(ip)
def _check_ipmi_credentials(ipmi_credentials):
if not ipmi_credentials:
return
if not isinstance(ipmi_credentials, dict):
raise exception.InvalidParameter(
'invalid ipmi credentials %s' % ipmi_credentials
)
for key in ipmi_credentials:
if key not in ['ip', 'username', 'password']:
raise exception.InvalidParameter(
'unrecognized field %s in ipmi credentials %s' % (
key, ipmi_credentials
)
)
for key in ['ip', 'username', 'password']:
if key not in ipmi_credentials:
raise exception.InvalidParameter(
'no field %s in ipmi credentials %s' % (
key, ipmi_credentials
)
)
check_ipmi_credential_field = '_check_ipmi_credentials_%s' % key
this_module = globals()
if hasattr(this_module, check_ipmi_credential_field):
getattr(this_module, check_ipmi_credential_field)(
ipmi_credentials[key]
)
else:
logging.debug(
'function %s is not defined', check_ipmi_credential_field
)
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters([])
def get_machine(getter, machine_id, **kwargs):
    """Return the field dict of one machine."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_MACHINES
        )
        machine = utils.get_db_object(
            session, models.Machine, True, id=machine_id
        )
        return machine.to_dict()
@utils.output_filters(
    tag=utils.general_filter_callback,
    location=utils.general_filter_callback
)
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters(
    optional_support_keys=SUPPORTED_FIELDS
)
def list_machines(lister, **filters):
    """Return field dicts of all machines matching the given filters."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, lister, permission.PERMISSION_LIST_MACHINES
        )
        machines = utils.list_db_objects(
            session, models.Machine, **filters
        )
        return [item.to_dict() for item in machines]
def _update_machine(updater, machine_id, **kwargs):
    """Shared worker for update_machine/patch_machine.

    Applies kwargs to the machine row and validates the resulting
    ipmi credentials before returning the machine dict.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_MACHINE
        )
        machine = utils.get_db_object(
            session, models.Machine, id=machine_id
        )
        utils.update_db_object(session, machine, **kwargs)
        updated = machine.to_dict()
        # validate the stored credentials on the merged result.
        utils.validate_outputs(
            {'ipmi_credentials': _check_ipmi_credentials},
            updated
        )
        return updated
@utils.wrap_to_dict(RESP_FIELDS)
@utils.input_validates(ipmi_credentials=_check_ipmi_credentials)
@utils.supported_filters(optional_support_keys=UPDATED_FIELDS)
def update_machine(updater, machine_id, **kwargs):
    """Replace a machine's updatable fields wholesale."""
    return _update_machine(updater, machine_id, **kwargs)
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters(optional_support_keys=PATCHED_FIELDS)
def patch_machine(updater, machine_id, **kwargs):
    """Merge partial updates into a machine's fields."""
    return _update_machine(updater, machine_id, **kwargs)
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters()
def del_machine(deleter, machine_id, **kwargs):
    """Delete a machine.

    Args:
        deleter: the user performing the deletion.
        machine_id: id of the machine to delete.

    Returns:
        dict of the deleted machine.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, deleter, permission.PERMISSION_DEL_MACHINE)
        # query models.Machine: the original queried models.Switch here,
        # so it deleted the switch whose id happened to equal machine_id.
        machine = utils.get_db_object(session, models.Machine, id=machine_id)
        utils.del_db_object(session, machine)
        return machine.to_dict()

245
compass/db/api/metadata.py Normal file
View File

@ -0,0 +1,245 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metadata related database operations."""
import logging
from compass.db.api import database
from compass.db.api import utils
from compass.db import exception
from compass.db import models
from compass.db import validator
from compass.utils import setting_wrapper as setting
from compass.utils import util
def _add_field_internal(session, model, configs):
    """Create one config-field row of `model` per loaded config dict."""
    return [
        utils.add_db_object(
            session, model, True,
            config['NAME'],
            field_type=config.get('FIELD_TYPE', basestring),
            display_type=config.get('DISPLAY_TYPE', 'text'),
            validator=config.get('VALIDATOR', None),
            js_validator=config.get('JS_VALIDATOR', None),
            description=config.get('DESCRIPTION', None)
        )
        for config in configs
    ]
def add_os_field_internal(session):
    """Load os config-field definitions and persist them."""
    field_configs = util.load_configs(
        setting.OS_FIELD_DIR,
        env_locals=validator.VALIDATOR_LOCALS
    )
    with session.begin(subtransactions=True):
        return _add_field_internal(
            session, models.OSConfigField, field_configs
        )
def add_package_field_internal(session):
    """Load package config-field definitions and persist them."""
    field_configs = util.load_configs(
        setting.PACKAGE_FIELD_DIR,
        env_locals=validator.VALIDATOR_LOCALS
    )
    with session.begin(subtransactions=True):
        return _add_field_internal(
            session, models.PackageConfigField, field_configs
        )
def _add_metadata(
    session, field_model, metadata_model, name, config,
    parent=None, adapter=None
):
    """Recursively persist a metadata tree loaded from config.

    The '_self' key of `config` describes this node; every other key is
    a child subtree added with this node as its parent.

    Returns the created metadata db object for this node.
    """
    metadata = config.get('_self', {})
    # use logging rather than a bare print so the output honors the
    # configured log handlers (a print statement also breaks python 3).
    logging.debug('add metadata %s to adapter %s', metadata, adapter)
    if 'field' in metadata:
        field = utils.get_db_object(
            session, field_model, field=metadata['field']
        )
    else:
        field = None
    # renamed from `object`, which shadowed the builtin.
    metadata_object = utils.add_db_object(
        session, metadata_model, True,
        name, adapter=adapter, parent=parent, field=field,
        display_name=metadata.get('display_name', name),
        description=metadata.get('description', None),
        is_required=metadata.get('is_required', False),
        required_in_whole_config=metadata.get(
            'required_in_whole_config', False
        ),
        mapping_to=metadata.get('mapping_to', None),
        validator=metadata.get('validator', None),
        js_validator=metadata.get('js_validator', None),
        default_value=metadata.get('default_value', None),
        options=metadata.get('options', []),
        required_in_options=metadata.get('required_in_options', False)
    )
    for key, value in config.items():
        # compare with != : the original `key not in '_self'` was a
        # substring test, which also skipped keys such as 'self' or 's'.
        if key != '_self':
            _add_metadata(
                session, field_model, metadata_model, key, value,
                parent=metadata_object, adapter=adapter,
            )
    return metadata_object
def add_os_metadata_internal(session):
    """Load os metadata config trees and persist them per os adapter."""
    results = []
    configs = util.load_configs(
        setting.OS_METADATA_DIR,
        env_locals=validator.VALIDATOR_LOCALS
    )
    with session.begin(subtransactions=True):
        for config in configs:
            os_adapter = utils.get_db_object(
                session, models.OSAdapter, name=config['ADAPTER']
            )
            for name, subtree in config['METADATA'].items():
                results.append(_add_metadata(
                    session, models.OSConfigField,
                    models.OSConfigMetadata,
                    name, subtree, parent=None,
                    adapter=os_adapter
                ))
    return results
def add_package_metadata_internal(session):
    """Load package metadata config trees and persist them per adapter."""
    results = []
    configs = util.load_configs(
        setting.PACKAGE_METADATA_DIR,
        env_locals=validator.VALIDATOR_LOCALS
    )
    with session.begin(subtransactions=True):
        for config in configs:
            package_adapter = utils.get_db_object(
                session, models.PackageAdapter, name=config['ADAPTER']
            )
            for name, subtree in config['METADATA'].items():
                results.append(_add_metadata(
                    session, models.PackageConfigField,
                    models.PackageConfigMetadata,
                    name, subtree, parent=None,
                    adapter=package_adapter
                ))
    return results
def get_metadatas_internal(session):
    """Return a mapping of adapter id to that adapter's metadata dict."""
    mapping = {}
    with session.begin(subtransactions=True):
        for adapter in utils.list_db_objects(session, models.Adapter):
            mapping[adapter.id] = adapter.metadata_dict()
    return mapping
def _validate_self(
config_path, config_key, config, metadata, whole_check
):
if '_self' not in metadata:
return
field_type = metadata['_self'].get('field_type', 'basestring')
if not isinstance(config, field_type):
raise exception.InvalidParameter(
'%s config type is not %s' % (config_path, field_type)
)
required_in_options = metadata['_self'].get(
'required_in_options', False
)
options = metadata['_self'].get('options', [])
if required_in_options:
if field_type in [int, basestring, float, bool]:
if config not in options:
raise exception.InvalidParameter(
'%s config is not in %s' % (config_path, options)
)
elif field_type in [list, tuple]:
if not set(config).issubset(set(options)):
raise exception.InvalidParameter(
'%s config is not in %s' % (config_path, options)
)
elif field_type == dict:
if not set(config.keys()).issubset(set(options)):
raise exception.InvalidParameter(
'%s config is not in %s' % (config_path, options)
)
validator = metadata['_self'].get('validator', None)
if validator:
if not validator(config_key, config):
raise exception.InvalidParameter(
'%s config is invalid' % config_path
)
if issubclass(field_type, dict):
_validate_config(config_path, config, metadata, whole_check)
def _validate_config(config_path, config, metadata, whole_check):
generals = {}
specified = {}
for key, value in metadata.items():
if key.startswith('$'):
generals[key] = value
elif key.startswith('_'):
pass
else:
specified[key] = value
config_keys = set(config.keys())
specified_keys = set(specified.keys())
intersect_keys = config_keys & specified_keys
not_found_keys = config_keys - specified_keys
redundant_keys = specified_keys - config_keys
for key in redundant_keys:
if '_self' not in specified[key]:
continue
if specified[key]['_self'].get('is_required', False):
raise exception.InvalidParameter(
'%s/%s does not find is_required' % (
config_path, key
)
)
if (
whole_check and
specified[key]['_self'].get(
'required_in_whole_config', False
)
):
raise exception.InvalidParameter(
'%s/%s does not find required_in_whole_config' % (
config_path, key
)
)
for key in intersect_keys:
_validate_self(
'%s/%s' % (config_path, key),
key, config[key], specified[key], whole_check
)
for key in not_found_keys:
for general_key, general_value in generals.items():
_validate_self(
'%s/%s' % (config_path, key),
key, config[key], general_value, whole_check
)
def validate_config_internal(config, metadata, whole_check):
    """Validate a whole config dict against a metadata tree, from the root."""
    _validate_config('', config, metadata, whole_check)

View File

@ -0,0 +1,96 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metadata related object holder."""
import logging
from compass.db.api import database
from compass.db.api import metadata as metadata_api
from compass.db.api import permission
from compass.db.api import user as user_api
from compass.db.api import utils
from compass.db import exception
def load_metadatas():
    """Read all adapters' metadata dicts from the database."""
    with database.session() as session:
        return metadata_api.get_metadatas_internal(session)
# Loaded once at import time; metadata changes after import are not
# picked up until the process restarts.
METADATA_MAPPING = load_metadatas()
def _validate_config(
    config, adapter_id,
    metadata_mapping, metadata_field, whole_check
):
    """Validate `config` against one metadata field of an adapter."""
    if adapter_id not in metadata_mapping:
        raise exception.InvalidParameter(
            'adapter id %s is not found in metadata mapping' % adapter_id
        )
    adapter_metadatas = metadata_mapping[adapter_id]
    if metadata_field not in adapter_metadatas:
        # the adapter declares no metadata for this field; nothing to check.
        return
    metadata_api.validate_config_internal(
        config, adapter_metadatas[metadata_field], whole_check
    )
def validate_os_config(config, adapter_id, whole_check=False):
    """Validate an os config dict against the adapter's os metadata."""
    _validate_config(
        config, adapter_id,
        METADATA_MAPPING, 'os_config', whole_check
    )
def validate_package_config(config, adapter_id, whole_check=False):
    """Validate a package config dict against the adapter's metadata."""
    _validate_config(
        config, adapter_id,
        METADATA_MAPPING, 'package_config', whole_check
    )
def _filter_metadata(metadata):
if not isinstance(metadata, dict):
return metadata
filtered_metadata = {}
for key, value in metadata.items():
if key == '_self':
filtered_metadata[key] = {
'name': value['name'],
'description': value.get('description', None),
'is_required': value['is_required'],
'required_in_whole_config': value['required_in_whole_config'],
'js_validator': value.get('js_validator', None),
'options': value.get('options', []),
'required_in_options': value['required_in_options'],
'field_type': value['field_type_data'],
'display_type': value.get('display_type', None),
}
else:
filtered_metadata[key] = _filter_metadata(value)
return filtered_metadata
@utils.supported_filters([])
def get_metadata(getter, adapter_id, **kwargs):
    """Return the filtered metadata for one adapter."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_METADATAS
        )
    if adapter_id not in METADATA_MAPPING:
        raise exception.RecordNotExists(
            'adpater %s does not exist' % adapter_id
        )
    return _filter_metadata(METADATA_MAPPING[adapter_id])

111
compass/db/api/network.py Normal file
View File

@ -0,0 +1,111 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network related database operations."""
import logging
import netaddr
from compass.db.api import database
from compass.db.api import permission
from compass.db.api import user as user_api
from compass.db.api import utils
from compass.db import exception
from compass.db import models
# Filter keys accepted by list_subnets.
SUPPORTED_FIELDS = ['subnet']
# Fields exposed in subnet API responses.
RESP_FIELDS = ['id', 'subnet', 'created_at', 'updated_at']
# Required fields for add_subnet.
ADDED_FIELDS = ['subnet']
# Fields accepted by update_subnet.
UPDATED_FIELDS = ['subnet']
def _check_subnet(subnet):
    """Raise InvalidParameter unless subnet parses as a CIDR network."""
    try:
        netaddr.IPNetwork(subnet)
    except Exception as error:
        # record the parse failure before translating it to an API error.
        logging.exception(error)
        raise exception.InvalidParameter(
            'subnet %s format unrecognized' % subnet)
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
def list_subnets(lister, **filters):
    """Return field dicts of all subnets matching the given filters."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, lister, permission.PERMISSION_LIST_NETWORKS
        )
        networks = utils.list_db_objects(
            session, models.Network, **filters
        )
        return [network.to_dict() for network in networks]
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters([])
def get_subnet(getter, subnet_id, **kwargs):
    """Return the field dict of one subnet."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_NETWORKS
        )
        network = utils.get_db_object(
            session, models.Network, id=subnet_id
        )
        return network.to_dict()
@utils.wrap_to_dict(RESP_FIELDS)
@utils.input_validates(subnet=_check_subnet)
@utils.supported_filters(ADDED_FIELDS)
def add_subnet(creator, subnet, **kwargs):
    """Create a subnet.

    Args:
        creator: the user creating the subnet.
        subnet: CIDR string, validated by _check_subnet.

    Returns:
        dict of the created network.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, creator, permission.PERMISSION_ADD_NETWORK)
        network = utils.add_db_object(
            session, models.Network, True, subnet
        )
        network_dict = network.to_dict()
        # use logging instead of the leftover debug print so the output
        # honors the configured log level (print also breaks python 3).
        logging.debug('network: %s', network_dict)
        return network_dict
@utils.wrap_to_dict(RESP_FIELDS)
@utils.input_validates(subnet=_check_subnet)
@utils.supported_filters(UPDATED_FIELDS)
def update_subnet(updater, subnet_id, **kwargs):
    """Update a subnet and return its field dict."""
    with database.session() as session:
        # updating uses the add-network permission; there is no separate
        # update permission for networks.
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_NETWORK
        )
        network = utils.get_db_object(
            session, models.Network, id=subnet_id
        )
        utils.update_db_object(session, network, **kwargs)
        return network.to_dict()
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters([])
def del_subnet(deleter, subnet_id, **kwargs):
    """Delete a subnet and return its field dict."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, deleter, permission.PERMISSION_DEL_NETWORK
        )
        network = utils.get_db_object(
            session, models.Network, id=subnet_id
        )
        utils.del_db_object(session, network)
        return network.to_dict()

View File

@ -0,0 +1,294 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Permission database operations."""
from compass.db.api import database
from compass.db.api import utils
from compass.db import exception
from compass.db import models
# Filter keys accepted by list_permissions.
SUPPORTED_FIELDS = ['name', 'alias', 'description']
# Fields exposed in permission API responses.
RESP_FIELDS = ['id', 'name', 'alias', 'description']
class PermissionWrapper(object):
    """In-memory permission descriptor used to seed the Permission table."""

    def __init__(self, name, alias, description):
        self.name = name
        self.alias = alias
        self.description = description

    def to_dict(self):
        """Return the wrapper's fields as a plain dict."""
        return dict(
            name=self.name,
            alias=self.alias,
            description=self.description
        )
# Static permission catalog. Each wrapper is persisted to the Permission
# table by add_permissions_internal and referenced by the db.api modules
# when checking a user's rights.
PERMISSION_LIST_PERMISSIONS = PermissionWrapper(
    'list_permissions', 'list permissions', 'list all permissions'
)
# -- switch permissions --
PERMISSION_LIST_SWITCHES = PermissionWrapper(
    'list_switches', 'list switches', 'list all switches'
)
PERMISSION_ADD_SWITCH = PermissionWrapper(
    'add_switch', 'add switch', 'add switch'
)
PERMISSION_DEL_SWITCH = PermissionWrapper(
    'delete_switch', 'delete switch', 'delete switch'
)
PERMISSION_LIST_SWITCH_MACHINES = PermissionWrapper(
    'list_switch_machines', 'list switch machines', 'list switch machines'
)
PERMISSION_ADD_SWITCH_MACHINE = PermissionWrapper(
    'add_switch_machine', 'add switch machine', 'add switch machine'
)
PERMISSION_DEL_SWITCH_MACHINE = PermissionWrapper(
    'del_switch_machine', 'delete switch machine', 'del switch machine'
)
PERMISSION_UPDATE_SWITCH_MACHINES = PermissionWrapper(
    'update_switch_machines',
    'update switch machines',
    'update switch machines'
)
# -- machine permissions --
PERMISSION_LIST_MACHINES = PermissionWrapper(
    'list_machines', 'list machines', 'list machines'
)
PERMISSION_ADD_MACHINE = PermissionWrapper(
    'add_machine', 'add machine', 'add machine'
)
PERMISSION_DEL_MACHINE = PermissionWrapper(
    'delete_machine', 'delete machine', 'delete machine'
)
# -- adapter/metadata/network permissions --
PERMISSION_LIST_ADAPTERS = PermissionWrapper(
    'list_adapters', 'list adapters', 'list adapters'
)
PERMISSION_LIST_METADATAS = PermissionWrapper(
    'list_metadatas', 'list metadatas', 'list metadatas'
)
PERMISSION_LIST_NETWORKS = PermissionWrapper(
    'list_networks', 'list networks', 'list networks'
)
PERMISSION_ADD_NETWORK = PermissionWrapper(
    'add_network', 'add network', 'add network'
)
PERMISSION_DEL_NETWORK = PermissionWrapper(
    'del_network', 'del network', 'del network'
)
# -- cluster permissions --
PERMISSION_LIST_CLUSTERS = PermissionWrapper(
    'list_clusters', 'list clusters', 'list clusters'
)
PERMISSION_ADD_CLUSTER = PermissionWrapper(
    'add_cluster', 'add cluster', 'add cluster'
)
PERMISSION_DEL_CLUSTER = PermissionWrapper(
    'del_cluster', 'del cluster', 'del cluster'
)
PERMISSION_LIST_CLUSTER_CONFIG = PermissionWrapper(
    'list_cluster_config', 'list cluster config', 'list cluster config'
)
PERMISSION_ADD_CLUSTER_CONFIG = PermissionWrapper(
    'add_cluster_config', 'add cluster config', 'add cluster config'
)
PERMISSION_DEL_CLUSTER_CONFIG = PermissionWrapper(
    'del_cluster_config', 'del cluster config', 'del cluster config'
)
PERMISSION_UPDATE_CLUSTER_HOSTS = PermissionWrapper(
    'update_cluster_hosts',
    'update cluster hosts',
    'update cluster hosts'
)
PERMISSION_DEL_CLUSTER_HOST = PermissionWrapper(
    'del_clusterhost', 'delete clusterhost', 'delete clusterhost'
)
PERMISSION_REVIEW_CLUSTER = PermissionWrapper(
    'review_cluster', 'review cluster', 'review cluster'
)
PERMISSION_DEPLOY_CLUSTER = PermissionWrapper(
    'deploy_cluster', 'deploy cluster', 'deploy cluster'
)
PERMISSION_GET_CLUSTER_STATE = PermissionWrapper(
    'get_cluster_state', 'get cluster state', 'get cluster state'
)
# -- host permissions --
PERMISSION_LIST_HOSTS = PermissionWrapper(
    'list_hosts', 'list hosts', 'list hosts'
)
PERMISSION_LIST_HOST_CLUSTERS = PermissionWrapper(
    'list_host_clusters',
    'list host clusters',
    'list host clusters'
)
PERMISSION_UPDATE_HOST = PermissionWrapper(
    'update_host', 'update host', 'update host'
)
PERMISSION_DEL_HOST = PermissionWrapper(
    'del_host', 'del host', 'del host'
)
PERMISSION_LIST_HOST_CONFIG = PermissionWrapper(
    'list_host_config', 'list host config', 'list host config'
)
PERMISSION_ADD_HOST_CONFIG = PermissionWrapper(
    'add_host_config', 'add host config', 'add host config'
)
PERMISSION_DEL_HOST_CONFIG = PermissionWrapper(
    'del_host_config', 'del host config', 'del host config'
)
PERMISSION_LIST_HOST_NETWORKS = PermissionWrapper(
    'list_host_networks',
    'list host networks',
    'list host networks'
)
PERMISSION_ADD_HOST_NETWORK = PermissionWrapper(
    'add_host_network', 'add host network', 'add host network'
)
PERMISSION_DEL_HOST_NETWORK = PermissionWrapper(
    'del_host_network', 'del host network', 'del host network'
)
PERMISSION_GET_HOST_STATE = PermissionWrapper(
    'get_host_state', 'get host state', 'get host state'
)
# NOTE(review): the alias below misspells 'state' as 'sate'; it is a
# runtime string persisted to the db, so fixing it would change API output.
PERMISSION_UPDATE_HOST_STATE = PermissionWrapper(
    'update_host_state', 'update host sate', 'update host state'
)
# -- clusterhost permissions --
PERMISSION_LIST_CLUSTERHOSTS = PermissionWrapper(
    'list_clusterhosts', 'list cluster hosts', 'list cluster hosts'
)
PERMISSION_LIST_CLUSTERHOST_CONFIG = PermissionWrapper(
    'list_clusterhost_config',
    'list clusterhost config',
    'list clusterhost config'
)
PERMISSION_ADD_CLUSTERHOST_CONFIG = PermissionWrapper(
    'add_clusterhost_config',
    'add clusterhost config',
    'add clusterhost config'
)
PERMISSION_DEL_CLUSTERHOST_CONFIG = PermissionWrapper(
    'del_clusterhost_config',
    'del clusterhost config',
    'del clusterhost config'
)
PERMISSION_GET_CLUSTERHOST_STATE = PermissionWrapper(
    'get_clusterhost_state',
    'get clusterhost state',
    'get clusterhost state'
)
PERMISSION_UPDATE_CLUSTERHOST_STATE = PermissionWrapper(
    'update_clusterhost_state',
    'update clusterhost state',
    'update clusterhost state'
)
# All permissions seeded into the database by add_permissions_internal.
PERMISSIONS = [
    PERMISSION_LIST_PERMISSIONS,
    PERMISSION_LIST_SWITCHES,
    PERMISSION_ADD_SWITCH,
    PERMISSION_DEL_SWITCH,
    PERMISSION_LIST_SWITCH_MACHINES,
    PERMISSION_ADD_SWITCH_MACHINE,
    PERMISSION_DEL_SWITCH_MACHINE,
    PERMISSION_UPDATE_SWITCH_MACHINES,
    PERMISSION_LIST_MACHINES,
    PERMISSION_ADD_MACHINE,
    PERMISSION_DEL_MACHINE,
    PERMISSION_LIST_ADAPTERS,
    PERMISSION_LIST_METADATAS,
    PERMISSION_LIST_NETWORKS,
    PERMISSION_ADD_NETWORK,
    PERMISSION_DEL_NETWORK,
    PERMISSION_LIST_CLUSTERS,
    PERMISSION_ADD_CLUSTER,
    PERMISSION_DEL_CLUSTER,
    PERMISSION_LIST_CLUSTER_CONFIG,
    PERMISSION_ADD_CLUSTER_CONFIG,
    PERMISSION_DEL_CLUSTER_CONFIG,
    PERMISSION_UPDATE_CLUSTER_HOSTS,
    PERMISSION_DEL_CLUSTER_HOST,
    PERMISSION_REVIEW_CLUSTER,
    PERMISSION_DEPLOY_CLUSTER,
    PERMISSION_GET_CLUSTER_STATE,
    PERMISSION_LIST_HOSTS,
    PERMISSION_LIST_HOST_CLUSTERS,
    PERMISSION_UPDATE_HOST,
    PERMISSION_DEL_HOST,
    PERMISSION_LIST_HOST_CONFIG,
    PERMISSION_ADD_HOST_CONFIG,
    PERMISSION_DEL_HOST_CONFIG,
    PERMISSION_LIST_HOST_NETWORKS,
    PERMISSION_ADD_HOST_NETWORK,
    PERMISSION_DEL_HOST_NETWORK,
    PERMISSION_GET_HOST_STATE,
    PERMISSION_UPDATE_HOST_STATE,
    PERMISSION_LIST_CLUSTERHOSTS,
    PERMISSION_LIST_CLUSTERHOST_CONFIG,
    PERMISSION_ADD_CLUSTERHOST_CONFIG,
    PERMISSION_DEL_CLUSTERHOST_CONFIG,
    PERMISSION_GET_CLUSTERHOST_STATE,
    PERMISSION_UPDATE_CLUSTERHOST_STATE,
]
def list_permissions_internal(session, **filters):
    """List permission db objects; used only by other db.api modules."""
    return utils.list_db_objects(session, models.Permission, **filters)
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
def list_permissions(lister, **filters):
    """Return field dicts of all permissions matching the filters."""
    # imported lazily to avoid a circular import with the user module.
    from compass.db.api import user as user_api
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, lister, PERMISSION_LIST_PERMISSIONS
        )
        found = utils.list_db_objects(
            session, models.Permission, **filters
        )
        return [item.to_dict() for item in found]
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters()
def get_permission(getter, permission_id, **kwargs):
    """Return the field dict of one permission."""
    # imported lazily to avoid a circular import with the user module.
    from compass.db.api import user as user_api
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, PERMISSION_LIST_PERMISSIONS
        )
        found = utils.get_db_object(
            session, models.Permission, id=permission_id
        )
        return found.to_dict()
def add_permissions_internal(session):
    """Seed the Permission table from PERMISSIONS; db.api internal only."""
    created = []
    with session.begin(subtransactions=True):
        for wrapper in PERMISSIONS:
            created.append(
                utils.add_db_object(
                    session, models.Permission,
                    True,
                    wrapper.name,
                    alias=wrapper.alias,
                    description=wrapper.description
                )
            )
    return created

771
compass/db/api/switch.py Normal file
View File

@ -0,0 +1,771 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Switch database operations."""
import logging
import netaddr
import re
from compass.db.api import database
from compass.db.api import permission
from compass.db.api import user as user_api
from compass.db.api import utils
from compass.db import exception
from compass.db import models
from compass.utils import setting_wrapper as setting
# Filter keys accepted when listing switches.
SUPPORTED_FIELDS = ['ip_int', 'vendor', 'state']
SUPPORTED_FILTER_FIELDS = ['ip_int', 'vendor', 'state']
# Filter keys accepted when listing switch machines (with/without switch).
SUPPORTED_SWITCH_MACHINES_FIELDS = ['ip_int', 'port', 'vlans', 'mac', 'tag']
SUPPORTED_MACHINES_FIELDS = ['port', 'vlans', 'mac', 'tag']
# Required/optional fields for adding a switch.
ADDED_FIELDS = ['ip']
OPTIONAL_ADDED_FIELDS = ['credentials', 'vendor', 'state', 'err_msg']
# Fields accepted by full update vs. partial patch of a switch.
UPDATED_FIELDS = ['credentials', 'vendor', 'state', 'err_msg']
PATCHED_FIELDS = ['patched_credentials']
# Fields accepted when updating/patching a switch's machine filters.
UPDATED_FILTERS_FIELDS = ['filters']
PATCHED_FILTERS_FIELDS = ['patched_filters']
# Required/optional fields for adding a machine under a switch.
ADDED_MACHINES_FIELDS = ['mac', 'port']
OPTIONAL_ADDED_MACHINES_FIELDS = [
    'vlans', 'ipmi_credentials', 'tag', 'location'
]
# Required/optional keys of one machine filter entry.
CHECK_FILTER_FIELDS = ['filter_name', 'filter_type']
OPTIONAL_CHECK_FILTER_FIELDS = [
    'ports', 'port_prefix', 'port_suffix',
    'port_start', 'port_end'
]
ALL_ADDED_MACHINES_FIELDS = ['port', 'vlans']
# Fields accepted when updating/patching a switch machine association.
UPDATED_MACHINES_FIELDS = [
    'port', 'vlans', 'ipmi_credentials',
    'tag', 'location'
]
UPDATED_SWITCH_MACHINES_FIELDS = ['port', 'vlans']
PATCHED_MACHINES_FIELDS = [
    'patched_vlans', 'patched_ipmi_credentials',
    'patched_tag', 'patched_location'
]
PATCHED_SWITCH_MACHINES_FIELDS = ['patched_vlans']
# Response field sets for the switch API endpoints.
RESP_FIELDS = [
    'id', 'ip', 'credentials', 'vendor', 'state', 'err_msg',
    'created_at', 'updated_at'
]
RESP_FILTERS_FIELDS = [
    'id', 'ip', 'filters', 'created_at', 'updated_at'
]
RESP_ACTION_FIELDS = [
    'status', 'details'
]
RESP_MACHINES_FIELDS = [
    'id', 'switch_id', 'machine_id', 'port', 'vlans', 'mac',
    'ipmi_credentials', 'tag', 'location',
    'created_at', 'updated_at'
]
def _check_credentials_version(version):
if version not in ['1', '2c', '3']:
raise exception.InvalidParameter(
'unknown snmp version %s' % version
)
def _check_credentials(credentials):
if not credentials:
return
if not isinstance(credentials, dict):
raise exception.InvalidParameter(
'credentials %s is not dict' % credentials
)
for key in credentials:
if key not in ['version', 'community']:
raise exception.InvalidParameter(
'unrecognized key %s in credentials %s' % (key, credentials)
)
for key in ['version', 'community']:
if key not in credentials:
raise exception.InvalidParameter(
'there is no %s field in credentials %s' % (key, credentials)
)
key_check_func_name = '_check_credentials_%s' % key
this_module = globals()
if key_check_func_name in this_module:
this_module[key_check_func_name](
credentials[key]
)
else:
logging.debug(
'function %s is not defined in %s',
key_check_func_name, this_module
)
def _check_filter(switch_filter):
    """Validate one switch machine filter dict.

    Raises:
        exception.InvalidParameter: if it is not a dict or its keys/values
            fail _check_filter_internal.
    """
    if not isinstance(switch_filter, dict):
        raise exception.InvalidParameter(
            'filter %s is not dict' % switch_filter
        )
    # expand the dict into keyword args so supported_filters can check keys.
    _check_filter_internal(**switch_filter)
@utils.supported_filters(
    CHECK_FILTER_FIELDS, optional_support_keys=OPTIONAL_CHECK_FILTER_FIELDS
)
def _check_filter_internal(
    filter_name, filter_type, **switch_filter
):
    """Validate the fields of one switch machine filter.

    Args:
        filter_name: name of the filter.
        filter_type: must be 'allow' or 'deny'.
        **switch_filter: optional port constraints
            (ports, port_prefix, port_suffix, port_start, port_end).

    Raises:
        exception.InvalidParameter: if a field is malformed.
    """
    if filter_type not in ['allow', 'deny']:
        raise exception.InvalidParameter(
            'filter_type should be `allow` or `deny` in %s' % switch_filter
        )
    if 'ports' in switch_filter:
        if not isinstance(switch_filter['ports'], list):
            raise exception.InvalidParameter(
                '`ports` is not list in filter %s' % switch_filter
            )
    for key in ['port_start', 'port_end']:
        if key in switch_filter:
            if not isinstance(switch_filter[key], int):
                # name the offending key: the original message printed the
                # literal text `key` and misspelled 'filter' as 'filer'.
                raise exception.InvalidParameter(
                    '`%s` is not int in filter %s' % (key, switch_filter)
                )
def _check_vlan(vlan):
if not isinstance(vlan, int):
raise exception.InvalidParameter(
'vlan %s is not int' % vlan
)
def add_switch_internal(
    session, ip_int, exception_when_existing=True, **kwargs
):
    """Create a switch row keyed by integer ip, with default filters.

    Internal helper used by other db.api modules; runs inside a
    subtransaction so callers may already hold a transaction.
    """
    with session.begin(subtransactions=True):
        return utils.add_db_object(
            session, models.Switch, exception_when_existing, ip_int,
            filters=setting.SWITCHES_DEFAULT_FILTERS, **kwargs
        )
def get_switch_internal(
    session, exception_when_missing=True, **kwargs
):
    """Get switch.

    Internal helper used by other db.api modules; kwargs are column
    filters passed to utils.get_db_object.
    """
    with session.begin(subtransactions=True):
        return utils.get_db_object(
            session, models.Switch, exception_when_missing,
            **kwargs
        )
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters([])
def get_switch(getter, switch_id, **kwargs):
    """get field dict of a switch.

    Requires the list-switches permission; raises if switch_id does
    not exist.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_SWITCHES)
        return utils.get_db_object(
            session, models.Switch, id=switch_id
        ).to_dict()
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
def list_switches(lister, **filters):
    """List switches matching the optional column filters."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, lister, permission.PERMISSION_LIST_SWITCHES)
        return [
            switch.to_dict()
            for switch in utils.list_db_objects(
                session, models.Switch, **filters
            )
        ]
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters([])
def del_switch(deleter, switch_id, **kwargs):
    """Delete a switch and return the deleted switch's field dict."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, deleter, permission.PERMISSION_DEL_SWITCH)
        switch = utils.get_db_object(session, models.Switch, id=switch_id)
        utils.del_db_object(session, switch)
        # to_dict() is taken after deletion but before session close,
        # while the attributes are still loaded on the instance.
        return switch.to_dict()
@utils.wrap_to_dict(RESP_FIELDS)
@utils.input_validates(
    ip=utils.check_ip,
    credentials=_check_credentials
)
@utils.supported_filters(
    ADDED_FIELDS,
    optional_support_keys=OPTIONAL_ADDED_FIELDS
)
def add_switch(creator, ip, **kwargs):
    """Create a switch.

    The dotted-quad ip is stored as an integer primary key; `long` is
    Python 2 only (consistent with the rest of this module).
    """
    ip_int = long(netaddr.IPAddress(ip))
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, creator, permission.PERMISSION_ADD_SWITCH)
        return add_switch_internal(
            session, ip_int, **kwargs
        ).to_dict()
def update_switch_internal(session, switch, **kwargs):
    """update switch. Internal helper for other db.api modules."""
    with session.begin(subtransactions=True):
        return utils.update_db_object(
            session, switch,
            **kwargs
        )
def _update_switch(updater, switch_id, **kwargs):
    """Update a switch; shared implementation of update/patch_switch.

    Returns the updated switch as a dict after re-validating the
    credentials field (a patch may have merged partial values into it).
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_SWITCH)
        switch = utils.get_db_object(
            session, models.Switch, id=switch_id
        )
        utils.update_db_object(session, switch, **kwargs)
        switch_dict = switch.to_dict()
        utils.validate_outputs(
            {'credentials': _check_credentials},
            switch_dict
        )
        return switch_dict
@utils.wrap_to_dict(RESP_FIELDS)
@utils.input_validates(credentials=_check_credentials)
@utils.supported_filters(optional_support_keys=UPDATED_FIELDS)
def update_switch(updater, switch_id, **kwargs):
    """Update a switch and return the updated switch dict.

    Bug fix: the result of _update_switch was previously dropped, so
    the wrap_to_dict decorator always serialized None instead of the
    updated switch.
    """
    return _update_switch(updater, switch_id, **kwargs)
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters(optional_support_keys=PATCHED_FIELDS)
def patch_switch(updater, switch_id, **kwargs):
    """Patch a switch and return the updated switch dict.

    Bug fix: the result of _update_switch was previously dropped, so
    the wrap_to_dict decorator always serialized None instead of the
    patched switch.
    """
    return _update_switch(updater, switch_id, **kwargs)
@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
@utils.supported_filters(optional_support_keys=SUPPORTED_FILTER_FIELDS)
def list_switch_filters(lister, **filters):
    """list switch filters.

    Returns the filter view (RESP_FILTERS_FIELDS) of every switch
    matching the optional column filters.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, lister, permission.PERMISSION_LIST_SWITCHES
        )
        return [
            switch.to_dict()
            for switch in utils.list_db_objects(
                session, models.Switch, **filters
            )
        ]
@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
@utils.supported_filters()
def get_switch_filters(getter, switch_id, **kwargs):
    """get switch filter view of one switch."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_SWITCHES)
        return utils.get_db_object(
            session, models.Switch, id=switch_id
        ).to_dict()
@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
@utils.input_validates(filters=_check_filter)
@utils.supported_filters(optional_support_keys=UPDATED_FILTERS_FIELDS)
def update_switch_filters(updater, switch_id, **kwargs):
    """Update a switch filter (full replacement of the filters field)."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_SWITCH)
        switch = utils.get_db_object(session, models.Switch, id=switch_id)
        utils.update_db_object(session, switch, **kwargs)
        return switch.to_dict()
@utils.wrap_to_dict(RESP_FILTERS_FIELDS)
@utils.input_validates(patched_filters=_check_filter)
@utils.supported_filters(optional_support_keys=PATCHED_FILTERS_FIELDS)
def patch_switch_filter(updater, switch_id, **kwargs):
    """Patch a switch's filters (merge semantics via patched_filters)."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_SWITCH)
        switch = utils.get_db_object(session, models.Switch, id=switch_id)
        utils.update_db_object(session, switch, **kwargs)
        return switch.to_dict()
def filter_machine_internal(filters, port):
    """Return True if ``port`` passes the switch's port filters.

    Each filter either lists explicit ports or describes a
    prefix/suffix pattern with an optional numeric range; the first
    filter that decides the port wins.  A port matched by no filter is
    allowed.
    """
    for port_filter in filters:
        logging.debug('apply filter %s on port %s', port_filter, port)
        filter_allowed = port_filter['filter_type'] == 'allow'
        if 'ports' in port_filter:
            if port in port_filter['ports']:
                logging.debug('port is allowed? %s', filter_allowed)
                return filter_allowed
            else:
                logging.debug('port is allowed? %s', not filter_allowed)
                return not filter_allowed
        port_prefix = port_filter.get('port_prefix', '')
        port_suffix = port_filter.get('port_suffix', '')
        pattern = re.compile(r'%s(\d+)%s' % (port_prefix, port_suffix))
        match = pattern.match(port)
        if match:
            logging.debug(
                'port %s matches pattern %s',
                port, pattern.pattern
            )
            # Bug fix: match.group(1) is a string; comparing it against
            # the int range bounds gave wrong results.  Convert to int,
            # consistent with _filter_port below.
            port_number = int(match.group(1))
            if (
                'port_start' not in port_filter or
                port_number >= port_filter['port_start']
            ) and (
                'port_end' not in port_filter or
                port_number <= port_filter['port_end']
            ):
                logging.debug('port is allowed? %s', filter_allowed)
                return filter_allowed
        else:
            logging.debug(
                'port %s does not match pattern %s',
                port, pattern.pattern
            )
    return True
def get_switch_machines_internal(session, **filters):
    """List SwitchMachine rows by column filters; internal helper."""
    with session.begin(subtransactions=True):
        return utils.list_db_objects(
            session, models.SwitchMachine, **filters
        )
def _filter_port(port_filter, obj):
port_prefix = port_filter.get('startswith', '')
port_suffix = port_filter.get('endswith', '')
pattern = re.compile(r'%s(\d+)%s' % (port_prefix, port_suffix))
match = pattern.match(obj)
if not match:
return False
port_number = int(match.group(1))
if (
'resp_lt' in port_filter and
port_number >= port_filter['resp_lt']
):
return False
if (
'resp_le' in port_filter and
port_number > port_filter['resp_le']
):
return False
if (
'resp_gt' in port_filter and
port_number <= port_filter['resp_gt']
):
return False
if (
'resp_ge' in port_filter and
port_number < port_filter['resp_ge']
):
return False
if 'resp_range' in port_filter:
in_range = False
for port_start, port_end in port_filter['resp_range']:
if port_start <= port_number <= port_end:
in_range = True
break
if not in_range:
return False
return True
def _filter_vlans(vlan_filter, obj):
vlans = set(obj)
if 'resp_in' in vlan_filter:
resp_vlans = set(vlan_filter['resp_in'])
if not (vlans & resp_vlans):
return False
return True
@utils.output_filters(port=_filter_port, vlans=_filter_vlans)
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
@utils.supported_filters(optional_support_keys=SUPPORTED_MACHINES_FIELDS)
def list_switch_machines(getter, switch_id, **filters):
    """Get switch machines of one switch.

    Machines whose port is rejected by the switch's port filters are
    excluded before the response filters apply.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_SWITCH_MACHINES)
        switch_machines = get_switch_machines_internal(
            session, switch_id=switch_id, **filters
        )
        return [
            switch_machine.to_dict() for switch_machine in switch_machines
            if filter_machine_internal(
                switch_machine.switch.filters,
                switch_machine.port
            )
        ]
@utils.output_filters(port=_filter_port, vlans=_filter_vlans)
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
@utils.supported_filters(
    optional_support_keys=SUPPORTED_SWITCH_MACHINES_FIELDS
)
def list_switchmachines(lister, **filters):
    """List switch machines across all switches (same filtering rules)."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, lister, permission.PERMISSION_LIST_SWITCH_MACHINES)
        switch_machines = [
            switch_machine
            for switch_machine in get_switch_machines_internal(
                session, **filters
            )
            if filter_machine_internal(
                switch_machine.switch.filters, switch_machine.port
            )
        ]
        return [
            switch_machine.to_dict()
            for switch_machine in switch_machines
        ]
def add_switch_machines_internal(
    session, switch, machine_dicts,
    exception_when_switch_machine_existing=True
):
    """Create machines and attach them to a switch.

    machine_dicts maps mac -> attribute dict; port/vlans go to the
    SwitchMachine association, everything else to the Machine row.
    Every machine is additionally registered on the default switch
    (setting.DEFAULT_SWITCH_IP) unless ``switch`` is that switch.
    Returns the switch_machines of every switch touched.
    """
    with session.begin(subtransactions=True):
        machine_id_switch_machine_dict = {}
        for mac, all_dict in machine_dicts.items():
            switch_machine_dict = {}
            machine_dict = {}
            # Split incoming attributes between association and machine.
            for key, value in all_dict.items():
                if key in ALL_ADDED_MACHINES_FIELDS:
                    switch_machine_dict[key] = value
                else:
                    machine_dict[key] = value
            # TODO(xiaodong): add ipmi field checks.
            machine = utils.add_db_object(
                session, models.Machine, False,
                mac, **machine_dict)
            machine_id_switch_machine_dict[machine.id] = switch_machine_dict
        switches = [switch]
        if switch.ip != setting.DEFAULT_SWITCH_IP:
            switches.append(utils.get_db_object(
                session, models.Switch,
                ip_int=long(netaddr.IPAddress(setting.DEFAULT_SWITCH_IP))
            ))
        switch_machines = []
        for machine_switch in switches:
            for machine_id, switch_machine_dict in (
                machine_id_switch_machine_dict.items()
            ):
                utils.add_db_object(
                    session, models.SwitchMachine,
                    exception_when_switch_machine_existing,
                    machine_switch.id, machine_id, **switch_machine_dict
                )
            switch_machines.extend(machine_switch.switch_machines)
        return switch_machines
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
@utils.input_validates(mac=utils.check_mac, vlans=_check_vlan)
@utils.supported_filters(
    ADDED_MACHINES_FIELDS,
    optional_support_keys=OPTIONAL_ADDED_MACHINES_FIELDS
)
def add_switch_machine(creator, switch_id, mac, port, **kwargs):
    """Add one machine (by mac/port) to a switch."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, creator, permission.PERMISSION_ADD_SWITCH_MACHINE)
        switch = utils.get_db_object(
            session, models.Switch, id=switch_id)
        # port travels with the rest of the machine attributes.
        kwargs['port'] = port
        switch_machines = add_switch_machines_internal(
            session, switch, {mac: kwargs})
        return switch_machines[0].to_dict()
@utils.wrap_to_dict(RESP_ACTION_FIELDS)
@utils.supported_filters()
def poll_switch_machines(poller, switch_id, **kwargs):
    """poll switch machines.

    Fire-and-forget: dispatches a celery pollswitch task and returns
    immediately with a status message; results arrive asynchronously.
    """
    # Imported lazily to avoid a circular import at module load time.
    from compass.tasks import client as celery_client
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, poller, permission.PERMISSION_UPDATE_SWITCH_MACHINES)
        switch = utils.get_db_object(session, models.Switch, id=switch_id)
        celery_client.celery.send_task(
            'compass.tasks.pollswitch',
            (switch.ip, switch.credentials)
        )
        return {
            'status': 'find_machines action sent',
            'details': {
            }
        }
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
@utils.supported_filters([])
def get_switch_machine(getter, switch_id, machine_id, **kwargs):
    """get field dict of a switch machine by (switch_id, machine_id)."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_SWITCH_MACHINES)
        return utils.get_db_object(
            session, models.SwitchMachine,
            switch_id=switch_id, machine_id=machine_id
        ).to_dict()
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
@utils.supported_filters([])
def get_switchmachine(getter, switch_machine_id, **kwargs):
    """get field dict of a switch machine by its own id."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, getter, permission.PERMISSION_LIST_SWITCH_MACHINES)
        return utils.get_db_object(
            session, models.SwitchMachine, id=switch_machine_id
        ).to_dict()
def update_switch_machine_internal(
    session, switch_machine, switch_machines_fields, **kwargs
):
    """Update switch machine internal.

    Splits kwargs between the SwitchMachine association (keys listed in
    switch_machines_fields) and the underlying Machine row, and updates
    each side within a subtransaction.
    """
    switch_machine_dict = {}
    machine_dict = {}
    for key, value in kwargs.items():
        if key in switch_machines_fields:
            switch_machine_dict[key] = value
        else:
            machine_dict[key] = value
    with session.begin(subtransactions=True):
        utils.update_db_object(
            session, switch_machine, **switch_machine_dict
        )
        if machine_dict:
            utils.update_db_object(
                session, switch_machine.machine, **machine_dict
            )
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
@utils.input_validates(vlans=_check_vlan)
@utils.supported_filters(optional_support_keys=UPDATED_MACHINES_FIELDS)
def update_switch_machine(updater, switch_id, machine_id, **kwargs):
    """Update a switch machine addressed by (switch_id, machine_id)."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_SWITCH_MACHINE)
        switch_machine = utils.get_db_object(
            session, models.SwitchMachine,
            switch_id=switch_id, machine_id=machine_id
        )
        update_switch_machine_internal(
            session, switch_machine,
            UPDATED_SWITCH_MACHINES_FIELDS, **kwargs
        )
        return switch_machine.to_dict()
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
@utils.input_validates(vlans=_check_vlan)
@utils.supported_filters(optional_support_keys=UPDATED_MACHINES_FIELDS)
def update_switchmachine(updater, switch_machine_id, **kwargs):
    """Update a switch machine addressed by its own id."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_SWITCH_MACHINE)
        switch_machine = utils.get_db_object(
            session, models.SwitchMachine,
            id=switch_machine_id
        )
        update_switch_machine_internal(
            session, switch_machine,
            UPDATED_SWITCH_MACHINES_FIELDS, **kwargs
        )
        return switch_machine.to_dict()
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
@utils.input_validates(patched_vlans=_check_vlan)
@utils.supported_filters(optional_support_keys=PATCHED_MACHINES_FIELDS)
def patch_switch_machine(updater, switch_id, machine_id, **kwargs):
    """Patch (merge) a switch machine by (switch_id, machine_id)."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_SWITCH_MACHINE)
        switch_machine = utils.get_db_object(
            session, models.SwitchMachine,
            switch_id=switch_id, machine_id=machine_id
        )
        update_switch_machine_internal(
            session, switch_machine,
            PATCHED_SWITCH_MACHINES_FIELDS, **kwargs
        )
        return switch_machine.to_dict()
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
@utils.input_validates(patched_vlans=_check_vlan)
@utils.supported_filters(optional_support_keys=PATCHED_MACHINES_FIELDS)
def patch_switchmachine(updater, switch_machine_id, **kwargs):
    """Patch (merge) a switch machine by its own id."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_ADD_SWITCH_MACHINE)
        switch_machine = utils.get_db_object(
            session, models.SwitchMachine,
            id=switch_machine_id
        )
        update_switch_machine_internal(
            session, switch_machine,
            PATCHED_SWITCH_MACHINES_FIELDS, **kwargs
        )
        return switch_machine.to_dict()
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
@utils.supported_filters()
def del_switch_machine(deleter, switch_id, machine_id, **kwargs):
    """Delete one switch machine by (switch_id, machine_id)."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, deleter, permission.PERMISSION_DEL_SWITCH_MACHINE
        )
        switch_machine = utils.get_db_object(
            session, models.SwitchMachine,
            switch_id=switch_id, machine_id=machine_id
        )
        utils.del_db_object(session, switch_machine)
        return switch_machine.to_dict()
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
@utils.supported_filters()
def del_switchmachine(deleter, switch_machine_id, **kwargs):
    """Delete one switch machine by its own id."""
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, deleter, permission.PERMISSION_DEL_SWITCH_MACHINE
        )
        switch_machine = utils.get_db_object(
            session, models.SwitchMachine,
            id=switch_machine_id
        )
        utils.del_db_object(session, switch_machine)
        return switch_machine.to_dict()
@utils.supported_filters(optional_support_keys=UPDATED_SWITCH_MACHINES_FIELDS)
def _update_machine_internal(session, switch_id, machine_id, **kwargs):
    """Upsert one SwitchMachine association (no error if it exists)."""
    with session.begin(subtransactions=True):
        utils.add_db_object(
            session, models.SwitchMachine, False, switch_id, machine_id,
            **kwargs
        )
def _add_machines(session, switch, machines):
    """Attach machines (machine_id -> association attrs) to a switch."""
    for machine_id, switch_machine_attrs in machines.items():
        _update_machine_internal(
            session, switch.id, machine_id, **switch_machine_attrs
        )
def _remove_machines(session, switch, machines):
    """Detach the given machine ids from a switch."""
    with session.begin(subtransactions=True):
        utils.del_db_objects(
            session, models.SwitchMachine,
            switch_id=switch.id, machine_id=machines
        )
def _set_machines(session, switch, machines):
    """Replace a switch's machines with exactly the given mapping."""
    with session.begin(subtransactions=True):
        utils.del_db_objects(
            session, models.SwitchMachine,
            switch_id=switch.id
        )
    for machine_id, switch_machine_attrs in machines.items():
        _update_machine_internal(
            session, switch.id, machine_id, **switch_machine_attrs
        )
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
@utils.supported_filters(
    optional_support_keys=[
        'add_machines', 'remove_machines', 'set_machines'
    ]
)
def update_switch_machines(
    updater, switch_id,
    add_machines=[], remove_machines=[],
    set_machines=None, **kwargs
):
    """update switch machines.

    Applies removals, then additions, then a full replacement (if
    set_machines is given), and returns the switch's resulting
    machines.
    NOTE(review): the mutable list defaults are harmless here since
    they are only truth-tested, never mutated.
    """
    with database.session() as session:
        user_api.check_user_permission_internal(
            session, updater, permission.PERMISSION_UPDATE_SWITCH_MACHINES)
        switch = utils.get_db_object(
            session, models.Switch, id=switch_id
        )
        if remove_machines:
            _remove_machines(
                session, switch, remove_machines
            )
        if add_machines:
            _add_machines(
                session, switch, add_machines
            )
        if set_machines is not None:
            _set_machines(
                session, switch,
                set_machines
            )
        return [
            switch_machine.to_dict()
            for switch_machine in switch.switch_machines
        ]

View File

@ -13,126 +13,471 @@
# limitations under the License.
"""User database operations."""
import datetime
from flask.ext.login import UserMixin
from compass.db import api
from compass.db.api import database
from compass.db.api.utils import wrap_to_dict
from compass.db.exception import DuplicatedRecord
from compass.db.exception import Forbidden
from compass.db.exception import RecordNotExists
from compass.db.models import User
from compass.db.api import permission
from compass.db.api import utils
from compass.db import exception
from compass.db import models
from compass.utils import setting_wrapper as setting
from compass.utils import util
SUPPORTED_FILTERS = ['email', 'admin']
UPDATED_FIELDS = ['firstname', 'lastname', 'password']
RESP_FIELDS = ['id', 'email', 'is_admin', 'active', 'firstname',
'lastname', 'created_at', 'last_login_at']
ERROR_MSG = {
'findNoUser': 'Cannot find the user, ID is %d',
'duplicatedUser': 'User already exists!',
'forbidden': 'User has no permission to make this request.'
}
SUPPORTED_FIELDS = ['email', 'is_admin', 'active']
PERMISSION_SUPPORTED_FIELDS = ['name']
SELF_UPDATED_FIELDS = ['email', 'firstname', 'lastname', 'password']
ADMIN_UPDATED_FIELDS = ['is_admin', 'active']
UPDATED_FIELDS = [
'email', 'firstname', 'lastname', 'password', 'is_admin', 'active'
]
ADDED_FIELDS = ['email', 'password']
OPTIONAL_ADDED_FIELDS = ['is_admin', 'active']
PERMISSION_ADDED_FIELDS = ['permission_id']
RESP_FIELDS = [
'id', 'email', 'is_admin', 'active', 'firstname',
'lastname', 'created_at', 'updated_at'
]
RESP_TOKEN_FIELDS = [
'id', 'user_id', 'token', 'expire_timestamp'
]
PERMISSION_RESP_FIELDS = [
'id', 'user_id', 'permission_id', 'name', 'alias', 'description',
'created_at', 'updated_at'
]
@wrap_to_dict(RESP_FIELDS)
def get_user(user_id):
def _check_email(email):
if '@' not in email:
raise exception.InvalidParameter(
'there is no @ in email address %s.' % email
)
def get_user_internal(session, exception_when_missing=True, **kwargs):
    """internal function used only by other db.api modules."""
    return utils.get_db_object(
        session, models.User, exception_when_missing, **kwargs
    )
def add_user_internal(
    session, email, password,
    exception_when_existing=True, **kwargs
):
    """internal function used only by other db.api modules.

    Creates the user and grants the deployment's default permissions
    (setting.COMPASS_DEFAULT_PERMISSIONS) to it.
    """
    user = utils.add_db_object(session, models.User,
                               exception_when_existing, email,
                               password=password, **kwargs)
    _add_user_permissions(
        session, user,
        name=setting.COMPASS_DEFAULT_PERMISSIONS
    )
    return user
def _check_user_permission(session, user, permission):
    """Check user has permission.

    Admins implicitly hold every permission; otherwise the user must
    have a UserPermission row matching the permission's name.
    Raises exception.Forbidden when the permission is missing.
    """
    with session.begin(subtransactions=True):
        if user.is_admin:
            return
        user_permission = utils.get_db_object(
            session, models.UserPermission,
            False, user_id=user.id, name=permission.name
        )
        if not user_permission:
            raise exception.Forbidden(
                'user %s does not have permission %s' % (
                    user.email, permission.name
                )
            )
def check_user_permission_internal(session, user, permission):
    """internal function only used by other db.api modules."""
    _check_user_permission(session, user, permission)
def _add_user_permissions(session, user, **permission_filters):
    """add permissions to a user.

    permission_filters select Permission rows; each selected permission
    is linked to the user (duplicates are silently ignored).
    """
    # Imported lazily to avoid a circular import between api modules.
    from compass.db.api import permission as permission_api
    with session.begin(subtransactions=True):
        for api_permission in permission_api.list_permissions_internal(
            session, **permission_filters
        ):
            utils.add_db_object(
                session, models.UserPermission, False,
                user.id, api_permission.id
            )
def _remove_user_permissions(session, user, **permission_filters):
    """remove permissions from a user.

    Deletes the user's UserPermission rows whose permission matches
    permission_filters.
    """
    from compass.db.api import permission as permission_api
    with session.begin(subtransactions=True):
        permission_ids = []
        for api_permission in permission_api.list_permissions_internal(
            session, **permission_filters
        ):
            permission_ids.append(api_permission.id)
        utils.del_db_objects(
            session, models.UserPermission,
            user_id=user.id, permission_id=permission_ids
        )
def _set_user_permissions(session, user, **permission_filters):
    """set permissions to a user.

    Replaces all of the user's existing permissions with the ones
    selected by permission_filters.
    """
    with session.begin(subtransactions=True):
        # Bug fix: the delete previously filtered on
        # permission_id=permission.id, but `permission` here is the
        # imported module (it has no .id), which would raise
        # AttributeError.  A full replace must clear ALL of the user's
        # permissions first.
        utils.del_db_objects(
            session, models.UserPermission,
            user_id=user.id
        )
        _add_user_permissions(session, user, **permission_filters)
class UserWrapper(UserMixin):
    """Flask-login user object wrapping a user record plus auth token."""
    def __init__(
        self, id, email, crypted_password,
        active, is_admin, expire_timestamp, token='', **kwargs
    ):
        # Extra user-record fields arrive in kwargs and are ignored.
        self.id = id
        self.email = email
        self.password = crypted_password
        self.active = active
        self.is_admin = is_admin
        self.expire_timestamp = expire_timestamp
        if not token:
            self.token = self.get_auth_token()
        else:
            self.token = token
        super(UserWrapper, self).__init__()
    def authenticate(self, password):
        """Raise Unauthorized unless password matches the stored hash."""
        # NOTE(review): util.encrypt(password, self.password) appears to
        # re-hash using the stored hash as salt (crypt-style) -- confirm
        # against compass.utils.util.encrypt.
        if not util.encrypt(password, self.password) == self.password:
            raise exception.Unauthorized('%s password mismatch' % self.email)
    def get_auth_token(self):
        """Derive an auth token from the user's email address."""
        return util.encrypt(self.email)
    def is_active(self):
        # flask-login hook: inactive users may not log in.
        return self.active
    def get_id(self):
        # flask-login stores this value in the session; the token is
        # used as the session identifier.
        return self.token
    def is_authenticated(self):
        # The token is considered valid until expire_timestamp passes.
        current_time = datetime.datetime.now()
        return current_time < self.expire_timestamp
    def __str__(self):
        return '%s[email:%s,password:%s]' % (
            self.__class__.__name__, self.email, self.password)
def get_user_object(email, **kwargs):
with database.session() as session:
user = _get_user(session, user_id)
user_info = user.to_dict()
return user_info
user_dict = utils.get_db_object(
session, models.User, email=email
).to_dict()
user_dict.update(kwargs)
return UserWrapper(**user_dict)
@wrap_to_dict(RESP_FIELDS)
def list_users(filters=None):
"""List all users, optionally filtered by some fields."""
def get_user_object_from_token(token):
expire_timestamp = {
'ge': datetime.datetime.now()
}
with database.session() as session:
users = _list_users(session, filters)
users_list = [user.to_dict() for user in users]
return users_list
user_token = utils.get_db_object(
session, models.UserToken,
token=token, expire_timestamp=expire_timestamp
)
user_dict = utils.get_db_object(
session, models.User, id=user_token.user_id
).to_dict()
user_dict['token'] = token
user_dict['expire_timestamp'] = user_token.expire_timestamp
return UserWrapper(**user_dict)
@wrap_to_dict(RESP_FIELDS)
def add_user(creator_id, email, password, firstname=None, lastname=None):
"""Create a user."""
REQUIRED_PERM = 'create_user'
@utils.wrap_to_dict(RESP_TOKEN_FIELDS)
@utils.supported_filters()
def record_user_token(user, token, expire_timestamp):
    """record user token in database."""
    with database.session() as session:
        user_token = utils.add_db_object(
            session, models.UserToken, True,
            token, user_id=user.id,
            expire_timestamp=expire_timestamp
        )
        return user_token.to_dict()
creator = _get_user(session, creator_id)
if not creator.is_admin or REQUIRED_PERM not in creator.permissions:
@utils.wrap_to_dict(RESP_TOKEN_FIELDS)
@utils.supported_filters()
def clean_user_token(user, token):
    """clean user token in database.

    Deletes every UserToken row carrying this token value and returns
    the deleted rows.
    """
    with database.session() as session:
        user_tokens = utils.del_db_objects(
            session, models.UserToken,
            token=token
        )
        return [user_token.to_dict() for user_token in user_tokens]
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters()
def get_user(getter, user_id, **kwargs):
    """get field dict of a user.

    Only admins may read other users; everyone may read themselves.
    """
    with database.session() as session:
        user = utils.get_db_object(session, models.User, id=user_id)
        if not getter.is_admin and getter.id != user_id:
            # The user is not allowed to get user
            raise exception.Forbidden(
                'User %s has no permission to list user %s.' % (
                    getter.email, user.email
                )
            )
        return user.to_dict()
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters(
    optional_support_keys=SUPPORTED_FIELDS
)
def list_users(lister, **filters):
    """List fields of all users by some fields. Admin only."""
    with database.session() as session:
        if not lister.is_admin:
            # The user is not allowed to list users
            raise exception.Forbidden(
                'User %s has no permission to list users.' % (
                    lister.email
                )
            )
        return [
            user.to_dict()
            for user in utils.list_db_objects(
                session, models.User, **filters
            )
        ]
@utils.wrap_to_dict(RESP_FIELDS)
@utils.input_validates(email=_check_email)
@utils.supported_filters(
ADDED_FIELDS, optional_support_keys=OPTIONAL_ADDED_FIELDS
)
def add_user(creator, email, password, **kwargs):
"""Create a user and return created user object."""
with database.session() as session:
if not creator.is_admin:
# The user is not allowed to create a user.
err_msg = ERROR_MSG['forbidden']
raise Forbidden(err_msg)
raise exception.Forbidden(
'User %s has no permission to create user.' % (
creator.email
)
)
if session.query(User).filter_by(email=email).first():
# The user already exists!
err_msg = ERROR_MSG['duplicatedUser']
raise DuplicatedRecord(err_msg)
new_user = _add_user(email, password, firstname, lastname)
new_user_info = new_user.to_dict()
return new_user_info
return add_user_internal(
session, email, password, **kwargs
).to_dict()
@wrap_to_dict(RESP_FIELDS)
def update_user(user_id, **kwargs):
"""Update a user."""
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters()
def del_user(deleter, user_id, **kwargs):
"""delete a user and return the deleted user object."""
with database.session() as session:
user = _get_user(session, user_id)
if not deleter.is_admin:
raise exception.Forbidden(
'User %s has no permission to delete user.' % (
deleter.email
)
)
user = utils.get_db_object(session, models.User, id=user_id)
utils.del_db_object(session, user)
return user.to_dict()
@utils.wrap_to_dict(RESP_FIELDS)
@utils.input_validates(email=_check_email)
@utils.supported_filters(optional_support_keys=UPDATED_FIELDS)
def update_user(updater, user_id, **kwargs):
"""Update a user and return the updated user object."""
with database.session() as session:
user = utils.get_db_object(session, models.User, id=user_id)
update_info = {}
for key in kwargs:
if key in UPDATED_FIELDS:
update_info[key] = kwargs[key]
if updater.is_admin:
update_info.update(dict([
(key, value) for key, value in kwargs.items()
if key in ADMIN_UPDATED_FIELDS
]))
kwargs = dict([
(key, value) for key, value in kwargs.items()
if key not in ADMIN_UPDATED_FIELDS
])
user = _update_user(**update_info)
user_info = user.to_dict()
if updater.id == user_id:
update_info.update(dict([
(key, value) for key, value in kwargs.items()
if key in SELF_UPDATED_FIELDS
]))
kwargs = dict([
(key, value) for key, value in kwargs.items()
if key not in SELF_UPDATED_FIELDS
])
return user_info
if kwargs:
# The user is not allowed to update a user.
raise exception.Forbidden(
'User %s has no permission to update user %s: %s.' % (
updater.email, user.email, kwargs
)
)
utils.update_db_object(session, user, **update_info)
return user.to_dict()
def _get_user(session, user_id):
"""Get the user by ID."""
with session.begin(subtransactions=True):
user = session.query(User).filter_by(id=user_id).first()
if not user:
err_msg = ERROR_MSG['findNoUser'] % user_id
raise RecordNotExists(err_msg)
return user
@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
@utils.supported_filters(optional_support_keys=PERMISSION_SUPPORTED_FIELDS)
def get_permissions(getter, user_id, **kwargs):
    """List permissions of a user.

    Only admins may inspect other users' permissions; everyone may
    inspect their own.
    """
    with database.session() as session:
        if not getter.is_admin and getter.id != user_id:
            # The user is not allowed to list permissions
            raise exception.Forbidden(
                'User %s has no permission to list user %s permissions.' % (
                    getter.email, user_id
                )
            )
        user_permissions = utils.list_db_objects(
            session, models.UserPermission, user_id=user_id, **kwargs
        )
        return [
            user_permission.to_dict()
            for user_permission in user_permissions
        ]
def _list_users(session, filters=None):
"""Get all users, optionally filtered by some fields."""
@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
@utils.supported_filters()
def get_permission(getter, user_id, permission_id, **kwargs):
"""Get a specific user permission."""
with database.session() as session:
if not getter.is_admin and getter.id != user_id:
# The user is not allowed to get permission
raise exception.Forbidden(
'User %s has no permission to get user %s permission.' % (
getter.email, user_id
)
)
filters = filters if filters else {}
with session.begin(subtransactions=True):
query = api.model_query(session, User)
users = api.model_filter(query, User, filters, SUPPORTED_FILTERS).all()
return users
user_permission = utils.get_db_object(
session, models.UserPermission,
user_id=user_id, permission_id=permission_id,
**kwargs
)
return user_permission.to_dict()
def _add_user(session, email, password, firstname=None, lastname=None):
"""Create a user."""
with session.begin(subtransactions=True):
user = User(email=email, password=password,
firstname=firstname, lastname=lastname)
session.add(user)
@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
@utils.supported_filters()
def del_permission(deleter, user_id, permission_id, **kwargs):
"""Delete a specific user permission."""
with database.session() as session:
if not deleter.is_admin and deleter.id != user_id:
# The user is not allowed to delete permission
raise exception.Forbidden(
'User %s has no permission to delete user %s permission.' % (
deleter.email, user_id
)
)
return user
user_permission = utils.get_db_object(
session, models.UserPermission,
user_id=user_id, permission_id=permission_id,
**kwargs
)
utils.del_db_object(session, user_permission)
return user_permission.to_dict()
def _update_user(session, user_id, **kwargs):
"""Update user information."""
with session.begin(subtransactions=True):
session.query(User).filter_by(id=user_id).update(kwargs)
user = _get_user(session, user_id)
@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
@utils.supported_filters(
    PERMISSION_ADDED_FIELDS
)
def add_permission(creator, user_id, permission_id):
    """Add an user permission. Admin only."""
    with database.session() as session:
        if not creator.is_admin:
            # The user is not allowed to add a permission.
            raise exception.Forbidden(
                'User %s has no permission to add a permission.' % (
                    creator.email
                )
            )
        user_permission = utils.add_db_object(
            session, models.UserPermission, True,
            user_id, permission_id
        )
        return user_permission.to_dict()
return user
@utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
@utils.supported_filters(
    optional_support_keys=[
        'add_permissions', 'remove_permissions', 'set_permissions'
    ]
)
def update_permissions(
    updater, user_id,
    add_permissions=None, remove_permissions=None,
    set_permissions=None, **kwargs
):
    """update user permissions.

    Each of the permission arguments is a list of permission ids or the
    string 'all' (meaning every permission).

    Fix: replaced mutable list defaults with None sentinels (shared
    mutable defaults are a Python pitfall); behavior is unchanged.

    :param updater: user performing the update; must be an admin.
    :param user_id: id of the user whose permissions are changed.
    :param add_permissions: permission ids to grant.
    :param remove_permissions: permission ids to revoke.
    :param set_permissions: if not None, replace permissions with these.
    :raises Forbidden: when updater is not an admin.
    """
    add_permissions = add_permissions if add_permissions else []
    remove_permissions = remove_permissions if remove_permissions else []

    def get_permission_filters(permission_ids):
        # 'all' maps to an empty filter, i.e. every permission record.
        if permission_ids == 'all':
            return {}
        else:
            return {'id': permission_ids}

    with database.session() as session:
        if not updater.is_admin:
            raise exception.Forbidden(
                'User %s has no permission to update user %s: %s.' % (
                    updater.email, user_id, kwargs
                )
            )
        user = utils.get_db_object(session, models.User, id=user_id)
        if remove_permissions:
            _remove_user_permissions(
                session, user,
                **get_permission_filters(remove_permissions)
            )
        if add_permissions:
            _add_user_permissions(
                session, user,
                **get_permission_filters(add_permissions)
            )
        if set_permissions is not None:
            _set_user_permissions(
                session, user,
                **get_permission_filters(set_permissions)
            )
        return [
            user_permission.to_dict()
            for user_permission in user.user_permissions
        ]

139
compass/db/api/user_log.py Normal file
View File

@ -0,0 +1,139 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UserLog database operations."""
import logging
from compass.db.api import database
from compass.db.api import user as user_api
from compass.db.api import utils
from compass.db import exception
from compass.db import models
SUPPORTED_FIELDS = ['user_email', 'timestamp']
USER_SUPPORTED_FIELDS = ['timestamp']
RESP_FIELDS = ['user_id', 'logs', 'timestamp']
def log_user_action(user_id, action):
    """Log user action.

    Appends one UserLog row recording that user_id performed `action`.

    :param user_id: id of the acting user.
    :param action: free-form description of the action.
    """
    with database.session() as session:
        utils.add_db_object(
            session, models.UserLog, True, user_id=user_id, action=action
        )
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters(optional_support_keys=USER_SUPPORTED_FIELDS)
def list_user_actions(lister, user_id, **filters):
    """List the log entries of one user.

    Admins may inspect anyone; other users only their own log.

    :raises Forbidden: when lister may not read user_id's log.
    """
    with database.session() as session:
        if not (lister.is_admin or lister.id == user_id):
            raise exception.Forbidden(
                'User %s has no permission to list user %s actions.' % (
                    lister.email, user_id
                )
            )
        logs = []
        for entry in utils.list_db_objects(
            session, models.UserLog, user_id=user_id, **filters
        ):
            entry_dict = entry.to_dict()
            # user_id is reported once at the top level, not per entry.
            entry_dict.pop('user_id')
            logs.append(entry_dict)
        return {'user_id': user_id, 'logs': logs}
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
def list_actions(lister, **filters):
    """List log entries of every user, grouped per user.

    Admin-only.

    :raises Forbidden: when lister is not an admin.
    """
    with database.session() as session:
        if not lister.is_admin:
            raise exception.Forbidden(
                'User %s has no permission to list all users actions.' % (
                    lister.email
                )
            )
        grouped = {}
        for entry in utils.list_db_objects(
            session, models.UserLog, **filters
        ):
            entry_dict = entry.to_dict()
            # Group entries under their owner; drop the per-entry id.
            owner = entry_dict.pop('user_id')
            grouped.setdefault(owner, []).append(entry_dict)
        return [
            {'user_id': owner, 'logs': entries}
            for owner, entries in grouped.items()
        ]
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters(optional_support_keys=USER_SUPPORTED_FIELDS)
def del_user_actions(deleter, user_id, **filters):
    """Delete the log entries of one user and return what was removed.

    Admins may purge anyone; other users only their own log.

    :raises Forbidden: when deleter may not touch user_id's log.
    """
    with database.session() as session:
        if not (deleter.is_admin or deleter.id == user_id):
            raise exception.Forbidden(
                'User %s has no permission to delete user %s actions.' % (
                    deleter.email, user_id
                )
            )
        removed = []
        for entry in utils.del_db_objects(
            session, models.UserLog, user_id=user_id, **filters
        ):
            entry_dict = entry.to_dict()
            entry_dict.pop('user_id')
            removed.append(entry_dict)
        return {'user_id': user_id, 'logs': removed}
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
def del_actions(deleter, **filters):
    """Delete log entries of every user and return them grouped per user.

    Admin-only.

    :raises Forbidden: when deleter is not an admin.
    """
    with database.session() as session:
        if not deleter.is_admin:
            raise exception.Forbidden(
                'User %s has no permission to delete all users actions.' % (
                    deleter.email
                )
            )
        removed = {}
        for entry in utils.del_db_objects(
            session, models.UserLog, **filters
        ):
            entry_dict = entry.to_dict()
            owner = entry_dict.pop('user_id')
            removed.setdefault(owner, []).append(entry_dict)
        return [
            {'user_id': owner, 'logs': entries}
            for owner, entries in removed.items()
        ]

View File

@ -14,67 +14,454 @@
"""Utils for database usage."""
import copy
from functools import wraps
import functools
import inspect
import logging
import netaddr
import re
from sqlalchemy import and_
from sqlalchemy import or_
from compass.db import exception
from compass.db import models
def model_query(session, model):
    """Return a query over model, rejecting non-ORM classes.

    Fixes: removed a dangling old `def wrap_to_dict(...)` header left by
    the diff, and fixed the "sublass" typo in the error message.

    :raises DatabaseException: when model is not derived from models.BASE.
    """
    if not issubclass(model, models.BASE):
        raise exception.DatabaseException("model should be subclass of BASE!")
    return session.query(model)
def _default_list_condition_func(col_attr, value, condition_func):
conditions = []
for sub_value in value:
condition = condition_func(col_attr, sub_value)
if condition is not None:
conditions.append(condition)
if conditions:
return or_(*conditions)
else:
return None
def _one_item_list_condition_func(col_attr, value, condition_func):
if value:
return condition_func(col_attr, value[0])
else:
return None
def _model_filter_by_condition(
    query, col_attr, value, condition_func,
    list_condition_func=_default_list_condition_func
):
    """Apply one comparison to query.

    List values are folded into a single condition through
    list_condition_func; a None condition leaves query untouched.
    """
    if isinstance(value, list):
        built = list_condition_func(col_attr, value, condition_func)
    else:
        built = condition_func(col_attr, value)
    if built is None:
        return query
    return query.filter(built)
def _between_condition(col_attr, value):
if value[0] is not None and value[1] is not None:
col_attr.between(value[0], value[1])
if value[0] is not None:
return col_attr >= value[0]
if value[1] is not None:
return col_attr <= value[1]
return None
def model_filter(query, model, **filters):
    """Append filter clauses to query from a filters mapping.

    Each filter value may be:
      * a scalar -> equality test
      * a list   -> SQL IN test
      * a dict   -> operators keyed by name: eq, lt, gt, le, ge, ne,
                    in, startswith, endswith, like, between

    Fixes: replaced a Python-2 `print` statement with logging; the 'ne'
    branch read value['eq'] and passed no scalar condition; the
    'startswith'/'endswith' branches read misspelled keys
    'startswitch'/'endswitch'.
    """
    logging.debug('model query %s: filter %s', query, filters)
    for key, value in filters.items():
        col_attr = getattr(model, key)
        if isinstance(value, list):
            query = query.filter(col_attr.in_(value))
        elif isinstance(value, dict):
            if 'eq' in value:
                query = _model_filter_by_condition(
                    query, col_attr, value['eq'],
                    lambda attr, data: attr == data,
                    lambda attr, data, condition_func: attr.in_(data)
                )
            if 'lt' in value:
                query = _model_filter_by_condition(
                    query, col_attr, value['lt'],
                    lambda attr, data: attr < data,
                    _one_item_list_condition_func
                )
            if 'gt' in value:
                query = _model_filter_by_condition(
                    query, col_attr, value['gt'],
                    lambda attr, data: attr > data,
                    _one_item_list_condition_func
                )
            if 'le' in value:
                query = _model_filter_by_condition(
                    query, col_attr, value['le'],
                    lambda attr, data: attr <= data,
                    _one_item_list_condition_func
                )
            if 'ge' in value:
                query = _model_filter_by_condition(
                    query, col_attr, value['ge'],
                    lambda attr, data: attr >= data,
                    _one_item_list_condition_func
                )
            if 'ne' in value:
                # Bug fix: previously read value['eq'] and passed None as
                # the scalar condition (crashing on non-list values).
                query = _model_filter_by_condition(
                    query, col_attr, value['ne'],
                    lambda attr, data: attr != data,
                    lambda attr, data, condition_func: ~attr.in_(data)
                )
            if 'in' in value:
                query = query.filter(col_attr.in_(value['in']))
            if 'startswith' in value:
                # Bug fix: key was misspelled as 'startswitch'.
                query = _model_filter_by_condition(
                    query, col_attr, value['startswith'],
                    lambda attr, data: attr.like('%s%%' % data)
                )
            if 'endswith' in value:
                # Bug fix: key was misspelled as 'endswitch'.
                query = _model_filter_by_condition(
                    query, col_attr, value['endswith'],
                    lambda attr, data: attr.like('%%%s' % data)
                )
            if 'like' in value:
                query = _model_filter_by_condition(
                    query, col_attr, value['like'],
                    lambda attr, data: attr.like('%%%s%%' % data)
                )
            if 'between' in value:
                query = _model_filter_by_condition(
                    query, col_attr, value['between'],
                    _between_condition
                )
        else:
            query = query.filter(col_attr == value)
    return query
def wrap_to_dict(support_keys=[]):
    """Decorator converting a db object (or list of them) into dict(s).

    Only the fields named in support_keys survive in the output.
    Fix: the diff interleaved the old implementation (`@wraps`,
    `obj_info`) with the new one; only the new version is kept.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            obj = func(*args, **kwargs)
            if isinstance(obj, list):
                obj = [_wrapper_dict(o, support_keys) for o in obj]
            else:
                obj = _wrapper_dict(obj, support_keys)
            return obj
        return wrapper
    return decorator
def _wrapper_dict(data, support_keys=None):
"""Helper for warpping db object into dictionaryi."""
if support_keys is None:
return data
def _wrapper_dict(data, support_keys):
"""Helper for warpping db object into dictionary."""
info = {}
if not isinstance(data, dict):
data = data.to_dict()
for key in support_keys:
if key in data:
info[key] = data[key]
return info
def supported_filters(support_keys=[], optional_support_keys=[]):
    """Decorator restricting a function's keyword filters.

    Keys in support_keys are mandatory; optional_support_keys are
    allowed but not required. Anything else raises InvalidParameter.

    Fixes: removed the body of the deleted merge_dict helper that the
    diff interleaved into this function, and dropped a Python-2 debug
    `print` statement.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **filters):
            must_support_keys = set(support_keys)
            all_support_keys = must_support_keys | set(optional_support_keys)
            supports = {}
            for filter_key, filter_value in filters.items():
                if filter_key not in all_support_keys:
                    raise exception.InvalidParameter(
                        'filter key %s is not supported' % filter_key
                    )
                if filter_key in must_support_keys:
                    must_support_keys.remove(filter_key)
                supports[filter_key] = filter_value
            if must_support_keys:
                raise exception.InvalidParameter(
                    'filter keys %s not found' % list(must_support_keys)
                )
            return func(*args, **supports)
        return wrapper
    return decorator
def _obj_equal(check, obj):
if check == obj:
return True
if not issubclass(obj.__class__, check.__class__):
return False
if isinstance(obj, dict):
return _dict_equal(check, obj)
elif isinstance(obj, list):
return _list_equal(check, obj)
else:
return False
def _list_equal(check_list, obj_list):
return set(check_list).issubset(set(obj_list))
def _dict_equal(check_dict, obj_dict):
    """True when obj_dict has every key of check_dict with a matching value.

    Fix: lines of the deleted merge_dict helper were interleaved into
    this loop by the diff; they have been removed.
    """
    for key, value in check_dict.items():
        if (
            key not in obj_dict or
            not _obj_equal(value, obj_dict[key])
        ):
            return False
    return True
def general_filter_callback(general_filter, obj):
    """Check obj against one response filter specification.

    Bug fix: the 'resp_ge' branch read the 'resp_gt' key, raising
    KeyError whenever only 'resp_ge' was supplied; it now reads
    'resp_ge'.

    :returns: True when obj passes the filter (unknown filters pass).
    """
    if 'resp_eq' in general_filter:
        return _obj_equal(general_filter['resp_eq'], obj)
    elif 'resp_in' in general_filter:
        in_filters = general_filter['resp_in']
        if not in_filters:
            return True
        for in_filter in in_filters:
            if _obj_equal(in_filter, obj):
                return True
        return False
    elif 'resp_lt' in general_filter:
        return obj < general_filter['resp_lt']
    elif 'resp_le' in general_filter:
        return obj <= general_filter['resp_le']
    elif 'resp_gt' in general_filter:
        return obj > general_filter['resp_gt']
    elif 'resp_ge' in general_filter:
        return obj >= general_filter['resp_ge']
    elif 'resp_match' in general_filter:
        return bool(re.match(general_filter['resp_match'], obj))
    else:
        # Unrecognized filters do not constrain the response.
        return True
def filter_output(filter_callbacks, filters, obj):
    """Return True when obj passes every applicable response filter.

    :raises InvalidResponse: when a filtered field is missing from obj.
    """
    for field, callback in filter_callbacks.items():
        if field not in filters:
            # No filter was requested for this field.
            continue
        if field not in obj:
            raise exception.InvalidResponse(
                '%s is not in %s' % (field, obj)
            )
        if not callback(filters[field], obj[field]):
            return False
    return True
def output_filters(**filter_callbacks):
    """Decorator dropping response items rejected by filter callbacks."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **filters):
            return [
                item for item in func(*args, **filters)
                if filter_output(filter_callbacks, filters, item)
            ]
        return wrapper
    return decorator
def _input_validates(args_validators, kwargs_validators, *args, **kwargs):
for i, value in enumerate(args):
if i < len(args_validators) and args_validators[i]:
if isinstance(value, list):
for sub_value in value:
args_validators[i](sub_value)
else:
args_validators[i](value)
for key, value in kwargs.items():
if kwargs_validators.get(key):
if isinstance(value, list):
for sub_value in value:
kwargs_validators[key](sub_value)
else:
kwargs_validators[key](value)
def input_validates(*args_validators, **kwargs_validators):
    """Decorator validating a function's arguments before invocation."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            _input_validates(
                args_validators, kwargs_validators, *args, **kwargs)
            return func(*args, **kwargs)
        return wrapper
    return decorator
def _output_validates(kwargs_validators, obj):
if not isinstance(obj, dict):
obj = obj.to_dict()
for key, value in obj.items():
if kwargs_validators.get(key):
kwargs_validators[key](value)
def validate_outputs(kwargs_validators, obj):
    """Public wrapper around _output_validates for a single object."""
    return _output_validates(kwargs_validators, obj)
def output_validates(**kwargs_validators):
    """Decorator validating the object(s) a function returns.

    Bug fix: the wrapper validated the result but never returned it,
    so every decorated function silently returned None to its caller.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            obj = func(*args, **kwargs)
            if isinstance(obj, list):
                for obj_item in obj:
                    _output_validates(kwargs_validators, obj_item)
            else:
                _output_validates(kwargs_validators, obj)
            return obj
        return wrapper
    return decorator
def get_db_object(session, table, exception_when_missing=True, **kwargs):
    """Fetch the first record of `table` matching kwargs.

    :returns: the matching db object, or None when nothing matches and
        exception_when_missing is False.
    :raises RecordNotExists: when nothing matches and
        exception_when_missing is True.
    """
    with session.begin(subtransactions=True):
        logging.debug('get db object %s from table %s',
                      kwargs, table.__name__)
        db_object = model_filter(
            model_query(session, table), table, **kwargs
        ).first()
        if not db_object:
            if not exception_when_missing:
                return None
            raise exception.RecordNotExists(
                'Cannot find the record in table %s: %s' % (
                    table.__name__, kwargs
                )
            )
        return db_object
def add_db_object(session, table, exception_when_existing=True,
                  *args, **kwargs):
    """Create a db object (or update an existing one).

    Positional args map onto the constructor's key columns; kwargs are
    set as plain attributes afterwards.

    Fixes: removed merge_dict residue the diff interleaved into the
    `else:` branch, and fixed the "atributes" typo in the debug log.

    :raises InvalidParameter: when args do not fit the constructor.
    :raises DuplicatedRecord: when the keyed record already exists and
        exception_when_existing is True.
    """
    with session.begin(subtransactions=True):
        logging.debug('add object %s attributes %s to table %s',
                      args, kwargs, table.__name__)
        argspec = inspect.getargspec(table.__init__)
        arg_names = argspec.args[1:]
        arg_defaults = argspec.defaults
        if not arg_defaults:
            arg_defaults = []
        # args must cover every non-defaulted constructor parameter.
        if not (
            len(arg_names) - len(arg_defaults) <= len(args) <= len(arg_names)
        ):
            raise exception.InvalidParameter(
                'arg names %s does not match arg values %s' % (
                    arg_names, args)
            )
        db_keys = dict(zip(arg_names, args))
        if db_keys:
            db_object = session.query(table).filter_by(**db_keys).first()
        else:
            db_object = None
        new_object = False
        if db_object:
            if exception_when_existing:
                raise exception.DuplicatedRecord(
                    '%s exists in table %s' % (db_keys, table.__name__)
                )
        else:
            db_object = table(**db_keys)
            new_object = True
        for key, value in kwargs.items():
            setattr(db_object, key, value)
        if new_object:
            session.add(db_object)
        db_object.initialize()
        session.flush()
        db_object.validate()
        return db_object
def list_db_objects(session, table, **filters):
    """Return every row of `table` matching filters."""
    with session.begin(subtransactions=True):
        logging.debug('list db objects by filters %s in table %s',
                      filters, table.__name__)
        query = model_filter(
            model_query(session, table), table, **filters
        )
        return query.all()
def del_db_objects(session, table, **filters):
    """Delete every row of `table` matching filters.

    :returns: the deleted objects (fetched before deletion).
    """
    with session.begin(subtransactions=True):
        logging.debug('delete db objects by filters %s in table %s',
                      filters, table.__name__)
        matched = model_filter(
            model_query(session, table), table, **filters
        )
        deleted = matched.all()
        matched.delete()
        return deleted
def update_db_object(session, db_object, **kwargs):
    """Apply kwargs as attribute updates to db_object, then revalidate."""
    with session.begin(subtransactions=True):
        logging.debug('update db object %s by value %s',
                      db_object, kwargs)
        for attr_name, attr_value in kwargs.items():
            setattr(db_object, attr_name, attr_value)
        db_object.initialize()
        session.flush()
        db_object.validate()
def del_db_object(session, db_object):
    """Delete a single ORM object inside a (sub)transaction."""
    with session.begin(subtransactions=True):
        logging.debug('delete db object %s', db_object)
        session.delete(db_object)
def check_ip(ip):
    """Validator: raise when ip is not a parseable IP address.

    Fix: corrected the "uncorrect" typo in the error message.

    :raises InvalidParameter: when netaddr cannot parse the address.
    """
    try:
        netaddr.IPAddress(ip)
    except Exception as error:
        logging.exception(error)
        raise exception.InvalidParameter(
            'ip address %s format incorrect' % ip
        )
def check_mac(mac):
    """Validator: raise InvalidParameter when mac is not a parseable MAC."""
    try:
        netaddr.EUI(mac)
    except Exception as error:
        logging.exception(error)
        raise exception.InvalidParameter(
            'invalid mac address %s' % mac
        )

View File

@ -1,17 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from compass.db.api import adapter
from compass.db.api import cluster
from compass.db.api import user

View File

@ -13,34 +13,65 @@
# limitations under the License.
"""Custom exception"""
import traceback
class DatabaseException(Exception):
    """Base class of all database layer exceptions.

    Carries an HTTP-style status_code for the API layer and the
    traceback captured at raise time. Fix: the diff interleaved the old
    `Exception`-based class headers with the new hierarchy, producing
    duplicate class definitions; only the DatabaseException-based
    hierarchy is kept, with `message` stored once in the base class.
    """
    def __init__(self, message):
        super(DatabaseException, self).__init__(message)
        self.message = message
        self.traceback = traceback.format_exc()
        self.status_code = 400


class RecordNotExists(DatabaseException):
    """Define the exception for referring non-existing object in DB."""
    def __init__(self, message):
        super(RecordNotExists, self).__init__(message)
        self.status_code = 404


class DuplicatedRecord(DatabaseException):
    """Define the exception for trying to insert an existing object in DB."""
    def __init__(self, message):
        super(DuplicatedRecord, self).__init__(message)
        self.status_code = 409


class Unauthorized(DatabaseException):
    """Define the exception for invalid user login."""
    def __init__(self, message):
        super(Unauthorized, self).__init__(message)
        self.status_code = 401


class UserDisabled(DatabaseException):
    """Define the exception that a disabled user tries to do some operations.
    """
    def __init__(self, message):
        super(UserDisabled, self).__init__(message)
        self.status_code = 403


class Forbidden(DatabaseException):
    """Define the exception that a user is trying to make some action
    without the right permission.
    """
    def __init__(self, message):
        super(Forbidden, self).__init__(message)
        self.status_code = 403


class InvalidParameter(DatabaseException):
    """Define the exception that the request has invalid or missing parameters.
    """
    def __init__(self, message):
        super(InvalidParameter, self).__init__(message)
        self.status_code = 400


class InvalidResponse(DatabaseException):
    """Define the exception that the response is invalid.
    """
    def __init__(self, message):
        super(InvalidResponse, self).__init__(message)
        self.status_code = 400

File diff suppressed because it is too large Load Diff

View File

@ -12,80 +12,108 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validator methods. Please note that functions below may not the best way
to do the validation.
"""
# TODO(xiaodong): please refactor/rewrite the following functions as necessary
"""Validator methods."""
import netaddr
import re
import socket
from compass.utils import setting_wrapper as setting
from compass.utils import util
def is_valid_ip(name, ip_addr):
    """Valid the format of an IP address.

    Fix: the diff interleaved the deleted one-argument implementation
    (socket.inet_aton, `except socket.error`) with this one, leaving
    two try bodies and two except clauses; only the netaddr-based
    version is kept.

    :param name: config key being validated (unused here, part of the
        common validator signature).
    """
    if not ip_addr:
        return False
    try:
        netaddr.IPAddress(ip_addr)
    except Exception:
        return False
    return True
def is_valid_network(name, ip_network):
    """Valid the format of an Ip network (e.g. '10.0.0.0/24').

    Fixes: removed residue of the deleted regex-based validator, and
    restored the success path — as interleaved, every branch returned
    False, so no network could ever validate.
    """
    try:
        netaddr.IPNetwork(ip_network)
    except Exception:
        return False
    return True
def is_valid_netmask(name, ip_addr):
    """Valid the format of a netmask.

    Fixes: is_valid_ip takes (name, ip_addr) but was called with a
    single argument (TypeError at runtime); old regex residue removed.
    """
    if not is_valid_ip(name, ip_addr):
        return False
    ip = netaddr.IPAddress(ip_addr)
    if ip.is_netmask():
        return True
    else:
        return False
def is_valid_gateway(name, ip_addr):
    """Valid the format of gateway.

    Fix: is_valid_ip takes (name, ip_addr) but was called with a single
    argument (TypeError at runtime).
    """
    if not is_valid_ip(name, ip_addr):
        return False
    ip = netaddr.IPAddress(ip_addr)
    # A usable gateway must be a unicast (private or public) address.
    if ip.is_private() or ip.is_public():
        return True
    else:
        return False
def is_valid_dns(name, dns):
    """Valid the format of DNS: an IP address or a resolvable hostname.

    Fix: is_valid_ip takes (name, ip_addr) but was called with a single
    argument (TypeError at runtime).
    """
    if is_valid_ip(name, dns):
        return True
    try:
        socket.gethostbyname_ex(dns)
    except Exception:
        return False
    return True
def is_valid_username(name, username):
    """A username is valid when it is non-empty."""
    if username:
        return True
    return False
def is_valid_password(name, password):
    """A password is valid when it is non-empty."""
    if password:
        return True
    return False
def is_valid_partition(name, partition):
    """A partition must be 'swap' or an absolute mount point, and must
    declare either a size or a percentage."""
    mount_point_ok = name == 'swap' or name.startswith('/')
    if not mount_point_ok:
        return False
    return 'size' in partition or 'percentage' in partition
def is_valid_percentage(name, percentage):
    """Valid when percentage lies in [0, 100]."""
    return percentage >= 0 and percentage <= 100
def is_valid_port(name, port):
    """Valid TCP/UDP port number: 1..65535."""
    return port > 0 and port < 65536
def is_valid_size(name, size):
    """Valid a size string such as '10G', '512M' or '42'.

    Bug fix: the pattern was unanchored at the end, so any string
    starting with digits ('10Gxyz', '12junk') passed validation; the
    pattern is now fully anchored.
    """
    if re.match(r'^(\d+)(K|M|G|T)?$', size):
        return True
    return False
# NOTE(review): legacy one-argument variant left by the diff; it
# duplicates (and would shadow) the two-argument is_valid_netmask
# defined above — presumably deleted in the actual commit. Confirm.
def is_valid_netmask(ip_addr):
    """Valid the format of a netmask."""
    if not ip_addr:
        return False
    try:
        ip_address = netaddr.IPAddress(ip_addr)
        return ip_address.is_netmask()
    except Exception:
        return False
# NOTE(review): legacy one-argument variant left by the diff; it
# duplicates (and would shadow) the two-argument is_valid_gateway
# defined above — presumably deleted in the actual commit. Confirm.
def is_valid_gateway(ip_addr):
    """Valid the format of gateway."""
    if not ip_addr:
        return False
    # Leading octets that can never belong to a usable gateway.
    invalid_ip_prefix = ['0', '224', '169', '127']
    try:
        # Check if ip_addr is an IP address and not start with 0
        ip_addr_prefix = ip_addr.split('.')[0]
        if is_valid_ip(ip_addr) and ip_addr_prefix not in invalid_ip_prefix:
            ip_address = netaddr.IPAddress(ip_addr)
            if not ip_address.is_multicast():
                # Check if ip_addr is not multicast and reserved IP
                return True
        return False
    except Exception:
        return False
# NOTE(review): legacy validator left by the diff; calls is_valid_ip
# with one argument, which no longer matches the two-argument signature
# defined above — presumably deleted in the actual commit. Confirm.
def is_valid_dnsServer(dns):
    """Valid the format of DNS."""
    if dns and not is_valid_ip(dns):
        return False
    return True
# Expose this module's namespace to externally loaded validator configs
# so that the config snippets can reference the validators defined above.
VALIDATOR_GLOBALS = globals()
VALIDATOR_LOCALS = locals()
# Load additional validator definitions from setting.VALIDATOR_DIR and
# merge each config's names back into this module's namespace.
VALIDATOR_CONFIGS = util.load_configs(
    setting.VALIDATOR_DIR,
    config_name_suffix='.py',
    env_globals=VALIDATOR_GLOBALS,
    env_locals=VALIDATOR_LOCALS
)
for validator_config in VALIDATOR_CONFIGS:
    VALIDATOR_LOCALS.update(validator_config)

View File

@ -24,6 +24,7 @@ from compass.hdsdiscovery import utils
UNREACHABLE = 'unreachable'
NOTSUPPORTED = 'notsupported'
ERROR = 'error'
REPOLLING = 'repolling'
class HDManager(object):
@ -143,7 +144,7 @@ class HDManager(object):
logging.debug("[get_vendor] No vendor found! <==================")
return (None, NOTSUPPORTED, "Not supported switch vendor!")
return (vendor, "Found", "")
return (vendor, REPOLLING, "")
def get_sys_info(self, host, credential):
"""get sys info."""

View File

@ -1,13 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -1,361 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to provider installing progress calculation for the adapter.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging
import re
from compass.db import database
from compass.db.model import Cluster
from compass.db.model import ClusterHost
from compass.log_analyzor.line_matcher import Progress
class AdapterItemMatcher(object):
    """Progress matcher for the os installing or package installing."""

    def __init__(self, file_matchers):
        # Matchers for the individual log files of this install phase.
        self.file_matchers_ = file_matchers
        # Fraction of the overall installation this phase covers;
        # overwritten by update_progress_range().
        self.min_progress_ = 0.0
        self.max_progress_ = 1.0

    def update_progress_range(self, min_progress, max_progress):
        """update min_progress and max_progress."""
        self.min_progress_ = min_progress
        self.max_progress_ = max_progress
        # Propagate the new absolute window to every file matcher.
        for file_matcher in self.file_matchers_:
            file_matcher.update_absolute_progress_range(
                self.min_progress_, self.max_progress_)

    def __str__(self):
        return '%s[file_matchers: %s, min_progress: %s, max_progress: %s]' % (
            self.__class__.__name__, self.file_matchers_,
            self.min_progress_, self.max_progress_)

    def update_progress(self, fullname, progress):
        """Update progress.

        :param fullname: the fullname of the installing host.
        :type fullname: str
        :param progress: Progress instance to update.
        """
        for file_matcher in self.file_matchers_:
            file_matcher.update_progress(fullname, progress)
class OSMatcher(object):
    """Progress matcher for os installer."""

    def __init__(self, os_installer_name, os_pattern,
                 item_matcher, min_progress, max_progress):
        # The progress window must be a sub-range of [0.0, 1.0].
        if not 0.0 <= min_progress <= max_progress <= 1.0:
            raise IndexError('%s restriction not mat:'
                             '0.0 <= min_progress(%s) '
                             '<= max_progress(%s) <= 1.0' % (
                                 self.__class__.__name__,
                                 min_progress, max_progress))
        self.name_ = os_installer_name
        self.os_regex_ = re.compile(os_pattern)
        self.matcher_ = item_matcher
        self.matcher_.update_progress_range(min_progress, max_progress)

    def __repr__(self):
        return '%s[name:%s, os_pattern:%s, matcher:%s]' % (
            self.__class__.__name__, self.name_,
            self.os_regex_.pattern, self.matcher_)

    def match(self, os_installer_name, os_name):
        """Check if the os matcher is acceptable."""
        if self.name_ != os_installer_name:
            return False
        return bool(self.os_regex_.match(os_name))

    def update_progress(self, fullname, progress):
        """Delegate the progress update to the item matcher."""
        self.matcher_.update_progress(fullname, progress)
class PackageMatcher(object):
    """Progress matcher for package installer."""

    def __init__(self, package_installer_name, target_system,
                 item_matcher, min_progress, max_progress):
        # The progress window must be a sub-range of [0.0, 1.0].
        if not 0.0 <= min_progress <= max_progress <= 1.0:
            raise IndexError('%s restriction not mat:'
                             '0.0 <= min_progress(%s) '
                             '<= max_progress(%s) <= 1.0' % (
                                 self.__class__.__name__,
                                 min_progress, max_progress))
        self.name_ = package_installer_name
        self.target_system_ = target_system
        self.matcher_ = item_matcher
        self.matcher_.update_progress_range(min_progress, max_progress)

    def __repr__(self):
        return '%s[name:%s, target_system:%s, matcher:%s]' % (
            self.__class__.__name__, self.name_,
            self.target_system_, self.matcher_)

    def match(self, package_installer_name, target_system):
        """Check if the package matcher is acceptable."""
        return (
            self.name_ == package_installer_name and
            self.target_system_ == target_system
        )

    def update_progress(self, fullname, progress):
        """Delegate the progress update to the item matcher."""
        self.matcher_.update_progress(fullname, progress)
class AdapterMatcher(object):
"""Adapter matcher to update adapter installing progress."""
def __init__(self, os_matcher, package_matcher):
self.os_matcher_ = os_matcher
self.package_matcher_ = package_matcher
def match(self, os_installer_name, os_name,
package_installer_name, target_system):
"""Check if the adapter matcher is acceptable.
:param os_installer_name: the os installer name.
:type os_installer_name: str
:param os_name: the os name.
:type os_name: str
:param package_installer_name: the package installer name.
:type package_installer_name: str
:param target_system: the target system to deploy
:type target_system: str
:returns: bool
.. note::
Return True if the AdapterMatcher can process the log files
generated from the os installation and package installation.
"""
return all([
self.os_matcher_.match(os_installer_name, os_name),
self.package_matcher_.match(
package_installer_name, target_system)])
def __str__(self):
return '%s[os_matcher:%s, package_matcher:%s]' % (
self.__class__.__name__,
self.os_matcher_, self.package_matcher_)
@classmethod
def _get_host_progress(cls, hostid):
"""Get Host Progress from database.
.. notes::
The function should be called in database session.
"""
session = database.current_session()
host = session.query(
ClusterHost
).filter_by(id=hostid).first()
if not host:
logging.error(
'there is no host for %s in ClusterHost', hostid)
return None, None, None
if not host.state:
logging.error('there is no related HostState for %s',
hostid)
return host.fullname, None, None
return (
host.fullname,
host.state.state,
Progress(host.state.progress,
host.state.message,
host.state.severity))
@classmethod
def _update_host_progress(cls, hostid, progress):
"""Update host progress to database.
.. note::
The function should be called in database session.
"""
session = database.current_session()
host = session.query(
ClusterHost).filter_by(id=hostid).first()
if not host:
logging.error(
'there is no host for %s in ClusterHost', hostid)
return
if not host.state:
logging.error(
'there is no related HostState for %s', hostid)
return
if host.state.state != 'INSTALLING':
logging.error(
'host %s is not in INSTALLING state',
hostid)
return
if host.state.progress > progress.progress:
logging.error(
'host %s progress is not increased '
'from %s to %s',
hostid, host.state, progress)
return
if (
host.state.progress == progress.progress and
host.state.message == progress.message
):
logging.info(
'ignore update host %s progress %s to %s',
hostid, progress, host.state)
return
host.state.progress = progress.progress
host.state.message = progress.message
if progress.severity:
host.state.severity = progress.severity
if host.state.progress >= 1.0:
host.state.state = 'READY'
if host.state.severity == 'ERROR':
host.state.state = 'ERROR'
if host.state.state != 'INSTALLING':
host.mutable = True
logging.debug(
'update host %s state %s',
hostid, host.state)
@classmethod
def _update_cluster_progress(cls, clusterid):
"""Update cluster installing progress to database.
.. note::
The function should be called in the database session.
"""
session = database.current_session()
cluster = session.query(
Cluster).filter_by(id=clusterid).first()
if not cluster:
logging.error(
'there is no cluster for %s in Cluster',
clusterid)
return
if not cluster.state:
logging.error(
'there is no ClusterState for %s',
clusterid)
if cluster.state.state != 'INSTALLING':
logging.error('cluster %s is not in INSTALLING state',
clusterid)
return
cluster_progress = 0.0
cluster_messages = {}
cluster_severities = set([])
hostids = []
for host in cluster.hosts:
if host.state:
hostids.append(host.id)
cluster_progress += host.state.progress
if host.state.message:
cluster_messages[host.hostname] = host.state.message
if host.state.severity:
cluster_severities.add(host.state.severity)
cluster.state.progress = cluster_progress / len(hostids)
cluster.state.message = '\n'.join(
[
'%s: %s' % (hostname, message)
for hostname, message in cluster_messages.items()
]
)
for severity in ['ERROR', 'WARNING', 'INFO']:
if severity in cluster_severities:
cluster.state.severity = severity
break
if cluster.state.progress >= 1.0:
cluster.state.state = 'READY'
if cluster.state.severity == 'ERROR':
cluster.state.state = 'ERROR'
if cluster.state.state != 'INSTALLING':
cluster.mutable = True
logging.debug(
'update cluster %s state %s',
clusterid, cluster.state)
    def update_progress(self, clusterid, hostids):
        """Update cluster progress and hosts progresses.

        :param clusterid: the id of the cluster to update.
        :type clusterid: int.
        :param hostids: the ids of the hosts to update.
        :type hostids: list of int.
        """
        host_progresses = {}
        # First session: snapshot each host's state and progress.
        with database.session():
            for hostid in hostids:
                fullname, host_state, host_progress = (
                    self._get_host_progress(hostid))
                if not fullname or not host_progress:
                    logging.error(
                        'nothing to update host %s => '
                        'state %s progress %s',
                        fullname, host_state, host_progress)
                    continue

                logging.debug('got host %s state %s progress %s',
                              fullname, host_state, host_progress)
                host_progresses[hostid] = (
                    fullname, host_state, host_progress)

        # Log parsing happens OUTSIDE any database session so the
        # session is not held across file I/O.
        for hostid, host_value in host_progresses.items():
            fullname, host_state, host_progress = host_value
            # only hosts still installing and not yet finished need work.
            if host_state == 'INSTALLING' and host_progress.progress < 1.0:
                self.os_matcher_.update_progress(
                    fullname, host_progress)
                self.package_matcher_.update_progress(
                    fullname, host_progress)
            else:
                logging.error(
                    'there is no need to update host %s '
                    'progress: state %s progress %s',
                    fullname, host_state, host_progress)

        # Second session: persist per-host progress, then roll the host
        # values up into the cluster-level progress.
        with database.session():
            for hostid in hostids:
                if hostid not in host_progresses:
                    continue

                _, _, host_progress = host_progresses[hostid]
                self._update_host_progress(hostid, host_progress)

            self._update_cluster_progress(clusterid)

View File

@ -1,347 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to update intalling progress by processing log file.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging
import os.path
from compass.db import database
from compass.db.model import LogProgressingHistory
from compass.log_analyzor.line_matcher import Progress
from compass.utils import setting_wrapper as setting
class FileFilter(object):
    """Abstract base for log file filters."""

    def __repr__(self):
        return type(self).__name__

    def filter(self, pathname):
        """Decide whether the log file should be processed.

        :param pathname: the absolute path name to the log file.
        :raises NotImplementedError: subclasses must override this.
        """
        raise NotImplementedError(str(self))
class CompositeFileFilter(FileFilter):
    """Filter that accepts a path only when every sub-filter accepts it."""

    def __init__(self, filters):
        self.filters_ = filters

    def __str__(self):
        return 'CompositeFileFilter[%s]' % self.filters_

    def append_filter(self, file_filter):
        """Add one more sub-filter to the chain."""
        self.filters_.append(file_filter)

    def filter(self, pathname):
        """Return True only when all sub-filters accept ``pathname``."""
        return all(
            sub_filter.filter(pathname)
            for sub_filter in self.filters_
        )
class FilterFileExist(FileFilter):
    """Reject paths that do not point at an existing regular file."""

    def filter(self, pathname):
        """Return True when ``pathname`` is an existing file."""
        if os.path.isfile(pathname):
            return True

        logging.error("%s is not exist", pathname)
        return False
def get_file_filter():
    """Build the default composite filter for log files."""
    return CompositeFileFilter([FilterFileExist()])
class FileReader(object):
    """Class to read log file.

    The class provide support to read log file from the position
    it has read last time. and update the position when it finish
    reading the log.
    """
    def __init__(self, pathname):
        # absolute path of the log file to read
        self.pathname_ = pathname
        # byte offset to resume reading from (restored via get_history)
        self.position_ = 0
        # tail of a line whose terminating newline has not been seen yet
        self.partial_line_ = ''

    def __repr__(self):
        return (
            '%s[pathname:%s, position:%s, partial_line:%s]' % (
                self.__class__.__name__, self.pathname_, self.position_,
                self.partial_line_
            )
        )

    def get_history(self):
        """Get log file read history from database.

        :returns: (line_matcher_name progress)

        .. note::
           The function should be called out of database session.
           It reads the log_progressing_history table to get the
           position in the log file it has read in last run,
           the partial line of the log, the line matcher name
           in the last run, the progress, the message and the
           severity it has got in the last run.
        """
        with database.session() as session:
            history = session.query(
                LogProgressingHistory
            ).filter_by(
                pathname=self.pathname_
            ).first()
            if history:
                # resume where the previous run left off
                self.position_ = history.position
                self.partial_line_ = history.partial_line
                line_matcher_name = history.line_matcher_name
                progress = Progress(history.progress,
                                    history.message,
                                    history.severity)
            else:
                # first time this file is processed
                line_matcher_name = 'start'
                progress = Progress(0.0, '', None)

            return line_matcher_name, progress

    def update_history(self, line_matcher_name, progress):
        """Update log_progressing_history table.

        :param line_matcher_name: the line matcher name.
        :param progress: Progress instance to record the installing progress.

        .. note::
           The function should be called out of database session.
           It updates the log_processing_history table.
        """
        with database.session() as session:
            history = session.query(LogProgressingHistory).filter_by(
                pathname=self.pathname_).first()

            if history:
                # a stored position at/after ours means another run already
                # processed at least this far; never move history backwards.
                if history.position >= self.position_:
                    logging.error(
                        '%s history position %s is ahead of current '
                        'position %s',
                        self.pathname_,
                        history.position,
                        self.position_)
                    return

                history.position = self.position_
                history.partial_line = self.partial_line_
                history.line_matcher_name = line_matcher_name
                history.progress = progress.progress
                history.message = progress.message
                history.severity = progress.severity
            else:
                history = LogProgressingHistory(
                    pathname=self.pathname_, position=self.position_,
                    partial_line=self.partial_line_,
                    line_matcher_name=line_matcher_name,
                    progress=progress.progress,
                    message=progress.message,
                    severity=progress.severity)
                session.merge(history)
            logging.debug('update file %s to history %s',
                          self.pathname_, history)

    def readline(self):
        """Generate each line of the log file."""
        old_position = self.position_
        try:
            with open(self.pathname_) as logfile:
                logfile.seek(self.position_)
                while True:
                    line = logfile.readline()
                    # accumulate until a full (newline-terminated) line
                    self.partial_line_ += line
                    position = logfile.tell()
                    if position > self.position_:
                        self.position_ = position

                    if self.partial_line_.endswith('\n'):
                        yield_line = self.partial_line_
                        self.partial_line_ = ''
                        yield yield_line
                    else:
                        # EOF reached with an unterminated tail
                        break

                if self.partial_line_:
                    # yield the unterminated tail; it may be re-read and
                    # reprocessed in the next run.
                    yield self.partial_line_

        except Exception as error:
            logging.error('failed to processing file %s', self.pathname_)
            raise error

        logging.debug(
            'processing file %s log %s bytes to position %s',
            self.pathname_, self.position_ - old_position,
            self.position_)
class FileReaderFactory(object):
    """Factory creating :class:`FileReader` objects for host log files."""

    def __init__(self, logdir, filefilter):
        self.logdir_ = logdir
        self.filefilter_ = filefilter

    def __str__(self):
        return '%s[logdir: %s filefilter: %s]' % (
            self.__class__.__name__, self.logdir_, self.filefilter_)

    def get_file_reader(self, fullname, filename):
        """Get FileReader instance.

        :param fullname: fullname of installing host.
        :param filename: the filename of the log file.
        :returns: :class:`FileReader` instance, or None when filtered out.
        """
        pathname = os.path.join(self.logdir_, fullname, filename)
        logging.debug('get FileReader from %s', pathname)
        if self.filefilter_.filter(pathname):
            return FileReader(pathname)

        logging.error('%s is filtered', pathname)
        return None
# Module-level singleton: creates readers for per-host log files under the
# configured installation log directory, skipping files rejected by the
# default filter (currently just an existence check).
FILE_READER_FACTORY = FileReaderFactory(
    setting.INSTALLATION_LOGDIR, get_file_filter())
class FileMatcher(object):
    """File matcher to get the installing progress from the log file."""

    def __init__(self, line_matchers, min_progress, max_progress, filename):
        """Constructor

        :param line_matchers: dict of matcher name to LineMatcher instance.
        :param min_progress: start of this file's share of the progress range.
        :param max_progress: end of this file's share of the progress range.
        :param filename: the log file name to process.
        :raises IndexError: if not 0.0 <= min_progress <= max_progress <= 1.0.
        """
        if not 0.0 <= min_progress <= max_progress <= 1.0:
            # fix: error message said "not mat" instead of "not met".
            raise IndexError(
                '%s restriction is not met: 0.0 <= min_progress'
                '(%s) <= max_progress(%s) <= 1.0' % (
                    self.__class__.__name__,
                    min_progress,
                    max_progress))

        self.line_matchers_ = line_matchers
        self.min_progress_ = min_progress
        self.max_progress_ = max_progress
        # absolute range defaults to the whole [0, 1] until the owner calls
        # update_absolute_progress_range.
        self.absolute_min_progress_ = 0.0
        self.absolute_max_progress_ = 1.0
        self.absolute_progress_diff_ = 1.0
        self.filename_ = filename

    def update_absolute_progress_range(self, min_progress, max_progress):
        """update the min progress and max progress the log file indicates."""
        progress_diff = max_progress - min_progress
        self.absolute_min_progress_ = (
            min_progress + self.min_progress_ * progress_diff)
        self.absolute_max_progress_ = (
            min_progress + self.max_progress_ * progress_diff)
        self.absolute_progress_diff_ = (
            self.absolute_max_progress_ - self.absolute_min_progress_)

    def __str__(self):
        return (
            '%s[ filename: %s, progress range: [%s:%s], '
            'line_matchers: %s]' % (
                self.__class__.__name__, self.filename_,
                self.absolute_min_progress_,
                self.absolute_max_progress_, self.line_matchers_)
        )

    def update_total_progress(self, file_progress, total_progress):
        """Get the total progress from file progress.

        :param file_progress: Progress derived from this file alone.
        :param total_progress: Progress instance to fold the result into.
        """
        if not file_progress.message:
            logging.info(
                'ignore update file %s progress %s to total progress',
                self.filename_, file_progress)
            return

        # map file-local progress [0, 1] into this file's absolute range,
        # clamped at the range's upper bound.
        total_progress_data = min(
            (
                self.absolute_min_progress_ + (
                    file_progress.progress * self.absolute_progress_diff_
                )
            ),
            self.absolute_max_progress_
        )

        # total progress should only be updated when the new calculated
        # progress is greater than the recorded total progress or the
        # progress to update is the same but the message is different.
        if (
            total_progress.progress < total_progress_data or (
                total_progress.progress == total_progress_data and
                total_progress.message != file_progress.message
            )
        ):
            total_progress.progress = total_progress_data
            total_progress.message = file_progress.message
            total_progress.severity = file_progress.severity
            logging.debug('update file %s total progress %s',
                          self.filename_, total_progress)
        else:
            logging.info(
                'ignore update file %s progress %s to total progress %s',
                self.filename_, file_progress, total_progress)

    def update_progress(self, fullname, total_progress):
        """update progress from file.

        :param fullname: the fullname of the installing host.
        :type fullname: str
        :param total_progress: Progress instance to update.

        the function update installing progress by reading the log file.
        It contains a list of line matcher, when one log line matches
        with current line matcher, the installing progress is updated.
        and the current line matcher got updated.
        Notes: some line may be processed multi times. The case is the
        last line of log file is processed in one run, while in the other
        run, it will be reprocessed at the beginning because there is
        no line end indicator for the last line of the file.
        """
        file_reader = FILE_READER_FACTORY.get_file_reader(
            fullname, self.filename_)
        if not file_reader:
            return

        line_matcher_name, file_progress = file_reader.get_history()
        for line in file_reader.readline():
            if line_matcher_name not in self.line_matchers_:
                logging.debug('early exit at\n%s\nbecause %s is not in %s',
                              line, line_matcher_name, self.line_matchers_)
                break

            # keep re-matching the SAME line while matchers chain through
            # the "sameline" name; the "nextline" name carries forward.
            index = line_matcher_name
            while index in self.line_matchers_:
                line_matcher = self.line_matchers_[index]
                index, line_matcher_name = line_matcher.update_progress(
                    line, file_progress)

        file_reader.update_history(line_matcher_name, file_progress)
        self.update_total_progress(file_progress, total_progress)

View File

@ -1,230 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to get the progress when found match with a line of the log."""
import logging
import re
from abc import ABCMeta
from compass.utils import util
class Progress(object):
    """Value object holding an installing progress, message and severity."""

    def __init__(self, progress, message, severity):
        """Constructor

        :param progress: installing progress between 0 to 1.
        :param message: installing message.
        :param severity: installing message severity.
        """
        self.progress = progress
        self.message = message
        self.severity = severity

    def __repr__(self):
        return '%s[progress:%s, message:%s, severity:%s]' % (
            type(self).__name__, self.progress,
            self.message, self.severity)
class ProgressCalculator(object):
    """Base class that knows how to push updates into a Progress object."""

    __metaclass__ = ABCMeta

    @classmethod
    def update_progress(
        cls, progress_data, message,
        severity, progress
    ):
        """Update progress with the given progress_data, message and severity.

        :param progress_data: installing progress.
        :type progress_data: float between 0 to 1.
        :param message: installing progress message.
        :param severity: installing message severity.
        :param progress: :class:`Progress` instance to update
        """
        # Only move forward, or refresh the message at the same progress.
        moved_forward = progress_data > progress.progress
        same_with_new_message = (
            progress_data == progress.progress and
            message != progress.message
        )
        if not (moved_forward or same_with_new_message):
            logging.info('ignore update progress %s to %s',
                         progress_data, progress)
            return

        progress.progress = progress_data
        if message:
            progress.message = message

        if severity:
            progress.severity = severity

        logging.debug('update progress to %s', progress)

    def update(self, message, severity, progress):
        """Virtual method to update progress by message and severity.

        :param message: installing message.
        :param severity: installing severity.
        """
        raise NotImplementedError(str(self))

    def __repr__(self):
        return self.__class__.__name__
class IncrementalProgress(ProgressCalculator):
    """Calculator that bumps the progress by a fixed step per update."""

    def __init__(self, min_progress,
                 max_progress, incremental_ratio):
        """Constructor

        :param min_progress: lower clamp for the resulting progress.
        :param max_progress: upper clamp for the resulting progress.
        :param incremental_ratio: fraction of the [min, max] span added
            on each update.
        :raises IndexError: when the bounds or the ratio are out of range.
        """
        super(IncrementalProgress, self).__init__()
        if not 0.0 <= min_progress <= max_progress <= 1.0:
            # fix: error message said "not mat" instead of "not met".
            raise IndexError(
                '%s restriction is not met: 0.0 <= min_progress(%s)'
                ' <= max_progress(%s) <= 1.0' % (
                    self.__class__.__name__, min_progress, max_progress))

        if not 0.0 <= incremental_ratio <= 1.0:
            # fix: same "not mat" -> "not met" typo.
            raise IndexError(
                '%s restriction is not met: '
                '0.0 <= incremental_ratio(%s) <= 1.0' % (
                    self.__class__.__name__, incremental_ratio))

        self.min_progress_ = min_progress
        self.max_progress_ = max_progress
        # absolute step added to the progress on each update.
        self.incremental_progress_ = (
            incremental_ratio * (max_progress - min_progress))

    def __str__(self):
        return '%s[%s:%s:%s]' % (
            self.__class__.__name__,
            self.min_progress_,
            self.max_progress_,
            self.incremental_progress_
        )

    def update(self, message, severity, progress):
        """update progress from message and severity."""
        progress_data = max(
            self.min_progress_,
            min(
                self.max_progress_,
                progress.progress + self.incremental_progress_
            )
        )
        self.update_progress(progress_data,
                             message, severity, progress)
class RelativeProgress(ProgressCalculator):
    """Calculator that jumps the progress to a fixed absolute value."""

    def __init__(self, progress):
        """Constructor

        :param progress: the target progress value, between 0 and 1.
        :raises IndexError: when progress is out of [0.0, 1.0].
        """
        super(RelativeProgress, self).__init__()
        if not 0.0 <= progress <= 1.0:
            # fix: error message said "not mat" instead of "not met".
            raise IndexError(
                '%s restriction is not met: 0.0 <= progress(%s) <= 1.0' % (
                    self.__class__.__name__, progress))

        self.progress_ = progress

    def __str__(self):
        return '%s[%s]' % (self.__class__.__name__, self.progress_)

    def update(self, message, severity, progress):
        """update progress from message and severity."""
        self.update_progress(
            self.progress_, message, severity, progress)
class SameProgress(ProgressCalculator):
    """Calculator that keeps the progress value unchanged.

    Only the message and severity of the Progress get refreshed.
    """

    def update(self, message, severity, progress):
        """Forward the current progress with the new message/severity."""
        self.update_progress(progress.progress, message,
                             severity, progress)
class LineMatcher(object):
    """Progress matcher for each line."""

    def __init__(self, pattern, progress=None,
                 message_template='', severity=None,
                 unmatch_sameline_next_matcher_name='',
                 unmatch_nextline_next_matcher_name='',
                 match_sameline_next_matcher_name='',
                 match_nextline_next_matcher_name=''):
        """Constructor

        :param pattern: regex pattern searched in each log line.
        :param progress: a ProgressCalculator, a number (treated as a
            relative progress target), or None to keep the current
            progress and only refresh the message.
        :param message_template: %-format template applied to the regex's
            named groups to build the progress message.
        :param severity: severity to record on a match, if any.
        :param unmatch_sameline_next_matcher_name: matcher to retry on the
            SAME line when this pattern does not match.
        :param unmatch_nextline_next_matcher_name: matcher for the NEXT
            line when this pattern does not match.
        :param match_sameline_next_matcher_name: matcher to retry on the
            SAME line after a match.
        :param match_nextline_next_matcher_name: matcher for the NEXT line
            after a match.
        :raises TypeError: when progress is of an unsupported type.
        """
        self.regex_ = re.compile(pattern)
        if not progress:
            self.progress_ = SameProgress()
        elif isinstance(progress, ProgressCalculator):
            self.progress_ = progress
        elif util.is_instance(progress, [int, float]):
            # a bare number means "jump to this progress value".
            self.progress_ = RelativeProgress(progress)
        else:
            raise TypeError(
                'progress unsupport type %s: %s' % (
                    type(progress), progress))

        self.message_template_ = message_template
        self.severity_ = severity
        self.unmatch_sameline_ = unmatch_sameline_next_matcher_name
        self.unmatch_nextline_ = unmatch_nextline_next_matcher_name
        self.match_sameline_ = match_sameline_next_matcher_name
        self.match_nextline_ = match_nextline_next_matcher_name

    def __str__(self):
        return '%s[pattern:%r, message_template:%r, severity:%r]' % (
            self.__class__.__name__, self.regex_.pattern,
            self.message_template_, self.severity_)

    def update_progress(self, line, progress):
        """Update progress by the line.

        :param line: one line in log file to indicate the installing progress.

        .. note::
           The line may be partial if the latest line of the log file is
           not the whole line. But the whole line may be resent
           in the next run.

        :param progress: the :class:`Progress` instance to update.
        :returns: (sameline_next_matcher_name, nextline_next_matcher_name)
        """
        mat = self.regex_.search(line)
        if not mat:
            return (
                self.unmatch_sameline_,
                self.unmatch_nextline_)

        try:
            message = self.message_template_ % mat.groupdict()
        except Exception as error:
            logging.error('failed to get message %s %% %s in line matcher %s',
                          self.message_template_, mat.groupdict(), self)
            raise error

        self.progress_.update(message, self.severity_, progress)
        return (
            self.match_sameline_,
            self.match_nextline_)

View File

@ -1,455 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""module to provide updating installing process function.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import logging
from compass.log_analyzor.adapter_matcher import AdapterItemMatcher
from compass.log_analyzor.adapter_matcher import AdapterMatcher
from compass.log_analyzor.adapter_matcher import OSMatcher
from compass.log_analyzor.adapter_matcher import PackageMatcher
from compass.log_analyzor.file_matcher import FileMatcher
from compass.log_analyzor.line_matcher import IncrementalProgress
from compass.log_analyzor.line_matcher import LineMatcher
# TODO(weidong): reconsider intialization method for the following.
# Per-OS log matching configuration: which log files to follow and how
# each log line advances the install progress.
# TODO(weidong): reconsider intialization method for the following.
OS_INSTALLER_CONFIGURATIONS = {
    'Ubuntu': AdapterItemMatcher(
        file_matchers=[
            FileMatcher(
                filename='syslog',
                min_progress=0.0,
                max_progress=1.0,
                line_matchers={
                    'start': LineMatcher(
                        pattern=r'.*',
                        progress=.05,
                        message_template='start installing',
                        unmatch_nextline_next_matcher_name='start',
                        match_nextline_next_matcher_name='ethdetect'
                    ),
                    'ethdetect': LineMatcher(
                        pattern=r'Menu.*item.*\'ethdetect\'.*selected',
                        progress=.1,
                        message_template='ethdetect selected',
                        unmatch_nextline_next_matcher_name='ethdetect',
                        match_nextline_next_matcher_name='netcfg'
                    ),
                    'netcfg': LineMatcher(
                        pattern=r'Menu.*item.*\'netcfg\'.*selected',
                        progress=.12,
                        message_template='netcfg selected',
                        unmatch_nextline_next_matcher_name='netcfg',
                        match_nextline_next_matcher_name='network-preseed'
                    ),
                    'network-preseed': LineMatcher(
                        pattern=r'Menu.*item.*\'network-preseed\'.*selected',
                        progress=.15,
                        message_template='network-preseed selected',
                        unmatch_nextline_next_matcher_name='network-preseed',
                        match_nextline_next_matcher_name='localechooser'
                    ),
                    # fix: key was 'localechoose' while the chain above and
                    # the unmatch name below reference 'localechooser',
                    # which dead-ended the matcher chain here.
                    'localechooser': LineMatcher(
                        pattern=r'Menu.*item.*\'localechooser\'.*selected',
                        progress=.18,
                        message_template='localechooser selected',
                        unmatch_nextline_next_matcher_name='localechooser',
                        match_nextline_next_matcher_name='download-installer'
                    ),
                    'download-installer': LineMatcher(
                        pattern=(
                            r'Menu.*item.*\'download-installer\'.*selected'
                        ),
                        progress=.2,
                        message_template='download installer selected',
                        unmatch_nextline_next_matcher_name=(
                            'download-installer'),
                        match_nextline_next_matcher_name='clock-setup'
                    ),
                    'clock-setup': LineMatcher(
                        pattern=r'Menu.*item.*\'clock-setup\'.*selected',
                        progress=.3,
                        message_template='clock-setup selected',
                        unmatch_nextline_next_matcher_name='clock-setup',
                        match_nextline_next_matcher_name='disk-detect'
                    ),
                    'disk-detect': LineMatcher(
                        pattern=r'Menu.*item.*\'disk-detect\'.*selected',
                        progress=.32,
                        message_template='disk-detect selected',
                        unmatch_nextline_next_matcher_name='disk-detect',
                        match_nextline_next_matcher_name='partman-base'
                    ),
                    'partman-base': LineMatcher(
                        pattern=r'Menu.*item.*\'partman-base\'.*selected',
                        progress=.35,
                        message_template='partman-base selected',
                        unmatch_nextline_next_matcher_name='partman-base',
                        match_nextline_next_matcher_name='live-installer'
                    ),
                    'live-installer': LineMatcher(
                        pattern=r'Menu.*item.*\'live-installer\'.*selected',
                        progress=.45,
                        message_template='live-installer selected',
                        unmatch_nextline_next_matcher_name='live-installer',
                        match_nextline_next_matcher_name='pkgsel'
                    ),
                    'pkgsel': LineMatcher(
                        pattern=r'Menu.*item.*\'pkgsel\'.*selected',
                        progress=.5,
                        message_template='pkgsel selected',
                        unmatch_nextline_next_matcher_name='pkgsel',
                        match_nextline_next_matcher_name='grub-installer'
                    ),
                    'grub-installer': LineMatcher(
                        pattern=r'Menu.*item.*\'grub-installer\'.*selected',
                        progress=.9,
                        message_template='grub-installer selected',
                        unmatch_nextline_next_matcher_name='grub-installer',
                        match_nextline_next_matcher_name='finish-install'
                    ),
                    'finish-install': LineMatcher(
                        pattern=r'Menu.*item.*\'finish-install\'.*selected',
                        progress=.95,
                        message_template='finish-install selected',
                        unmatch_nextline_next_matcher_name='finish-install',
                        match_nextline_next_matcher_name='finish-install-done'
                    ),
                    'finish-install-done': LineMatcher(
                        pattern=r'Running.*finish-install.d/.*save-logs',
                        progress=1.0,
                        message_template='finish-install is done',
                        unmatch_nextline_next_matcher_name=(
                            'finish-install-done'
                        ),
                        match_nextline_next_matcher_name='exit'
                    ),
                }
            ),
            FileMatcher(
                filename='status',
                min_progress=.2,
                max_progress=.3,
                line_matchers={
                    'start': LineMatcher(
                        pattern=r'Package: (?P<package>.*)',
                        progress=IncrementalProgress(0.0, 0.99, 0.05),
                        message_template='Installing udeb %(package)s',
                        unmatch_nextline_next_matcher_name='start',
                        match_nextline_next_matcher_name='start'
                    )
                }
            ),
            FileMatcher(
                filename='initial-status',
                min_progress=.5,
                max_progress=.9,
                line_matchers={
                    'start': LineMatcher(
                        pattern=r'Package: (?P<package>.*)',
                        progress=IncrementalProgress(0.0, 0.99, 0.01),
                        message_template='Installing deb %(package)s',
                        unmatch_nextline_next_matcher_name='start',
                        match_nextline_next_matcher_name='start'
                    )
                }
            ),
        ]
    ),
    'CentOS': AdapterItemMatcher(
        file_matchers=[
            FileMatcher(
                filename='sys.log',
                min_progress=0.0,
                max_progress=0.1,
                line_matchers={
                    'start': LineMatcher(
                        pattern=r'NOTICE (?P<message>.*)',
                        progress=IncrementalProgress(.1, .9, .1),
                        message_template='%(message)s',
                        unmatch_nextline_next_matcher_name='start',
                        match_nextline_next_matcher_name='exit'
                    ),
                }
            ),
            FileMatcher(
                filename='anaconda.log',
                min_progress=0.1,
                max_progress=1.0,
                line_matchers={
                    'start': LineMatcher(
                        pattern=r'setting.*up.*kickstart',
                        progress=.1,
                        message_template=(
                            'Setting up kickstart configurations'),
                        unmatch_nextline_next_matcher_name='start',
                        match_nextline_next_matcher_name='STEP_STAGE2'
                    ),
                    'STEP_STAGE2': LineMatcher(
                        pattern=r'starting.*STEP_STAGE2',
                        progress=.15,
                        message_template=(
                            'Downloading installation '
                            'images from server'),
                        unmatch_nextline_next_matcher_name='STEP_STAGE2',
                        match_nextline_next_matcher_name='start_anaconda'
                    ),
                    'start_anaconda': LineMatcher(
                        pattern=r'Running.*anaconda.*script',
                        progress=.2,
                        unmatch_nextline_next_matcher_name=(
                            'start_anaconda'),
                        match_nextline_next_matcher_name=(
                            'start_kickstart_pre')
                    ),
                    'start_kickstart_pre': LineMatcher(
                        pattern=r'Running.*kickstart.*pre.*script',
                        progress=.25,
                        unmatch_nextline_next_matcher_name=(
                            'start_kickstart_pre'),
                        match_nextline_next_matcher_name=(
                            'kickstart_pre_done')
                    ),
                    'kickstart_pre_done': LineMatcher(
                        pattern=(
                            r'All.*kickstart.*pre.*script.*have.*been.*run'),
                        progress=.3,
                        unmatch_nextline_next_matcher_name=(
                            'kickstart_pre_done'),
                        match_nextline_next_matcher_name=(
                            'start_enablefilesystem')
                    ),
                    'start_enablefilesystem': LineMatcher(
                        pattern=r'moving.*step.*enablefilesystems',
                        progress=0.3,
                        message_template=(
                            'Performing hard-disk partitioning and '
                            'enabling filesystems'),
                        unmatch_nextline_next_matcher_name=(
                            'start_enablefilesystem'),
                        match_nextline_next_matcher_name=(
                            'enablefilesystem_done')
                    ),
                    'enablefilesystem_done': LineMatcher(
                        pattern=r'leaving.*step.*enablefilesystems',
                        progress=.35,
                        message_template='Filesystems are enabled',
                        unmatch_nextline_next_matcher_name=(
                            'enablefilesystem_done'),
                        match_nextline_next_matcher_name=(
                            'setup_repositories')
                    ),
                    'setup_repositories': LineMatcher(
                        pattern=r'moving.*step.*reposetup',
                        progress=0.35,
                        message_template=(
                            'Setting up Customized Repositories'),
                        unmatch_nextline_next_matcher_name=(
                            'setup_repositories'),
                        match_nextline_next_matcher_name=(
                            'repositories_ready')
                    ),
                    'repositories_ready': LineMatcher(
                        pattern=r'leaving.*step.*reposetup',
                        progress=0.4,
                        message_template=(
                            'Customized Repositories setting up are done'),
                        unmatch_nextline_next_matcher_name=(
                            'repositories_ready'),
                        match_nextline_next_matcher_name='checking_dud'
                    ),
                    'checking_dud': LineMatcher(
                        pattern=r'moving.*step.*postselection',
                        progress=0.4,
                        message_template='Checking DUD modules',
                        unmatch_nextline_next_matcher_name='checking_dud',
                        match_nextline_next_matcher_name='dud_checked'
                    ),
                    'dud_checked': LineMatcher(
                        pattern=r'leaving.*step.*postselection',
                        progress=0.5,
                        message_template='Checking DUD modules are done',
                        unmatch_nextline_next_matcher_name='dud_checked',
                        match_nextline_next_matcher_name='installing_packages'
                    ),
                    'installing_packages': LineMatcher(
                        pattern=r'moving.*step.*installpackages',
                        progress=0.5,
                        message_template='Installing packages',
                        unmatch_nextline_next_matcher_name=(
                            'installing_packages'),
                        match_nextline_next_matcher_name=(
                            'packages_installed')
                    ),
                    'packages_installed': LineMatcher(
                        pattern=r'leaving.*step.*installpackages',
                        progress=0.8,
                        message_template='Packages are installed',
                        unmatch_nextline_next_matcher_name=(
                            'packages_installed'),
                        match_nextline_next_matcher_name=(
                            'installing_bootloader')
                    ),
                    'installing_bootloader': LineMatcher(
                        pattern=r'moving.*step.*instbootloader',
                        progress=0.9,
                        message_template='Installing bootloaders',
                        unmatch_nextline_next_matcher_name=(
                            'installing_bootloader'),
                        match_nextline_next_matcher_name=(
                            'bootloader_installed'),
                    ),
                    'bootloader_installed': LineMatcher(
                        pattern=r'leaving.*step.*instbootloader',
                        progress=1.0,
                        message_template='bootloaders is installed',
                        unmatch_nextline_next_matcher_name=(
                            'bootloader_installed'),
                        match_nextline_next_matcher_name='exit'
                    ),
                }
            ),
            FileMatcher(
                filename='install.log',
                min_progress=0.56,
                max_progress=0.80,
                line_matchers={
                    'start': LineMatcher(
                        pattern=r'Installing (?P<package>.*)',
                        progress=IncrementalProgress(0.0, 0.99, 0.005),
                        message_template='Installing %(package)s',
                        unmatch_sameline_next_matcher_name='package_complete',
                        unmatch_nextline_next_matcher_name='start',
                        match_nextline_next_matcher_name='start'
                    ),
                    'package_complete': LineMatcher(
                        pattern='FINISHED.*INSTALLING.*PACKAGES',
                        progress=1.0,
                        message_template='installing packages finished',
                        unmatch_nextline_next_matcher_name='start',
                        match_nextline_next_matcher_name='exit'
                    ),
                }
            ),
        ]
    ),
}
# Package-installer log matching configuration: track chef-client.log to
# derive the progress of the 'openstack' target system deployment.
PACKAGE_INSTALLER_CONFIGURATIONS = {
    'openstack': AdapterItemMatcher(
        file_matchers=[
            FileMatcher(
                filename='chef-client.log',
                min_progress=0.1,
                max_progress=1.0,
                line_matchers={
                    # each processed chef resource bumps progress a little;
                    # a non-matching line is retried against chef_complete.
                    'start': LineMatcher(
                        pattern=(
                            r'Processing\s*(?P<install_type>.*)'
                            r'\[(?P<package>.*)\].*'),
                        progress=IncrementalProgress(0.0, .90, 0.005),
                        message_template=(
                            'Processing %(install_type)s %(package)s'),
                        unmatch_sameline_next_matcher_name=(
                            'chef_complete'),
                        unmatch_nextline_next_matcher_name='start',
                        match_nextline_next_matcher_name='start'
                    ),
                    'chef_complete': LineMatcher(
                        pattern=r'Chef.*Run.*complete',
                        progress=1.0,
                        message_template='Chef run complete',
                        unmatch_nextline_next_matcher_name='start',
                        match_nextline_next_matcher_name='exit'
                    ),
                }
            ),
        ]
    ),
}
# The known adapters: each pairs an OS matcher (first 60% of the overall
# progress) with a package matcher (remaining 40%).
ADAPTER_CONFIGURATIONS = [
    AdapterMatcher(
        os_matcher=OSMatcher(
            os_installer_name='cobbler',
            os_pattern='CentOS.*',
            item_matcher=OS_INSTALLER_CONFIGURATIONS['CentOS'],
            min_progress=0.0,
            max_progress=0.6),
        package_matcher=PackageMatcher(
            package_installer_name='chef',
            target_system='openstack',
            item_matcher=PACKAGE_INSTALLER_CONFIGURATIONS['openstack'],
            min_progress=0.6,
            max_progress=1.0)
    ),
    AdapterMatcher(
        os_matcher=OSMatcher(
            os_installer_name='cobbler',
            os_pattern='Ubuntu.*',
            item_matcher=OS_INSTALLER_CONFIGURATIONS['Ubuntu'],
            min_progress=0.0,
            max_progress=0.6),
        package_matcher=PackageMatcher(
            package_installer_name='chef',
            target_system='openstack',
            item_matcher=PACKAGE_INSTALLER_CONFIGURATIONS['openstack'],
            min_progress=0.6,
            max_progress=1.0)
    ),
]
def _get_adapter_matcher(
    os_installer, os_name,
    package_installer, target_system
):
    """Get adapter matcher by os name and package installer name.

    :returns: the first matching AdapterMatcher, or None when nothing fits.
    """
    for configuration in ADAPTER_CONFIGURATIONS:
        matched = configuration.match(os_installer, os_name,
                                      package_installer, target_system)
        if matched:
            return configuration

        logging.debug('configuration %s does not match %s and %s',
                      configuration, os_name, target_system)

    logging.error('No configuration found with os installer %s os %s '
                  'package_installer %s, target_system %s',
                  os_installer, os_name, package_installer, target_system)
    return None
def update_progress(os_installer, os_names, package_installer, target_systems,
                    cluster_hosts):
    """Update adapter installing progress.

    :param os_installer: os installer name
    :param package_installer: package installer name.
    :param cluster_hosts: clusters and hosts in each cluster to update.
    :param cluster_hosts: dict of int to list of int.
    """
    for clusterid, hostids in cluster_hosts.items():
        matcher = _get_adapter_matcher(
            os_installer, os_names[clusterid],
            package_installer, target_systems[clusterid])
        # clusters without a matching adapter are silently skipped.
        if matcher:
            matcher.update_progress(clusterid, hostids)

View File

@ -20,12 +20,9 @@ import logging
from celery.signals import setup_logging
from compass.actions import clean_deployment
from compass.actions import clean_installing_progress
from compass.actions import deploy
from compass.actions import poll_switch
from compass.actions import reinstall
from compass.actions import update_progress
from compass.tasks.client import celery
from compass.utils import flags
from compass.utils import logsetting
@ -43,18 +40,22 @@ setup_logging.connect(tasks_setup_logging)
@celery.task(name='compass.tasks.pollswitch')
def pollswitch(ip_addr, req_obj='mac', oper='SCAN'):
def pollswitch(ip_addr, credentials, req_obj='mac', oper='SCAN'):
"""Query switch and return expected result.
:param ip_addr: switch ip address.
:type ip_addr: str
:param credentials: switch credentials
:type credentials: dict
:param reqObj: the object requested to query from switch.
:type reqObj: str
:param oper: the operation to query the switch (SCAN, GET, SET).
:type oper: str
"""
try:
poll_switch.poll_switch(ip_addr, req_obj=req_obj, oper=oper)
poll_switch.poll_switch(
ip_addr, credentials, req_obj=req_obj, oper=oper
)
except Exception as error:
logging.exception(error)
@ -83,42 +84,3 @@ def reinstall_clusters(cluster_hosts):
reinstall.reinstall(cluster_hosts)
except Exception as error:
logging.exception(error)
@celery.task(name='compass.tasks.clean_deployment')
def clean_clusters_deployment(cluster_hosts):
    """Celery task: clean deployment of the given clusters.

    :param cluster_hosts: the cluster and hosts of each cluster to clean.
    :type cluster_hosts: dict of int to list of int
    """
    try:
        clean_deployment.clean_deployment(cluster_hosts)
    except Exception as error:
        # Best-effort task: log and swallow so one failed clean-up
        # does not crash the celery worker.
        logging.exception(error)
@celery.task(name='compass.tasks.clean_installing_progress')
def clean_clusters_installing_progress(cluster_hosts):
    """Celery task: clean installing progress of the given clusters.

    :param cluster_hosts: the cluster and hosts of each cluster to clean.
    :type cluster_hosts: dict of int to list of int
    """
    try:
        clean_installing_progress.clean_installing_progress(cluster_hosts)
    except Exception as error:
        # Best-effort task: log and swallow so one failure does not
        # crash the celery worker.
        logging.exception(error)
@celery.task(name='compass.tasks.update_progress')
def update_clusters_progress(cluster_hosts):
    """Celery task: calculate the installing progress of the given clusters.

    :param cluster_hosts: the cluster and hosts of each cluster to update.
    :type cluster_hosts: dict of int to list of int
    """
    try:
        update_progress.update_progress(cluster_hosts)
    except Exception as error:
        # Best-effort task: log and swallow so a failed progress
        # calculation does not crash the celery worker.
        logging.exception(error)

View File

@ -6,7 +6,7 @@
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@ -15,44 +15,45 @@
# limitations under the License.
"""test api module."""
import simplejson as json
import celery
import copy
import mock
import os
import unittest2
from compass.api import app
from compass.api.exception import ItemNotFound
os.environ['COMPASS_IGNORE_SETTING'] = 'true'
from compass.utils import setting_wrapper as setting
reload(setting)
# from compass.api import app
from compass.db.api import database
app.config['TESTING'] = True
from compass.utils import flags
from compass.utils import logsetting
from compass.utils import util
class ApiTestCase(unittest2.TestCase):
"""base api test class."""
DATABASE_URL = 'sqlite://'
def setUp(self):
super(ApiTestCase, self).setUp()
database.init(self.DATABASE_URL)
logsetting.init()
database.init('sqlite://')
database.create_db()
self.test_client = app.test_client()
def tearDown(self):
database.drop_db()
super(ApiTestCase, self).tearDown()
def test_get_user(self):
url = "/v1.0/users/1"
return_value = self.test_client.get(url)
data = json.loads(return_value.get_data())
excepted_code = 200
self.assertEqual(return_value.status_code, excepted_code)
def test_login(self):
pass
self.assertEqual(1, data['id'])
self.assertEqual("admin@abc.com", data['email'])
url = "/v1.0/users/2"
return_value = self.test_client.get(url)
excepted_code = 404
self.assertEqual(return_value.status_code, excepted_code)
if __name__ == '__main__':
flags.init()
logsetting.init()
unittest2.main()

View File

@ -16,6 +16,7 @@
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import datetime
import logging
import os
@ -31,12 +32,6 @@ CONFIG_FILE_FORMAT = 'python'
DATABASE_TYPE = 'file'
DATABASE_FILE = ''
SQLALCHEMY_DATABASE_URI = 'sqlite://'
OS_INSTALLER = 'cobbler'
COBBLER_INSTALLER_URL = ''
COBBLER_INSTALLER_TOKEN = ['cobbler', 'cobbler']
PACKAGE_INSTALLER = 'chef'
CHEF_INSTALLER_URL = ''
CHEF_GLOBAL_DATABAG_NAME = 'env_default'
INSTALLATION_LOGDIR = ''
DEFAULT_LOGLEVEL = 'info'
DEFAULT_LOGDIR = '/tmp'
@ -53,7 +48,29 @@ POLLSWITCH_INTERVAL = 60
SWITCHES = [
]
USER_SECRET_KEY = datetime.datetime.now().isoformat()
USER_AUTH_HEADER_NAME = 'X-Auth-Token'
USER_TOKEN_DURATION = '2h'
COMPASS_ADMIN_EMAIL = 'admin@abc.com'
COMPASS_ADMIN_PASSWORD = 'admin'
COMPASS_DEFAULT_PERMISSIONS = [
'list_permissions',
]
SWITCHES_DEFAULT_FILTERS = []
DEFAULT_SWITCH_IP = '0.0.0.0'
DEFAULT_SWITCH_PORT = 0
OS_INSTALLER_DIR = '/etc/compass/os_installer'
PACKAGE_INSTALLER_DIR = '/etc/compass/package_installer'
OS_DIR = '/etc/compass/os'
DISTRIBUTED_SYSTEM_DIR = '/etc/compass/distributed_system'
OS_ADAPTER_DIR = '/etc/compass/os_adapter'
PACKAGE_ADAPTER_DIR = '/etc/compass/package_adapter'
OS_METADATA_DIR = '/etc/compass/os_metadata'
PACKAGE_METADATA_DIR = '/etc/compass/package_metadata'
OS_FIELD_DIR = '/etc/compass/os_field'
PACKAGE_FIELD_DIR = '/etc/compass/package_field'
PACKAGE_ROLE_DIR = '/etc/compass/role'
VALIDATOR_DIR = '/etc/compass/validator'
if (
'COMPASS_IGNORE_SETTING' in os.environ and
os.environ['COMPASS_IGNORE_SETTING']

View File

@ -17,7 +17,67 @@
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
import copy
import crypt
import datetime
import logging
import os
import os.path
import re
import sys
def parse_datetime(date_time, exception_class=Exception):
"""Parse datetime str to get datetime object."""
try:
return datetime.datetime.strptime(
date_time, '%Y-%m-%d %H:%M:%S'
)
except Exception as error:
logging.exception(error)
raise exception_class(
'date time %s format is invalid' % date_time
)
def parse_datetime_range(date_time_range, exception_class=Exception):
"""parse datetime range str to pair of datetime objects."""
try:
start, end = date_time_range.split(',')
except Exception as error:
logging.exception(error)
raise exception_class(
'there is no `,` in date time range %s' % date_time_range
)
if start:
start_datetime = parse_datetime(start, exception_class)
else:
start_datetime = None
if end:
end_datetime = parse_datetime(end, exception_class)
else:
end_datetime = None
return start_datetime, end_datetime
def parse_request_arg_dict(arg, exception_class=Exception):
"""parse string to dict."""
arg_dict = {}
arg_pairs = arg.split(';')
for arg_pair in arg_pairs:
try:
arg_name, arg_value = arg_pair.split('=', 1)
except Exception as error:
logging.exception(error)
raise exception_class(
'there is no `=` in %s' % arg_pair
)
arg_dict[arg_name] = arg_value
return arg_dict
def format_datetime(date_time):
"""Generate string from datetime object."""
return date_time.strftime("%Y-%m-%d %H:%M:%S")
def merge_dict(lhs, rhs, override=True):
@ -28,330 +88,105 @@ def merge_dict(lhs, rhs, override=True):
:param rhs: dict to merge from.
:type rhs: dict
:param override: the value in rhs overide the value in left if True.
:type override: str
:raises: TypeError if lhs or rhs is not a dict.
:type override: boolean
"""
if not rhs:
return
if not isinstance(lhs, dict):
raise TypeError('lhs type is %s while expected is dict' % type(lhs),
lhs)
if not isinstance(rhs, dict):
raise TypeError('rhs type is %s while expected is dict' % type(rhs),
rhs)
if not isinstance(lhs, dict) or not isinstance(rhs, dict):
if override:
return rhs
else:
return lhs
for key, value in rhs.items():
if (
isinstance(value, dict) and key in lhs and
isinstance(lhs[key], dict)
):
merge_dict(lhs[key], value, override)
if key not in lhs:
lhs[key] = rhs[key]
else:
if override or key not in lhs:
lhs[key] = copy.deepcopy(value)
lhs[key] = merge_dict(lhs[key], value, override)
return lhs
def order_keys(keys, orders):
"""Get ordered keys.
:param keys: keys to be sorted.
:type keys: list of str
:param orders: the order of the keys. '.' is all other keys not in order.
:type orders: list of str.
:returns: keys as list sorted by orders.
:raises: TypeError if keys or orders is not list.
"""
if not isinstance(keys, list):
raise TypeError('keys %s type should be list' % keys)
if not isinstance(orders, list):
raise TypeError('orders ^s type should be list' % orders)
found_dot = False
pres = []
posts = []
for order in orders:
if order == '.':
found_dot = True
def encrypt(value, crypt_method=None):
"""Get encrypted value."""
if not crypt_method:
if hasattr(crypt, 'METHOD_MD5'):
crypt_method = crypt.METHOD_MD5
else:
if found_dot:
posts.append(order)
else:
pres.append(order)
# for python2.7, copy python2.6 METHOD_MD5 logic here.
from random import choice
import string
return ([pre for pre in pres if pre in keys] +
[key for key in keys if key not in orders] +
[post for post in posts if post in keys])
_saltchars = string.ascii_letters + string.digits + './'
def _mksalt():
"""generate salt."""
salt = '$1$'
salt += ''.join(choice(_saltchars) for _ in range(8))
return salt
crypt_method = _mksalt()
return crypt.crypt(value, crypt_method)
def is_instance(instance, expected_types):
"""Check instance type is in one of expected types.
def parse_time_interval(time_interval_str):
if not time_interval_str:
return 0
:param instance: instance to check the type.
:param expected_types: types to check if instance type is in them.
:type expected_types: list of type
:returns: True if instance type is in expect_types.
"""
for expected_type in expected_types:
if isinstance(instance, expected_type):
return True
return False
def flat_lists_with_possibility(lists):
"""Return list of item from list of list of identity item.
:param lists: list of list of identity item.
:returns: list.
.. note::
For each first k elements in the returned list, it should be the k
most possible items. e.g. the input lists is
['a', 'a', 'a', 'a'], ['b', 'b'], ['c'],
the expected output is ['a', 'b', 'c', 'a', 'a', 'b', 'a'].
"""
lists = copy.deepcopy(lists)
lists = sorted(lists, key=len, reverse=True)
list_possibility = []
max_index = 0
total_elements = 0
possibilities = []
for items in lists:
list_possibility.append(0.0)
length = len(items)
if length > 0:
total_elements += length
possibilities.append(1.0 / length)
else:
possibilities.append(0.0)
output = []
while total_elements > 0:
if not lists[max_index]:
list_possibility[max_index] -= total_elements
else:
list_possibility[max_index] -= possibilities[max_index]
element = lists[max_index].pop(0)
output.append(element)
total_elements -= 1
max_index = list_possibility.index(max(list_possibility))
return output
def pretty_print(*contents):
"""pretty print contents."""
if len(contents) == 0:
print ""
else:
print "\n".join(content for content in contents)
def get_clusters_from_str(clusters_str):
"""get clusters from string."""
clusters = {}
for cluster_and_hosts in clusters_str.split(';'):
if not cluster_and_hosts:
continue
if ':' in cluster_and_hosts:
cluster_str, hosts_str = cluster_and_hosts.split(
':', 1)
else:
cluster_str = cluster_and_hosts
hosts_str = ''
hosts = [
host for host in hosts_str.split(',')
if host
]
clusters[cluster_str] = hosts
return clusters
def _get_switch_ips(switch_config):
"""Helper function to get switch ips."""
ips = []
blocks = switch_config['switch_ips'].split('.')
ip_blocks_list = []
for block in blocks:
ip_blocks_list.append([])
sub_blocks = block.split(',')
for sub_block in sub_blocks:
if not sub_block:
continue
if '-' in sub_block:
start_block, end_block = sub_block.split('-', 1)
start_block = int(start_block)
end_block = int(end_block)
if start_block > end_block:
continue
ip_block = start_block
while ip_block <= end_block:
ip_blocks_list[-1].append(str(ip_block))
ip_block += 1
else:
ip_blocks_list[-1].append(sub_block)
ip_prefixes = [[]]
for ip_blocks in ip_blocks_list:
prefixes = []
for ip_block in ip_blocks:
for prefix in ip_prefixes:
prefixes.append(prefix + [ip_block])
ip_prefixes = prefixes
for prefix in ip_prefixes:
if not prefix:
continue
ips.append('.'.join(prefix))
return ips
def _get_switch_filter_ports(switch_config):
"""Helper function to get switch filter ports."""
port_pat = re.compile(r'(\D*)(\d+(?:-\d+)?)')
filter_ports = []
for port_range in switch_config['filter_ports'].split(','):
if not port_range:
continue
mat = port_pat.match(port_range)
time_interval_tuple = [
time_interval_element
for time_interval_element in time_interval_str.split(' ')
if time_interval_element
]
time_interval_dict = {}
time_interval_unit_mapping = {
'd': 'days',
'w': 'weeks',
'h': 'hours',
'm': 'minutes',
's': 'seconds'
}
for time_interval_element in time_interval_tuple:
mat = re.match(r'^([+-]?\d+)(w|d|h|m|s).*', time_interval_element)
if not mat:
filter_ports.append(port_range)
else:
port_prefix = mat.group(1)
port_range = mat.group(2)
if '-' in port_range:
start_port, end_port = port_range.split('-', 1)
start_port = int(start_port)
end_port = int(end_port)
if start_port > end_port:
continue
port = start_port
while port <= end_port:
filter_ports.append('%s%s' % (port_prefix, port))
port += 1
else:
filter_ports.append('%s%s' % (port_prefix, port_range))
return filter_ports
def get_switch_filters(switch_configs):
"""get switch filters."""
switch_filters = []
for switch_config in switch_configs:
ips = _get_switch_ips(switch_config)
filter_ports = _get_switch_filter_ports(switch_config)
for ip_addr in ips:
for filter_port in filter_ports:
switch_filters.append(
{'ip': ip_addr, 'filter_port': filter_port})
return switch_filters
def get_switch_machines_from_file(filename):
"""get switch machines from file."""
switches = []
switch_machines = {}
with open(filename) as switch_file:
for line in switch_file:
line = line.strip()
if not line:
# ignore empty line
continue
if line.startswith('#'):
# ignore comments
continue
columns = [column for column in line.split(',')]
if not columns:
# ignore empty line
continue
if columns[0] == 'switch':
(switch_ip, switch_vendor, switch_version,
switch_community, switch_state) = columns[1:]
switches.append({
'ip': switch_ip,
'vendor_info': switch_vendor,
'credential': {
'version': switch_version,
'community': switch_community,
},
'state': switch_state,
})
elif columns[0] == 'machine':
switch_ip, switch_port, vlan, mac = columns[1:]
switch_machines.setdefault(switch_ip, []).append({
'mac': mac,
'port': switch_port,
'vlan': int(vlan)
})
return (switches, switch_machines)
def get_properties_from_str(properties_str):
"""get matching properties from string."""
properties = {}
if not properties_str:
return properties
for property_str in properties_str.split(','):
if not property_str:
# ignore empty str
continue
property_name, property_value = property_str.split('=', 1)
properties[property_name] = property_value
time_interval_value = int(mat.group(1))
time_interval_unit = time_interval_unit_mapping[mat.group(2)]
time_interval_dict[time_interval_unit] = (
time_interval_dict.get(time_interval_unit, 0) + time_interval_value
)
return properties
time_interval = datetime.timedelta(**time_interval_dict)
if sys.version_info[0:2] > (2, 6):
return time_interval.total_seconds()
else:
return (
time_interval.microseconds + (
time_interval.seconds + time_interval.days * 24 * 3600
) * 1e6
) / 1e6
def get_properties_name_from_str(properties_name_str):
"""get properties name to print from string."""
properties_name = []
for property_name in properties_name_str.split(','):
if not property_name:
# ignore empty str
def load_configs(
config_dir, config_name_suffix='.conf',
env_globals={}, env_locals={}
):
configs = []
if not os.path.exists(config_dir):
logging.debug('path %s does not exist', config_dir)
return configs
for component in os.listdir(config_dir):
if not component.endswith(config_name_suffix):
continue
properties_name.append(property_name)
return properties_name
def print_properties(properties):
"""print properties."""
print '-----------------------------------------------'
for property_item in properties:
property_pairs = []
for property_name, property_value in property_item.items():
property_pairs.append('%s=%s' % (property_name, property_value))
print ','.join(property_pairs)
print '-----------------------------------------------'
path = os.path.join(config_dir, component)
config_globals = {}
config_globals.update(env_globals)
config_locals = {}
config_locals.update(env_locals)
try:
execfile(path, config_globals, config_locals)
except Exception as error:
logging.exception(error)
raise error
configs.append(config_locals)
return configs

View File

@ -1,159 +0,0 @@
#!/bin/sh
#
# compassd Compass daemon
##################################
# LSB header
### BEGIN INIT INFO
# Provides: compassd
# Required-Start: $network $httpd
# Default-Start: 3 4 5
# Default-Stop: 0 1 2 6
# Short-Description: compassd
# Description: Compass daemon service
#
### END INIT INFO
# chkconfig header
# chkconfig: 345 99 99
# description: This is a daemon that provides Compass daemon service
#
# Checking Sanity: silently do nothing (exit 0, per init-script
# convention) when the compass binaries are not installed.
[ -x /opt/compass/bin/poll_switch.py ] || exit 0
[ -x /opt/compass/bin/progress_update.py ] || exit 0
# Marker files used below to detect the distribution family.
UBUNTU=/etc/debian_version
SUSE=/etc/SuSE-release
# Source the distro-specific init helper library.
if [ -f $UBUNTU ]; then
    # Fixed path: the LSB helper library is /lib/lsb/init-functions
    # (hyphen), not init_functions.
    . /lib/lsb/init-functions
elif [ -f $SUSE -a -r /etc/rc.status ]; then
    . /etc/rc.status
else
    . /etc/rc.d/init.d/functions
fi
SERVICE=compassd
PROCESS=compassd
RETVAL=0
# Start the two compass daemons (celeryd worker and progress_update),
# using whichever service-control helpers the detected distro provides.
start() {
    echo "Starting Compass: "
    if [ -f $SUSE ]; then
        # SUSE: startproc + rc_status from /etc/rc.status.
        echo -n "Start celeryd: "
        startproc -f -p /var/run/celeryd.pid -l /tmp/celeryd.log "C_FORCE_ROOT=1 CELERY_CONFIG_MODULE=compass.utils.celeryconfig_wrapper celeryd"
        rc_status -v
        echo
        echo -n "Start service progress_update: "
        startproc -f -p /var/run/progress_update.pid -l /tmp/progress_update.log /opt/compass/bin/progress_update.py
        rc_status -v
        echo
    elif [ -e $UBUNTU ]; then
        # Ubuntu/Debian: pid-file presence is used as "already running".
        if [ -f /var/run/celeryd.pid ]; then
            echo "celeryd is already started"
            RETVAL=1
        elif C_FORCE_ROOT=1 CELERY_CONFIG_MODULE=compass.utils.celeryconfig_wrapper celeryd &> /tmp/celeryd.log; then
            echo "celeryd starts OK"
            RETVAL=0
        fi
        if [ -f /var/run/progress_update.pid ]; then
            echo "progress_update is already started"
            RETVAL=1
        elif /usr/bin/python /opt/compass/bin/progress_update.py &> /tmp/progress_update.log; then
            echo "progress_update starts OK"
            RETVAL=0
        fi
    else
        # RHEL/CentOS: daemon() from /etc/rc.d/init.d/functions; the
        # command string backgrounds the process and records its pid.
        echo -n "Start celeryd: "
        daemon --pidfile /var/run/celeryd.pid "C_FORCE_ROOT=1 CELERY_CONFIG_MODULE=compass.utils.celeryconfig_wrapper celeryd &>/tmp/celeryd.log & echo \$! > /var/run/celeryd.pid"
        RETVAL=$?
        echo
        echo -n "Start service progress_update: "
        daemon --pidfile /var/run/progress_update.pid "/opt/compass/bin/progress_update.py &>/tmp/progress_update.log & echo \$! > /var/run/progress_update.pid"
        RETVAL=$?
        echo
    fi
    echo
    return $RETVAL
}
# Stop both compass daemons via the distro's killproc helper.
# NOTE(review): on Ubuntu stop is explicitly unsupported here.
stop() {
    echo "Stopping Compass: "
    if [ -f $SUSE ]; then
        # SUSE killproc: -t is the termination timeout in seconds.
        echo -n "Stop service celeryd: "
        killproc -t 10 -p /var/run/celeryd.pid celeryd
        rc_status -v
        echo
        echo -n "Stop service progress_update: "
        killproc -t 30 -p /var/run/progress_update.pid /opt/compass/bin/progress_update.py
        rc_status -v
        echo
    elif [ -f $UBUNTU ]; then
        echo "Unsupported"
        RETVAL=1
    else
        # RHEL killproc: -d is the delay before escalating the signal.
        echo -n "Stop service celeryd: "
        killproc -p /var/run/celeryd.pid -d 30 celeryd
        RETVAL=$?
        echo
        echo -n "Stop service progress_update: "
        killproc -p /var/run/progress_update.pid -d 30 /opt/compass/bin/progress_update.py
        RETVAL=$?
        echo
    fi
}
# Restart = stop followed by start.
restart() {
    stop
    start
}
# Dispatch on the init action. start/stop/restart call the function of
# the same name; status probes both daemons per distro family.
case "$1" in
    start|stop|restart)
        $1
        ;;
    status)
        echo "Checking compass: "
        if [ -f $SUSE ]; then
            # SUSE: checkproc + rc_status report the running state.
            echo -n "Checking for service celeryd: "
            checkproc -v -p /var/run/celeryd.pid celeryd
            rc_status -v
            echo
            echo -n "Checking for service progress_update: "
            checkproc -v -p /var/run/progress_update.pid /opt/compass/bin/progress_update.py
            rc_status -v
            echo
        elif [ -f $UBUNTU ]; then
            # Ubuntu/Debian: pid-file presence alone decides the status.
            echo -n "Checking for service celeryd"
            if [ -f /var/run/celeryd.pid ]; then
                RETVAL=0
                echo "celeryd is running."
            else
                RETVAL=1
                echo "celeryd is stopped."
            fi
            echo -n "Checking for service progress_update"
            if [ -f /var/run/progress_update.pid ]; then
                RETVAL=0
                echo "progress_update is running."
            else
                RETVAL=1
                echo "progress_update is stopped."
            fi
        else
            # RHEL/CentOS: status() from the init functions library.
            echo -n "checking for service celeryd: "
            status -p /var/run/celeryd.pid celeryd
            retval=$?
            echo
            echo -n "checking for service progress_update: "
            status -p /var/run/progress_update.pid /opt/compass/bin/progress_update.py
            retval=$?
            echo
        fi
        ;;
    *)
        echo "Usage: $0 {start|stop|status|restart}"
        exit 1
        ;;
esac
exit $RETVAL

View File

@ -0,0 +1,3 @@
NAME = 'ceph'
PARENT = 'general'
DEPLOYABLE = True

View File

@ -0,0 +1,2 @@
NAME = 'general'
PARENT = ''

View File

@ -0,0 +1,3 @@
# Distributed-system definition: 'openstack', derived from 'general'.
# Spacing normalized ('NAME =' -> 'NAME = ') for consistency with the
# sibling conf files (ceph.conf, general.conf).
NAME = 'openstack'
PARENT = 'general'
# Marks this entry as selectable for deployment.
DEPLOYABLE = True

View File

@ -1,64 +0,0 @@
networking = {
'global': {
'default_no_proxy': ['127.0.0.1', 'localhost', '$compass_ip', '$compass_hostname'],
'search_path_pattern': '%(clusterid)s.%(search_path)s %(search_path)s',
'noproxy_pattern': '%(hostname)s,%(ip)s'
},
'interfaces': {
'management': {
'dns_pattern': '%(hostname)s.%(clusterid)s.%(search_path)s',
'netmask': '255.255.255.0',
'nic': 'eth0',
'promisc': 0,
},
'tenant': {
'netmask': '255.255.255.0',
'nic': 'eth0',
'dns_pattern': 'virtual-%(hostname)s.%(clusterid)s.%(search_path)s',
'promisc': 0,
},
'public': {
'netmask': '255.255.255.0',
'nic': 'eth1',
'dns_pattern': 'floating-%(hostname)s.%(clusterid)s.%(search_path)s',
'promisc': 1,
},
'storage': {
'netmask': '255.255.255.0',
'nic': 'eth0',
'dns_pattern': 'storage-%(hostname)s.%(clusterid)s.%(search_path)s',
'promisc': 0,
},
},
}
security = {
'server_credentials': {
'username': 'root',
'password': 'huawei',
},
'console_credentials': {
'username': 'admin',
'password': 'huawei',
},
'service_credentials': {
'username': 'admin',
'password': 'huawei',
},
}
role_assign_policy = {
'policy_by_host_numbers': {
},
'default': {
'roles': [],
'maxs': {},
'mins': {},
'default_max': -1,
'default_min': 0,
'exclusives': [],
'bundles': [],
},
}
testmode = $compass_testmode

2
conf/os/centos.conf Normal file
View File

@ -0,0 +1,2 @@
NAME = 'CentOS'
PARENT = 'general'

3
conf/os/centos6.5.conf Normal file
View File

@ -0,0 +1,3 @@
NAME = 'CentOS6.5'
PARENT = 'CentOS'
DEPLOYABLE = True

2
conf/os/general.conf Normal file
View File

@ -0,0 +1,2 @@
NAME = 'general'
PARENT = ''

2
conf/os/ubuntu.conf Normal file
View File

@ -0,0 +1,2 @@
NAME = 'Ubuntu'
PARENT = 'general'

3
conf/os/ubuntu12.04.conf Normal file
View File

@ -0,0 +1,3 @@
NAME = 'Ubuntu12.04'
PARENT = 'Ubuntu'
DEPLOYABLE = True

View File

@ -0,0 +1,3 @@
NAME = 'CentOS'
PARENT = 'general'
OS = 'CentOS'

View File

@ -0,0 +1,3 @@
NAME = 'CentOS(cobbler)'
PARENT = 'CentOS'
INSTALLER = 'cobbler'

View File

@ -0,0 +1,3 @@
NAME = 'CentOS6.5(cobbler)'
PARENT = 'CentOS(cobbler)'
OS = 'CentOS6.5'

View File

@ -0,0 +1,3 @@
NAME = 'Ubuntu(cobbler)'
PARENT = 'Ubuntu'
INSTALLER = 'cobbler'

View File

@ -0,0 +1,3 @@
NAME = 'Ubuntu12.04(cobbler)'
PARENT = 'Ubuntu(cobbler)'
OS = 'Ubuntu12.04'

View File

@ -0,0 +1,2 @@
NAME = 'general'
OS = 'general'

View File

@ -0,0 +1,3 @@
NAME = 'Ubuntu'
PARENT = 'general'
OS = 'Ubuntu'

2
conf/os_field/dns.conf Normal file
View File

@ -0,0 +1,2 @@
NAME = 'dns'
VALIDATOR = is_valid_dns

View File

@ -0,0 +1,2 @@
NAME = 'gateway'
VALIDATOR = is_valid_gateway

View File

@ -0,0 +1 @@
NAME = 'general'

2
conf/os_field/ip.conf Normal file
View File

@ -0,0 +1,2 @@
NAME = 'ip'
VALIDATOR = is_valid_ip

View File

@ -0,0 +1,2 @@
NAME = 'netmask'
VALIDATOR = is_valid_netmask

View File

@ -0,0 +1,2 @@
NAME = 'network'
VALIDATOR = is_valid_network

View File

@ -0,0 +1,3 @@
NAME = 'password'
VALIDATOR = is_valid_password
DESCRIPTION = 'password'

View File

@ -0,0 +1,3 @@
NAME = 'percentage'
FIELD_TYPE = int
VALIDATOR = is_valid_percentage

2
conf/os_field/size.conf Normal file
View File

@ -0,0 +1,2 @@
NAME = 'size'
VALIDATOR = is_valid_size

View File

@ -0,0 +1,3 @@
NAME = 'username'
VALIDATOR = is_valid_username
DESCRIPTION = 'username'

View File

@ -0,0 +1,6 @@
# OS-installer definition for cobbler, loaded as a python config file.
NAME = 'cobbler'
TYPE = 'cobbler'
# XML-RPC endpoint and (username, password) token for the cobbler API.
# NOTE(review): these are default local credentials — presumably meant
# to be overridden per deployment; confirm before shipping.
CONFIG = {
    'url': 'http://127.0.0.1/cobbler_api',
    'token': ('cobbler', 'cobbler')
}

View File

@ -0,0 +1,84 @@
# OS metadata for the 'general' adapter. Loaded as a python config
# file; validator callables such as is_valid_partition are expected to
# be injected into the execution scope by the loader.
# Each metadata node's '_self' dict carries the field constraints;
# '$partition' is a wildcard key matched against user-supplied names.
ADAPTER = 'general'
METADATA = {
    'general': {
        '_self': {
            'required_in_whole_config': True
        },
        'language': {
            '_self': {
                'field': 'general',
                'default_value': 'EN',
                'options': ['EN'],
                'required_in_options': True
            }
        },
        'timezone': {
            '_self': {
                'field': 'general',
                'default_value': 'PDT',
                'options': ['PDT'],
                'required_in_options': True
            }
        },
        'domain': {
            '_self': {
                'field': 'general',
                # normalized "'is_required' :" -> "'is_required':" for
                # consistency with the rest of this file
                'is_required': True,
                'default_value': 'ods.com',
                'options': ['ods.com'],
                'required_in_options': True
            }
        },
        'default_gateway': {
            '_self': {
                'is_required': True,
                'field': 'ip',
                # NOTE(review): environment-specific default — confirm
                # it should ship as the generic fallback.
                'default_value': '10.145.88.1',
            }
        }
    },
    'server_credentials': {
        '_self': {
            'required_in_whole_config': True,
        },
        'username': {
            '_self': {
                'is_required': True,
                'field': 'username',
            }
        },
        'password': {
            '_self': {
                'is_required': True,
                'field': 'password'
            }
        }
    },
    'partition': {
        '_self': {
            'required_in_whole_config': True,
            'options': ['/boot', 'swap', '/var', '/home'],
            'required_in_options': True
        },
        '$partition': {
            '_self': {
                'validator': is_valid_partition
            },
            'max_size': {
                '_self': {
                    'field': 'size'
                },
            },
            'percentage': {
                '_self': {
                    'field': 'percentage',
                }
            },
            'size': {
                '_self': {
                    'field': 'size'
                },
            }
        }
    }
}

View File

@ -0,0 +1,3 @@
NAME = 'ceph'
PARENT = 'general'
DISTRIBUTED_SYSTEM = 'ceph'

View File

@ -0,0 +1,4 @@
NAME = 'ceph(chef)'
PARENT = 'ceph'
INSTALLER = 'chef(icehouse)'
SUPPORTED_OS_PATTERNS = ['(?i)centos.*', '(?i)ubuntu.*']

View File

@ -0,0 +1,4 @@
NAME = 'chef_openstack'
PARENT = 'openstack'
INSTALLER = 'chef(icehouse)'
SUPPORTED_OS_PATTERNS = ['(?i)centos.*', '(?i)ubuntu.*']

Some files were not shown because too many files have changed in this diff Show More