Remove GenericHardwareDeclarativePollster

... and the discovery/inspector plugins depending on the pollster. These
were implemented to gather metrics via the SNMP daemon in TripleO-managed
deployments, but they are no longer valid since Telemetry services and
Nova were removed from the undercloud.

Change-Id: If9a6b695ba799c766314a88328ea8a779407acc0
Takashi Kajinami 2022-05-06 12:25:30 +09:00
parent 41ac16b5ed
commit a28cef7036
26 changed files with 12 additions and 1970 deletions
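
For operators following this removal, a minimal sketch of an upgrade check (the polling.yaml path and its 'sources'/'meters' layout are assumptions, not something this commit ships)::

    # Hedged sketch: report polling sources that still reference the removed
    # hardware.* meters. Assumes /etc/ceilometer/polling.yaml with the usual
    # {'sources': [{'name': ..., 'meters': [...]}]} layout.
    import yaml

    def hardware_meters_still_polled(path='/etc/ceilometer/polling.yaml'):
        with open(path) as f:
            cfg = yaml.safe_load(f) or {}
        hits = []
        for source in cfg.get('sources', []):
            for meter in source.get('meters', []):
                if meter.lstrip('!').startswith('hardware.'):
                    hits.append((source.get('name'), meter))
        return hits

    if __name__ == '__main__':
        for name, meter in hardware_meters_still_polled():
            print('source %s still polls %s' % (name, meter))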

@@ -1,144 +0,0 @@
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import warnings
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
from ceilometer import nova_client
from ceilometer.polling import plugin_base
LOG = log.getLogger(__name__)
OPTS = [
cfg.StrOpt('url_scheme',
default='snmp://',
deprecated_for_removal=True,
help='URL scheme to use for hardware nodes.'),
cfg.StrOpt('readonly_user_name',
default='ro_snmp_user',
deprecated_for_removal=True,
help='SNMPd user name of all nodes running in the cloud.'),
cfg.StrOpt('readonly_user_password',
default='password',
deprecated_for_removal=True,
help='SNMPd v3 authentication password of all the nodes '
'running in the cloud.',
secret=True),
cfg.StrOpt('readonly_user_auth_proto',
choices=['md5', 'sha'],
deprecated_for_removal=True,
help='SNMPd v3 authentication algorithm of all the nodes '
'running in the cloud'),
cfg.StrOpt('readonly_user_priv_proto',
choices=['des', 'aes128', '3des', 'aes192', 'aes256'],
deprecated_for_removal=True,
help='SNMPd v3 encryption algorithm of all the nodes '
'running in the cloud'),
cfg.StrOpt('readonly_user_priv_password',
deprecated_for_removal=True,
help='SNMPd v3 encryption password of all the nodes '
'running in the cloud.',
secret=True),
cfg.StrOpt('tripleo_network_name',
default='ctlplane',
deprecated_for_removal=True,
help='Name of the control plane Tripleo network')
]
class NodesDiscoveryTripleO(plugin_base.DiscoveryBase):
def __init__(self, conf):
super(NodesDiscoveryTripleO, self).__init__(conf)
self.nova_cli = nova_client.Client(conf)
self.last_run = None
self.instances = {}
warnings.warn('GenericHardwareDeclarativePollster has been deprecated '
'and will be removed in a future release.',
category=DeprecationWarning, stacklevel=3)
def _make_resource_url(self, ip):
hwconf = self.conf.hardware
url = hwconf.url_scheme
username = hwconf.readonly_user_name
password = hwconf.readonly_user_password
if username:
url += username
if password:
url += ':' + password
if username or password:
url += '@'
url += ip
opts = ['auth_proto', 'priv_proto', 'priv_password']
query = "&".join(opt + "=" + hwconf['readonly_user_%s' % opt]
for opt in opts
if hwconf['readonly_user_%s' % opt])
if query:
url += '?' + query
return url
def discover(self, manager, param=None):
"""Discover resources to monitor.
instance_get_all will return all instances if last_run is None,
and will return only the instances changed since the last_run time.
"""
try:
instances = self.nova_cli.instance_get_all(self.last_run)
except Exception:
# NOTE(zqfan): instance_get_all is wrapped and will log exception
# when there is any error. It is no need to raise it again and
# print one more time.
return []
for instance in instances:
if getattr(instance, 'OS-EXT-STS:vm_state', None) in ['deleted',
'error']:
self.instances.pop(instance.id, None)
else:
self.instances[instance.id] = instance
self.last_run = timeutils.utcnow(True).isoformat()
resources = []
for instance in self.instances.values():
addresses = instance.addresses.get(
self.conf.hardware.tripleo_network_name)
if addresses is None:
# NOTE(sileht): This is not a tripleo undercloud instance, this
# is a cheap detection if ironic node deployed by tripleo, but
# nova don't expose anything more useful and we must not log a
# ERROR when the instance is not a tripleo undercloud one.
continue
try:
ip_address = addresses[0].get('addr')
final_address = self._make_resource_url(ip_address)
resource = {
'resource_id': instance.id,
'resource_url': final_address,
'mac_addr': addresses[0].get('OS-EXT-IPS-MAC:mac_addr'),
'image_id': instance.image['id'],
'flavor_id': instance.flavor['id']
}
resources.append(resource)
except KeyError:
LOG.error("Couldn't obtain IP address of "
"instance %s" % instance.id)
return resources
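
The resource_url assembled by _make_resource_url() above is consumed later by the generic pollster, which splits it back into host, credentials and SNMPv3 options. A small illustration with made-up values (ceilometer itself uses oslo_utils.netutils.urlsplit; the standard library behaves the same here)::

    from urllib.parse import parse_qs, urlsplit

    url = ('snmp://ro_snmp_user:password@192.0.2.10'
           '?priv_proto=aes192&priv_password=priv_pass')
    parts = urlsplit(url)
    print(parts.hostname)                  # 192.0.2.10
    print(parts.username, parts.password)  # ro_snmp_user password
    print(parse_qs(parts.query))  # {'priv_proto': ['aes192'], 'priv_password': ['priv_pass']}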

@@ -1,26 +0,0 @@
#
# Copyright 2014 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from stevedore import driver
def get_inspector(parsed_url, namespace='ceilometer.hardware.inspectors'):
"""Get inspector driver and load it.
:param parsed_url: urlparse.SplitResult object for the inspector
:param namespace: Namespace to use to look for drivers.
"""
loaded_driver = driver.DriverManager(namespace, parsed_url.scheme)
return loaded_driver.driver()
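
Typical use of get_inspector() above, mirroring what the generic pollster does: the URL scheme names the stevedore driver registered under the ceilometer.hardware.inspectors entry-point namespace. This only works on releases that still ship these modules::

    from oslo_utils import netutils
    from ceilometer.hardware import inspector as insloader

    parsed_url = netutils.urlsplit('snmp://192.0.2.10')
    snmp_driver = insloader.get_inspector(parsed_url)  # an SNMPInspector instance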

@@ -1,40 +0,0 @@
#
# Copyright 2014 ZHAW SoE
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Inspector abstraction for read-only access to hardware components"""
import abc
class Inspector(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def inspect_generic(self, host, cache, extra_metadata, param):
"""A generic inspect function.
:param host: the target host
:param cache: cache passed from the pollster
:param extra_metadata: extra dict to be used as metadata
:param param: a dict of inspector specific param
:return: an iterator of (value, metadata, extra) containing the sample
value, metadata dict to construct sample's metadata, and
extra dict of extra metadata to help constructing sample
"""
def prepare_params(self, param):
"""Parse the params to a format which the inspector itself recognizes.
:param param: inspector params from meter definition file
:return: a dict of param which the inspector recognized
"""
return {}
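
A minimal sketch of a custom driver against the Inspector interface above; StaticInspector is hypothetical, and a real driver would also need an entry point registered in the ceilometer.hardware.inspectors namespace::

    from ceilometer.hardware.inspector import base

    class StaticInspector(base.Inspector):
        # Hypothetical example: yields one constant (value, metadata, extra)
        # tuple per call, regardless of the polled host.
        def inspect_generic(self, host, cache, extra_metadata, param):
            yield (42, {'origin': 'static'}, extra_metadata or {})

        def prepare_params(self, param):
            # accept the meter definition parameters unchanged
            return param or {}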

@@ -1,346 +0,0 @@
#
# Copyright 2014 ZHAW SoE
# Copyright 2014 Intel Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Inspector for collecting data over SNMP"""
import copy
from oslo_log import log
from pysnmp.entity.rfc3413.oneliner import cmdgen
from pysnmp.proto import rfc1905
from urllib import parse as urlparse
from ceilometer.hardware.inspector import base
LOG = log.getLogger(__name__)
class SNMPException(Exception):
pass
def parse_snmp_return(ret, is_bulk=False):
"""Check the return value of snmp operations
:param ret: a tuple of (errorIndication, errorStatus, errorIndex, data)
returned by pysnmp
:param is_bulk: True if the ret value is from GetBulkRequest
:return: a tuple of (err, data)
err: True if error found, or False if no error found
data: a string of error description if error found, or the
actual return data of the snmp operation
"""
err = True
(errIndication, errStatus, errIdx, varBinds) = ret
if errIndication:
data = errIndication
elif errStatus:
if is_bulk:
varBinds = varBinds[-1]
data = "%s at %s" % (errStatus.prettyPrint(),
errIdx and varBinds[int(errIdx) - 1] or "?")
else:
err = False
data = varBinds
return err, data
EXACT = 'type_exact'
PREFIX = 'type_prefix'
_auth_proto_mapping = {
'md5': cmdgen.usmHMACMD5AuthProtocol,
'sha': cmdgen.usmHMACSHAAuthProtocol,
}
_priv_proto_mapping = {
'des': cmdgen.usmDESPrivProtocol,
'aes128': cmdgen.usmAesCfb128Protocol,
'3des': cmdgen.usm3DESEDEPrivProtocol,
'aes192': cmdgen.usmAesCfb192Protocol,
'aes256': cmdgen.usmAesCfb256Protocol,
}
_usm_proto_mapping = {
'auth_proto': ('authProtocol', _auth_proto_mapping),
'priv_proto': ('privProtocol', _priv_proto_mapping),
}
class SNMPInspector(base.Inspector):
# Default port
_port = 161
_CACHE_KEY_OID = "snmp_cached_oid"
# NOTE: The following mapping has been moved to the yaml file identified
# by the config options hardware.meter_definitions_file. However, we still
# keep the description here for code reading purpose.
"""
The following mapping define how to construct
(value, metadata, extra) returned by inspect_generic
MAPPING = {
'identifier: {
'matching_type': EXACT or PREFIX,
'metric_oid': (oid, value_converter)
'metadata': {
metadata_name1: (oid1, value_converter),
metadata_name2: (oid2, value_converter),
},
'post_op': special func to modify the return data,
},
}
For matching_type of EXACT, each item in the above mapping will
return exact one (value, metadata, extra) tuple. The value would be
returned from SNMP request GetRequest for oid of 'metric_oid', the
metadata dict would be constructed based on the returning from SNMP
GetRequest for oids of 'metadata'.
For matching_type of PREFIX, SNMP request GetBulkRequest
would be sent to get values for oids of 'metric_oid' and
'metadata' of each item in the above mapping. And each item might
return multiple (value, metadata, extra) tuples, e.g.
Suppose we have the following mapping:
MAPPING = {
'disk.size.total': {
'matching_type': PREFIX,
'metric_oid': ("1.3.6.1.4.1.2021.9.1.6", int)
'metadata': {
'device': ("1.3.6.1.4.1.2021.9.1.3", str),
'path': ("1.3.6.1.4.1.2021.9.1.2", str),
},
'post_op': None,
},
and the SNMP have the following oid/value(s):
{
'1.3.6.1.4.1.2021.9.1.6.1': 19222656,
'1.3.6.1.4.1.2021.9.1.3.1': "/dev/sda2",
'1.3.6.1.4.1.2021.9.1.2.1': "/"
'1.3.6.1.4.1.2021.9.1.6.2': 808112,
'1.3.6.1.4.1.2021.9.1.3.2': "tmpfs",
'1.3.6.1.4.1.2021.9.1.2.2': "/run",
}
So here we'll return 2 instances of (value, metadata, extra):
(19222656, {'device': "/dev/sda2", 'path': "/"}, None)
(808112, {'device': "tmpfs", 'path': "/run"}, None)
The post_op is assumed to be implemented by new metric developer. It
could be used to add additional special metadata(e.g. ip address), or
it could be used to add information into extra dict to be returned
to construct the pollster how to build final sample, e.g.
extra.update('project_id': xy, 'user_id': zw)
"""
def _query_oids(self, host, oids, cache, is_bulk):
# send GetRequest or GetBulkRequest to get oids values and
# populate the values into cache
authData = self._get_auth_strategy(host)
transport = cmdgen.UdpTransportTarget((host.hostname,
host.port or self._port))
oid_cache = cache.setdefault(self._CACHE_KEY_OID, {})
cmd_runner = cmdgen.CommandGenerator()
if is_bulk:
ret = cmd_runner.bulkCmd(authData, transport, 0, 100, *oids,
lookupValues=True)
else:
ret = cmd_runner.getCmd(authData, transport, *oids,
lookupValues=True)
(error, data) = parse_snmp_return(ret, is_bulk)
if error:
raise SNMPException("An error occurred, oids %(oid)s, "
"host %(host)s, %(err)s" %
dict(oid=oids,
host=host.hostname,
err=data))
# save result into cache
if is_bulk:
for var_bind_table_row in data:
for name, val in var_bind_table_row:
oid_cache[str(name)] = val
else:
for name, val in data:
oid_cache[str(name)] = val
@staticmethod
def find_matching_oids(oid_cache, oid, match_type, find_one=True):
matched = []
if match_type == PREFIX:
for key in oid_cache.keys():
if key.startswith(oid):
matched.append(key)
if find_one:
break
else:
if oid in oid_cache:
matched.append(oid)
return matched
@staticmethod
def get_oid_value(oid_cache, oid_def, suffix='', host=None):
oid, converter = oid_def
value = oid_cache[oid + suffix]
if isinstance(value, (rfc1905.NoSuchObject, rfc1905.NoSuchInstance)):
LOG.debug("OID %s%s has no value" % (
oid, " on %s" % host.hostname if host else ""))
return None
if converter:
value = converter(value)
return value
@classmethod
def construct_metadata(cls, oid_cache, meta_defs, suffix='', host=None):
metadata = {}
for key, oid_def in meta_defs.items():
metadata[key] = cls.get_oid_value(oid_cache, oid_def, suffix, host)
return metadata
@classmethod
def _find_missing_oids(cls, meter_def, cache):
# find oids have not been queried and cached
new_oids = []
oid_cache = cache.setdefault(cls._CACHE_KEY_OID, {})
# check metric_oid
if not cls.find_matching_oids(oid_cache,
meter_def['metric_oid'][0],
meter_def['matching_type']):
new_oids.append(meter_def['metric_oid'][0])
for metadata in meter_def['metadata'].values():
if not cls.find_matching_oids(oid_cache,
metadata[0],
meter_def['matching_type']):
new_oids.append(metadata[0])
return new_oids
def inspect_generic(self, host, cache, extra_metadata, param):
# the snmp definition for the corresponding meter
meter_def = param
# collect oids that needs to be queried
oids_to_query = self._find_missing_oids(meter_def, cache)
# query oids and populate into caches
if oids_to_query:
self._query_oids(host, oids_to_query, cache,
meter_def['matching_type'] == PREFIX)
# construct (value, metadata, extra)
oid_cache = cache[self._CACHE_KEY_OID]
# find all oids which needed to construct final sample values
# for matching type of EXACT, only 1 sample would be generated
# for matching type of PREFIX, multiple samples could be generated
oids_for_sample_values = self.find_matching_oids(
oid_cache,
meter_def['metric_oid'][0],
meter_def['matching_type'],
False)
input_extra_metadata = extra_metadata
for oid in oids_for_sample_values:
suffix = oid[len(meter_def['metric_oid'][0]):]
value = self.get_oid_value(oid_cache,
meter_def['metric_oid'],
suffix, host)
# get the metadata for this sample value
metadata = self.construct_metadata(oid_cache,
meter_def['metadata'],
suffix, host)
extra_metadata = copy.deepcopy(input_extra_metadata) or {}
# call post_op for special cases
if meter_def['post_op']:
func = getattr(self, meter_def['post_op'], None)
if func:
value = func(host, cache, meter_def,
value, metadata, extra_metadata,
suffix)
yield (value, metadata, extra_metadata)
def _post_op_memory_avail_to_used(self, host, cache, meter_def,
value, metadata, extra, suffix):
_memory_total_oid = "1.3.6.1.4.1.2021.4.5.0"
if _memory_total_oid not in cache[self._CACHE_KEY_OID]:
self._query_oids(host, [_memory_total_oid], cache, False)
total_value = self.get_oid_value(cache[self._CACHE_KEY_OID],
(_memory_total_oid, int))
if total_value is None:
return None
return total_value - value
def _post_op_net(self, host, cache, meter_def,
value, metadata, extra, suffix):
# add ip address into metadata
_interface_ip_oid = "1.3.6.1.2.1.4.20.1.2"
oid_cache = cache.setdefault(self._CACHE_KEY_OID, {})
if not self.find_matching_oids(oid_cache,
_interface_ip_oid,
PREFIX):
# populate the oid into cache
self._query_oids(host, [_interface_ip_oid], cache, True)
ip_addr = ''
for k, v in oid_cache.items():
if k.startswith(_interface_ip_oid) and v == int(suffix[1:]):
ip_addr = k.replace(_interface_ip_oid + ".", "")
metadata.update(ip=ip_addr)
# update resource_id for each nic interface
self._suffix_resource_id(host, metadata, 'name', extra)
return value
def _post_op_disk(self, host, cache, meter_def,
value, metadata, extra, suffix):
self._suffix_resource_id(host, metadata, 'device', extra)
return value
@staticmethod
def _suffix_resource_id(host, metadata, key, extra):
prefix = metadata.get(key)
if prefix:
res_id = extra.get('resource_id') or host.hostname
res_id = res_id + ".%s" % metadata.get(key)
extra.update(resource_id=res_id)
@staticmethod
def _get_auth_strategy(host):
options = urlparse.parse_qs(host.query)
kwargs = {}
for key in _usm_proto_mapping:
opt = options.get(key, [None])[-1]
value = _usm_proto_mapping[key][1].get(opt)
if value:
kwargs[_usm_proto_mapping[key][0]] = value
priv_pass = options.get('priv_password', [None])[-1]
if priv_pass:
kwargs['privKey'] = priv_pass
if host.password:
kwargs['authKey'] = host.password
if kwargs:
auth_strategy = cmdgen.UsmUserData(host.username,
**kwargs)
else:
auth_strategy = cmdgen.CommunityData(host.username or 'public')
return auth_strategy
def prepare_params(self, param):
processed = {}
processed['matching_type'] = param['matching_type']
processed['metric_oid'] = (param['oid'], eval(param['type']))
processed['post_op'] = param.get('post_op', None)
processed['metadata'] = {}
for k, v in param.get('metadata', {}).items():
processed['metadata'][k] = (v['oid'], eval(v['type']))
return processed
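
The EXACT/PREFIX distinction above can be exercised without an SNMP daemon by matching against a hand-built OID cache; the values below are made up and the module is assumed to still be importable as ceilometer.hardware.inspector.snmp::

    from ceilometer.hardware.inspector.snmp import EXACT, PREFIX, SNMPInspector

    oid_cache = {
        '1.3.6.1.4.1.2021.9.1.6.1': 19222656,
        '1.3.6.1.4.1.2021.9.1.6.2': 808112,
        '1.3.6.1.4.1.2021.10.1.3.1': 1,
    }
    # PREFIX returns every cached OID under the prefix (one per disk row here).
    print(SNMPInspector.find_matching_oids(
        oid_cache, '1.3.6.1.4.1.2021.9.1.6', PREFIX, find_one=False))
    # EXACT returns the OID only when it is present verbatim.
    print(SNMPInspector.find_matching_oids(
        oid_cache, '1.3.6.1.4.1.2021.10.1.3.1', EXACT))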

@@ -1,287 +0,0 @@
---
# see http://www.circitor.fr/Mibs/Html/U/UCD-SNMP-MIB.php for reference.
# http://www.circitor.fr/Mibs/Html/U/UCD-DISKIO-MIB.php for disk metrics
metric:
# cpu
- name: hardware.cpu.load.1min
unit: process
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.10.1.3.1"
type: "lambda x: float(str(x))"
- name: hardware.cpu.load.5min
unit: process
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.10.1.3.2"
type: "lambda x: float(str(x))"
- name: hardware.cpu.load.15min
unit: process
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.10.1.3.3"
type: "lambda x: float(str(x))"
# hardware.cpu.util is deprecated
- name: hardware.cpu.util
unit: "%"
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.11.9.0"
type: "int"
- name: hardware.cpu.user
unit: tick
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.11.50.0"
type: "int"
- name: hardware.cpu.nice
unit: tick
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.11.51.0"
type: "int"
- name: hardware.cpu.system
unit: tick
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.11.52.0"
type: "int"
- name: hardware.cpu.idle
unit: tick
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.11.53.0"
type: "int"
- name: hardware.cpu.wait
unit: tick
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.11.54.0"
type: "int"
- name: hardware.cpu.kernel
unit: tick
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.11.55.0"
type: "int"
- name: hardware.cpu.interrupt
unit: tick
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.11.56.0"
type: "int"
# disk
- name: hardware.disk.size.total
unit: KB
type: gauge
snmp_inspector:
matching_type: "type_prefix"
oid: "1.3.6.1.4.1.2021.9.1.6"
type: "int"
metadata: &disk_metadata
path:
oid: "1.3.6.1.4.1.2021.9.1.2"
type: "str"
device:
oid: "1.3.6.1.4.1.2021.9.1.3"
type: "str"
post_op: "_post_op_disk"
- name: hardware.disk.size.used
unit: KB
type: gauge
snmp_inspector:
matching_type: "type_prefix"
oid: "1.3.6.1.4.1.2021.9.1.8"
type: "int"
metadata: *disk_metadata
post_op: "_post_op_disk"
- name: hardware.disk.read.bytes
unit: B
type: gauge
snmp_inspector:
matching_type: "type_prefix"
oid: "1.3.6.1.4.1.2021.13.15.1.1.3"
type: "int"
metadata: &diskio_metadata
device:
oid: "1.3.6.1.4.1.2021.13.15.1.1.2"
post_op: "_post_op_disk"
- name: hardware.disk.write.bytes
unit: B
type: gauge
snmp_inspector:
matching_type: "type_prefix"
oid: "1.3.6.1.4.1.2021.13.15.1.1.4"
type: "int"
<<: *diskio_metadata
post_op: "_post_op_disk"
- name: hardware.disk.read.requests
unit: requests
type: gauge
snmp_inspector:
matching_type: "type_prefix"
oid: "1.3.6.1.4.1.2021.13.15.1.1.5"
type: "int"
<<: *diskio_metadata
post_op: "_post_op_disk"
- name: hardware.disk.write.requests
unit: requests
type: gauge
snmp_inspector:
matching_type: "type_prefix"
oid: "1.3.6.1.4.1.2021.13.15.1.1.6"
type: "int"
<<: *diskio_metadata
post_op: "_post_op_disk"
# memory
- name: hardware.memory.total
unit: KB
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.4.5.0"
type: "int"
- name: hardware.memory.used
unit: KB
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.4.6.0"
type: "int"
post_op: "_post_op_memory_avail_to_used"
- name: hardware.memory.swap.total
unit: KB
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.4.3.0"
type: "int"
- name: hardware.memory.swap.avail
unit: KB
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.4.4.0"
type: "int"
- name: hardware.memory.buffer
unit: KB
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.4.14.0"
type: "int"
- name: hardware.memory.cached
unit: KB
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.4.15.0"
type: "int"
# network interface
- name: hardware.network.incoming.bytes
unit: B
type: cumulative
snmp_inspector:
matching_type: "type_prefix"
oid: "1.3.6.1.2.1.2.2.1.10"
type: "int"
metadata: &net_metadata
name:
oid: "1.3.6.1.2.1.2.2.1.2"
type: "str"
speed:
oid: "1.3.6.1.2.1.2.2.1.5"
type: "lambda x: int(x) / 8"
mac:
oid: "1.3.6.1.2.1.2.2.1.6"
type: "lambda x: x.prettyPrint().replace('0x', '')"
post_op: "_post_op_net"
- name: hardware.network.outgoing.bytes
unit: B
type: cumulative
snmp_inspector:
matching_type: "type_prefix"
oid: "1.3.6.1.2.1.2.2.1.16"
type: "int"
metadata: *net_metadata
post_op: "_post_op_net"
- name: hardware.network.outgoing.errors
unit: packet
type: cumulative
snmp_inspector:
matching_type: "type_prefix"
oid: "1.3.6.1.2.1.2.2.1.20"
type: "int"
metadata: *net_metadata
post_op: "_post_op_net"
#network aggregate
- name: hardware.network.ip.outgoing.datagrams
unit: datagrams
type: cumulative
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.2.1.4.10.0"
type: "int"
- name: hardware.network.ip.incoming.datagrams
unit: datagrams
type: cumulative
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.2.1.4.3.0"
type: "int"
#system stats
# hardware.system_stats.cpu.idle is deprecated
- name: hardware.system_stats.cpu.idle
unit: "%"
type: gauge
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.11.11.0"
type: "int"
- name: hardware.system_stats.io.outgoing.blocks
unit: blocks
type: cumulative
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.11.57.0"
type: "int"
- name: hardware.system_stats.io.incoming.blocks
unit: blocks
type: cumulative
snmp_inspector:
matching_type: "type_exact"
oid: "1.3.6.1.4.1.2021.11.58.0"
type: "int"

@@ -1,225 +0,0 @@
#
# Copyright 2015 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import pkg_resources
import warnings
from oslo_config import cfg
from oslo_log import log
from oslo_utils import netutils
from ceilometer import declarative
from ceilometer.hardware import inspector as insloader
from ceilometer.hardware.pollsters import util
from ceilometer.i18n import _
from ceilometer.polling import plugin_base
from ceilometer import sample
OPTS = [
cfg.StrOpt('meter_definitions_file',
default="snmp.yaml",
deprecated_for_removal=True,
help="Configuration file for defining hardware snmp meters."
),
]
LOG = log.getLogger(__name__)
class MeterDefinition(object):
required_fields = ['name', 'unit', 'type']
def __init__(self, definition_cfg):
self.cfg = definition_cfg
for fname, fval in self.cfg.items():
if (isinstance(fname, str) and
(fname in self.required_fields or
fname.endswith('_inspector'))):
setattr(self, fname, fval)
else:
LOG.warning("Ignore unrecognized field %s", fname)
for fname in self.required_fields:
if not getattr(self, fname, None):
raise declarative.MeterDefinitionException(
_("Missing field %s") % fname, self.cfg)
if self.type not in sample.TYPES:
raise declarative.MeterDefinitionException(
_("Unrecognized type value %s") % self.type, self.cfg)
class GenericHardwareDeclarativePollster(plugin_base.PollsterBase):
CACHE_KEY = 'hardware.generic'
mapping = None
def __init__(self, conf):
super(GenericHardwareDeclarativePollster, self).__init__(conf)
self.inspectors = {}
warnings.warn('GenericHardwareDeclarativePollster has been deprecated '
'and will be removed in a future release.',
category=DeprecationWarning, stacklevel=3)
def _update_meter_definition(self, definition):
self.meter_definition = definition
self.cached_inspector_params = {}
@property
def default_discovery(self):
return 'tripleo_overcloud_nodes'
@staticmethod
def _parse_resource(res):
"""Parse resource from discovery.
Either URL can be given or dict. Dict has to contain at least
keys 'resource_id' and 'resource_url', all the dict keys will be stored
as metadata.
:param res: URL or dict containing all resource info.
:return: parsed_url, resource_id, metadata Returns parsed URL used for
SNMP query, unique identifier of the resource and metadata
of the resource.
"""
parsed_url, resource_id, metadata = (None, None, None)
if isinstance(res, dict):
if 'resource_url' not in res or 'resource_id' not in res:
LOG.error('Passed resource dict must contain keys '
'resource_id and resource_url.')
else:
metadata = res
parsed_url = netutils.urlsplit(res['resource_url'])
resource_id = res['resource_id']
else:
metadata = {}
parsed_url = netutils.urlsplit(res)
resource_id = res
return parsed_url, resource_id, metadata
def _get_inspector(self, parsed_url):
if parsed_url.scheme not in self.inspectors:
try:
driver = insloader.get_inspector(parsed_url)
self.inspectors[parsed_url.scheme] = driver
except Exception as err:
LOG.exception("Cannot load inspector %(name)s: %(err)s",
dict(name=parsed_url.scheme,
err=err))
raise
return self.inspectors[parsed_url.scheme]
def get_samples(self, manager, cache, resources=None):
"""Return an iterable of Sample instances from polling the resources.
:param manager: The service manager invoking the plugin
:param cache: A dictionary for passing data between plugins
:param resources: end point to poll data from
"""
resources = resources or []
h_cache = cache.setdefault(self.CACHE_KEY, {})
sample_iters = []
# Get the meter identifiers to poll
identifier = self.meter_definition.name
for resource in resources:
parsed_url, res, extra_metadata = self._parse_resource(resource)
if parsed_url is None:
LOG.error("Skip invalid resource %s", resource)
continue
ins = self._get_inspector(parsed_url)
try:
# Call hardware inspector to poll for the data
i_cache = h_cache.setdefault(res, {})
# Prepare inspector parameters and cache it for performance
param_key = parsed_url.scheme + '.' + identifier
inspector_param = self.cached_inspector_params.get(param_key)
if not inspector_param:
param = getattr(self.meter_definition,
parsed_url.scheme + '_inspector', {})
inspector_param = ins.prepare_params(param)
self.cached_inspector_params[param_key] = inspector_param
if identifier not in i_cache:
i_cache[identifier] = list(ins.inspect_generic(
host=parsed_url,
cache=i_cache,
extra_metadata=extra_metadata,
param=inspector_param))
# Generate samples
if i_cache[identifier]:
sample_iters.append(self.generate_samples(
parsed_url,
i_cache[identifier]))
except Exception as err:
msg = ('inspector call failed for %(ident)s '
'host %(host)s: %(err)s' %
dict(ident=identifier,
host=parsed_url.hostname,
err=err))
if "timeout" in str(err):
LOG.warning(msg)
else:
LOG.exception(msg)
return itertools.chain(*sample_iters)
def generate_samples(self, host_url, data):
"""Generate a list of Sample from the data returned by inspector
:param host_url: host url of the endpoint
:param data: list of data returned by the corresponding inspector
"""
samples = []
definition = self.meter_definition
for (value, metadata, extra) in data:
s = util.make_sample_from_host(host_url,
name=definition.name,
sample_type=definition.type,
unit=definition.unit,
volume=value,
res_metadata=metadata,
extra=extra,
name_prefix=None)
samples.append(s)
return samples
@classmethod
def build_pollsters(cls, conf):
if not cls.mapping:
definition_cfg = declarative.load_definitions(
conf, {}, conf.hardware.meter_definitions_file,
pkg_resources.resource_filename(__name__, "data/snmp.yaml"))
cls.mapping = load_definition(definition_cfg)
pollsters = []
for name in cls.mapping:
pollster = cls(conf)
pollster._update_meter_definition(cls.mapping[name])
pollsters.append((name, pollster))
return pollsters
def load_definition(config_def):
mappings = {}
for meter_def in config_def.get('metric', []):
try:
meter = MeterDefinition(meter_def)
mappings[meter.name] = meter
except declarative.DefinitionException as e:
errmsg = "Error loading meter definition: %s"
LOG.error(errmsg, e.brief_message)
return mappings
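
In miniature, this is how build_pollsters() above turned a definitions file into per-meter pollsters: load_definition() builds MeterDefinition objects keyed by meter name (assumes the removed module is importable as generic)::

    from ceilometer.hardware.pollsters import generic

    definition_cfg = {
        'metric': [{
            'name': 'hardware.cpu.load.1min',
            'unit': 'process',
            'type': 'gauge',
            'snmp_inspector': {'matching_type': 'type_exact',
                               'oid': '1.3.6.1.4.1.2021.10.1.3.1',
                               'type': 'lambda x: float(str(x))'},
        }]
    }
    mapping = generic.load_definition(definition_cfg)
    assert list(mapping) == ['hardware.cpu.load.1min']
    assert mapping['hardware.cpu.load.1min'].unit == 'process'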

@@ -1,59 +0,0 @@
#
# Copyright 2013 ZHAW SoE
# Copyright 2014 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from urllib import parse as urlparse
from ceilometer import sample
def get_metadata_from_host(host_url):
return {'resource_url': urlparse.urlunsplit(host_url)}
def make_resource_metadata(res_metadata=None, host_url=None):
resource_metadata = dict()
if res_metadata is not None:
metadata = copy.copy(res_metadata)
resource_metadata.update(metadata)
resource_metadata.update(get_metadata_from_host(host_url))
return resource_metadata
def make_sample_from_host(host_url, name, sample_type, unit, volume,
project_id=None, user_id=None, resource_id=None,
res_metadata=None, extra=None,
name_prefix='hardware'):
extra = extra or {}
resource_metadata = make_resource_metadata(res_metadata, host_url)
resource_metadata.update(extra)
res_id = resource_id or extra.get('resource_id') or host_url.hostname
if name_prefix:
name = name_prefix + '.' + name
return sample.Sample(
name=name,
type=sample_type,
unit=unit,
volume=volume,
user_id=user_id or extra.get('user_id'),
project_id=project_id or extra.get('project_id'),
resource_id=res_id,
resource_metadata=resource_metadata,
source='hardware',
)
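
make_sample_from_host() above prefixes the meter name with 'hardware' by default and falls back to the host name for resource_id; a small illustration with made-up values, mirroring the unit tests further down::

    from oslo_utils import netutils
    from ceilometer import sample
    from ceilometer.hardware.pollsters import util

    host_url = netutils.urlsplit('snmp://192.0.2.10:161')
    s = util.make_sample_from_host(host_url,
                                   name='cpu.load.1min',
                                   sample_type=sample.TYPE_GAUGE,
                                   unit='process',
                                   volume=0.5)
    # s.name == 'hardware.cpu.load.1min'
    # s.resource_id == '192.0.2.10'
    # s.resource_metadata['resource_url'] == 'snmp://192.0.2.10:161'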

@@ -22,8 +22,6 @@ import ceilometer.compute.virt.inspector
import ceilometer.compute.virt.libvirt.utils
import ceilometer.compute.virt.vmware.inspector
import ceilometer.event.converter
import ceilometer.hardware.discovery
import ceilometer.hardware.pollsters.generic
import ceilometer.image.discovery
import ceilometer.ipmi.platform.intel_node_manager
import ceilometer.ipmi.pollsters
@@ -95,9 +93,6 @@ def list_opts():
'membership has changed'),
]),
('event', ceilometer.event.converter.OPTS),
('hardware', itertools.chain(
ceilometer.hardware.discovery.OPTS,
ceilometer.hardware.pollsters.generic.OPTS)),
('ipmi',
itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS,
ceilometer.ipmi.pollsters.OPTS)),

@@ -275,61 +275,6 @@ resources:
attributes:
provider: resource_metadata.provider
- resource_type: host
metrics:
hardware.cpu.load.1min:
hardware.cpu.load.5min:
hardware.cpu.load.15min:
hardware.cpu.util:
hardware.cpu.user:
archive_policy_name: ceilometer-low-rate
hardware.cpu.nice:
archive_policy_name: ceilometer-low-rate
hardware.cpu.system:
archive_policy_name: ceilometer-low-rate
hardware.cpu.idle:
archive_policy_name: ceilometer-low-rate
hardware.cpu.wait:
archive_policy_name: ceilometer-low-rate
hardware.cpu.kernel:
archive_policy_name: ceilometer-low-rate
hardware.cpu.interrupt:
archive_policy_name: ceilometer-low-rate
hardware.memory.total:
hardware.memory.used:
hardware.memory.swap.total:
hardware.memory.swap.avail:
hardware.memory.buffer:
hardware.memory.cached:
hardware.network.ip.outgoing.datagrams:
hardware.network.ip.incoming.datagrams:
hardware.system_stats.cpu.idle:
hardware.system_stats.io.outgoing.blocks:
hardware.system_stats.io.incoming.blocks:
attributes:
host_name: resource_metadata.resource_url
- resource_type: host_disk
metrics:
hardware.disk.size.total:
hardware.disk.size.used:
hardware.disk.read.bytes:
hardware.disk.write.bytes:
hardware.disk.read.requests:
hardware.disk.write.requests:
attributes:
host_name: resource_metadata.resource_url
device_name: resource_metadata.device
- resource_type: host_network_interface
metrics:
hardware.network.incoming.bytes:
hardware.network.outgoing.bytes:
hardware.network.outgoing.errors:
attributes:
host_name: resource_metadata.resource_url
device_name: resource_metadata.name
- resource_type: nova_compute
metrics:
compute.node.cpu.frequency:

@@ -1,31 +0,0 @@
#
# Copyright 2014 Intel Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import netutils
from ceilometer.hardware import inspector
from ceilometer.tests import base
class TestHardwareInspector(base.BaseTestCase):
def test_get_inspector(self):
url = netutils.urlsplit("snmp://")
driver = inspector.get_inspector(url)
self.assertTrue(driver)
def test_get_inspector_illegal(self):
url = netutils.urlsplit("illegal://")
self.assertRaises(RuntimeError,
inspector.get_inspector,
url)

@@ -1,271 +0,0 @@
#
# Copyright 2013 Intel Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/hardware/inspector/snmp/inspector.py"""
from unittest import mock
import fixtures
from oslo_utils import netutils
from pysnmp.proto import rfc1905
from ceilometer.hardware.inspector import snmp
from ceilometer.tests import base as test_base
ins = snmp.SNMPInspector
class FakeObjectName(object):
def __init__(self, name):
self.name = name
def __str__(self):
return str(self.name)
class FakeCommandGenerator(object):
def getCmd(self, authData, transportTarget, *oids, **kwargs):
emptyOIDs = {
'1.3.6.1.4.1.2021.4.14.0': rfc1905.noSuchObject,
'1.3.6.1.4.1.2021.4.14.1': rfc1905.noSuchInstance,
}
varBinds = [
(FakeObjectName(oid), int(oid.split('.')[-1]))
for oid in oids
if oid not in emptyOIDs
]
for emptyOID, exc in emptyOIDs.items():
if emptyOID in oids:
varBinds += [(FakeObjectName(emptyOID), exc)]
return (None, None, 0, varBinds)
def bulkCmd(authData, transportTarget, nonRepeaters, maxRepetitions,
*oids, **kwargs):
varBindTable = [
[(FakeObjectName("%s.%d" % (oid, i)), i) for i in range(1, 3)]
for oid in oids
]
return (None, None, 0, varBindTable)
class TestSNMPInspector(test_base.BaseTestCase):
mapping = {
'test_exact': {
'matching_type': snmp.EXACT,
'metric_oid': ('1.3.6.1.4.1.2021.10.1.3.1', int),
'metadata': {
'meta': ('1.3.6.1.4.1.2021.10.1.3.8', int)
},
'post_op': '_fake_post_op',
},
'test_prefix': {
'matching_type': snmp.PREFIX,
'metric_oid': ('1.3.6.1.4.1.2021.9.1.8', int),
'metadata': {
'meta': ('1.3.6.1.4.1.2021.9.1.3', int)
},
'post_op': None,
},
'test_nosuch': {
'matching_type': snmp.EXACT,
'metric_oid': ('1.3.6.1.4.1.2021.4.14.0', int),
'metadata': {},
'post_op': None,
},
'test_nosuch_instance': {
'matching_type': snmp.EXACT,
'metric_oid': ('1.3.6.1.4.1.2021.4.14.1', int),
'metadata': {},
'post_op': None,
},
}
def setUp(self):
super(TestSNMPInspector, self).setUp()
self.inspector = snmp.SNMPInspector()
self.host = netutils.urlsplit("snmp://localhost")
self.useFixture(fixtures.MockPatchObject(
snmp.cmdgen, 'CommandGenerator',
return_value=FakeCommandGenerator()))
def test_snmp_error(self):
def get_list(func, *args, **kwargs):
return list(func(*args, **kwargs))
def faux_parse(ret, is_bulk):
return (True, 'forced error')
self.useFixture(fixtures.MockPatchObject(
snmp, 'parse_snmp_return', new=faux_parse))
self.assertRaises(snmp.SNMPException,
get_list,
self.inspector.inspect_generic,
host=self.host,
cache={},
extra_metadata={},
param=self.mapping['test_exact'])
@staticmethod
def _fake_post_op(host, cache, meter_def, value, metadata, extra, suffix):
metadata.update(post_op_meta=4)
extra.update(project_id=2)
return value
def test_inspect_no_such_object(self):
cache = {}
try:
# inspect_generic() is a generator, so we explicitly need to
# iterate through it in order to trigger the exception.
list(self.inspector.inspect_generic(self.host,
cache,
{},
self.mapping['test_nosuch']))
except ValueError:
self.fail("got ValueError when interpreting NoSuchObject return")
def test_inspect_no_such_instance(self):
cache = {}
try:
# inspect_generic() is a generator, so we explicitly need to
# iterate through it in order to trigger the exception.
list(self.inspector.inspect_generic(self.host,
cache,
{},
self.mapping['test_nosuch']))
except ValueError:
self.fail("got ValueError when interpreting NoSuchInstance return")
def test_inspect_generic_exact(self):
self.inspector._fake_post_op = self._fake_post_op
cache = {}
ret = list(self.inspector.inspect_generic(self.host,
cache,
{},
self.mapping['test_exact']))
keys = cache[ins._CACHE_KEY_OID].keys()
self.assertIn('1.3.6.1.4.1.2021.10.1.3.1', keys)
self.assertIn('1.3.6.1.4.1.2021.10.1.3.8', keys)
self.assertEqual(1, len(ret))
self.assertEqual(1, ret[0][0])
self.assertEqual(8, ret[0][1]['meta'])
self.assertEqual(4, ret[0][1]['post_op_meta'])
self.assertEqual(2, ret[0][2]['project_id'])
def test_inspect_generic_prefix(self):
cache = {}
ret = list(self.inspector.inspect_generic(self.host,
cache,
{},
self.mapping['test_prefix']))
keys = cache[ins._CACHE_KEY_OID].keys()
self.assertIn('1.3.6.1.4.1.2021.9.1.8' + '.1', keys)
self.assertIn('1.3.6.1.4.1.2021.9.1.8' + '.2', keys)
self.assertIn('1.3.6.1.4.1.2021.9.1.3' + '.1', keys)
self.assertIn('1.3.6.1.4.1.2021.9.1.3' + '.2', keys)
self.assertEqual(2, len(ret))
self.assertIn(ret[0][0], (1, 2))
self.assertEqual(ret[0][0], ret[0][1]['meta'])
def test_post_op_net(self):
cache = {}
metadata = dict(name='lo',
speed=0,
mac='ba21e43302fe')
extra = {}
ret = self.inspector._post_op_net(self.host, cache, None,
value=8,
metadata=metadata,
extra=extra,
suffix=".2")
self.assertEqual(8, ret)
self.assertIn('ip', metadata)
self.assertIn("2", metadata['ip'])
self.assertIn('resource_id', extra)
self.assertEqual("localhost.lo", extra['resource_id'])
def test_post_op_disk(self):
cache = {}
metadata = dict(device='/dev/sda1',
path='/')
extra = {}
ret = self.inspector._post_op_disk(self.host, cache, None,
value=8,
metadata=metadata,
extra=extra,
suffix=None)
self.assertEqual(8, ret)
self.assertIn('resource_id', extra)
self.assertEqual("localhost./dev/sda1", extra['resource_id'])
def test_prepare_params(self):
param = {'post_op': '_post_op_disk',
'oid': '1.3.6.1.4.1.2021.9.1.6',
'type': 'int',
'matching_type': 'type_prefix',
'metadata': {
'device': {'oid': '1.3.6.1.4.1.2021.9.1.3',
'type': 'str'},
'path': {'oid': '1.3.6.1.4.1.2021.9.1.2',
'type': "lambda x: str(x)"}}}
processed = self.inspector.prepare_params(param)
self.assertEqual('_post_op_disk', processed['post_op'])
self.assertEqual('1.3.6.1.4.1.2021.9.1.6', processed['metric_oid'][0])
self.assertEqual(int, processed['metric_oid'][1])
self.assertEqual(snmp.PREFIX, processed['matching_type'])
self.assertEqual(2, len(processed['metadata'].keys()))
self.assertEqual('1.3.6.1.4.1.2021.9.1.2',
processed['metadata']['path'][0])
self.assertEqual("4",
processed['metadata']['path'][1](4))
def test_pysnmp_ver43(self):
# Test pysnmp version >=4.3 compatibility of ObjectIdentifier
from distutils import version
import pysnmp
has43 = (version.StrictVersion(pysnmp.__version__) >=
version.StrictVersion('4.3.0'))
oid = '1.3.6.4.1.2021.11.57.0'
if has43:
from pysnmp.entity import engine
from pysnmp.smi import rfc1902
from pysnmp.smi import view
snmp_engine = engine.SnmpEngine()
mvc = view.MibViewController(snmp_engine.getMibBuilder())
name = rfc1902.ObjectIdentity(oid)
name.resolveWithMib(mvc)
else:
from pysnmp.proto import rfc1902
name = rfc1902.ObjectName(oid)
self.assertEqual(oid, str(name))
@mock.patch.object(snmp.cmdgen, 'UsmUserData')
def test_auth_strategy(self, mock_method):
host = ''.join(['snmp://a:b@foo?auth_proto=sha',
'&priv_password=pass&priv_proto=aes256'])
host = netutils.urlsplit(host)
self.inspector._get_auth_strategy(host)
mock_method.assert_called_with(
'a', authKey='b',
authProtocol=snmp.cmdgen.usmHMACSHAAuthProtocol,
privProtocol=snmp.cmdgen.usmAesCfb256Protocol,
privKey='pass')
host2 = 'snmp://a:b@foo?&priv_password=pass'
host2 = netutils.urlsplit(host2)
self.inspector._get_auth_strategy(host2)
mock_method.assert_called_with('a', authKey='b', privKey='pass')

@@ -1,193 +0,0 @@
#
# Copyright 2015 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import fixtures
from oslo_utils import fileutils
import yaml
from ceilometer import declarative
from ceilometer.hardware.inspector import base as inspector_base
from ceilometer.hardware.pollsters import generic
from ceilometer import sample
from ceilometer import service
from ceilometer.tests import base as test_base
class TestMeterDefinition(test_base.BaseTestCase):
def test_config_definition(self):
cfg = dict(name='test',
type='gauge',
unit='B',
snmp_inspector={})
definition = generic.MeterDefinition(cfg)
self.assertEqual('test', definition.name)
self.assertEqual('gauge', definition.type)
self.assertEqual('B', definition.unit)
self.assertEqual({}, definition.snmp_inspector)
def test_config_missing_field(self):
cfg = dict(name='test', type='gauge')
try:
generic.MeterDefinition(cfg)
except declarative.MeterDefinitionException as e:
self.assertEqual("Missing field unit", e.brief_message)
def test_config_invalid_field(self):
cfg = dict(name='test',
type='gauge',
unit='B',
invalid={})
definition = generic.MeterDefinition(cfg)
self.assertEqual("foobar", getattr(definition, 'invalid', 'foobar'))
def test_config_invalid_type_field(self):
cfg = dict(name='test',
type='invalid',
unit='B',
snmp_inspector={})
try:
generic.MeterDefinition(cfg)
except declarative.MeterDefinitionException as e:
self.assertEqual("Unrecognized type value invalid",
e.brief_message)
def test_config_missing_unit_field(self):
cfg = dict(name='hardware.cpu.user',
snmp_inspector={"matching_type": "type_exact",
"oid": "1.3.6.1.4.1.2021.11.50.0",
"type": "int"})
try:
generic.MeterDefinition(cfg)
except declarative.MeterDefinitionException as e:
self.assertEqual("Missing field unit",
e.brief_message)
@mock.patch('ceilometer.hardware.pollsters.generic.LOG')
def test_bad_metric_skip(self, LOG):
cfg = {'metric': [dict(name='test1',
type='gauge',
unit='B',
snmp_inspector={}),
dict(name='test_bad',
type='invalid',
unit='B',
snmp_inspector={}),
dict(name='test2',
type='gauge',
unit='B',
snmp_inspector={})]}
data = generic.load_definition(cfg)
self.assertEqual(2, len(data))
LOG.error.assert_called_with(
"Error loading meter definition: %s",
"Unrecognized type value invalid")
class FakeInspector(inspector_base.Inspector):
net_metadata = dict(name='test.teest',
mac='001122334455',
ip='10.0.0.2',
speed=1000)
DATA = {
'test': (0.99, {}, {}),
'test2': (90, net_metadata, {}),
}
def inspect_generic(self, host, cache,
extra_metadata=None, param=None):
yield self.DATA[host.hostname]
class TestGenericPollsters(test_base.BaseTestCase):
@staticmethod
def faux_get_inspector(url, namespace=None):
return FakeInspector()
def setUp(self):
super(TestGenericPollsters, self).setUp()
self.conf = service.prepare_service([], [])
self.resources = ["snmp://test", "snmp://test2"]
self.useFixture(fixtures.MockPatch(
'ceilometer.hardware.inspector.get_inspector',
self.faux_get_inspector))
self.pollster = generic.GenericHardwareDeclarativePollster(self.conf)
def _setup_meter_def_file(self, cfg):
cfg = cfg.encode('utf-8')
meter_cfg_file = fileutils.write_to_tempfile(content=cfg,
prefix="snmp",
suffix="yaml")
self.conf.set_override(
'meter_definitions_file',
meter_cfg_file, group='hardware')
cfg = declarative.load_definitions(
self.conf, {}, self.conf.hardware.meter_definitions_file)
return cfg
def _check_get_samples(self, name, definition,
expected_value, expected_type, expected_unit=None):
self.pollster._update_meter_definition(definition)
cache = {}
samples = list(self.pollster.get_samples(None, cache,
self.resources))
self.assertTrue(samples)
self.assertIn(self.pollster.CACHE_KEY, cache)
for resource in self.resources:
self.assertIn(resource, cache[self.pollster.CACHE_KEY])
self.assertEqual(set([name]),
set([s.name for s in samples]))
match = [s for s in samples if s.name == name]
self.assertEqual(expected_value, match[0].volume)
self.assertEqual(expected_type, match[0].type)
if expected_unit:
self.assertEqual(expected_unit, match[0].unit)
def test_get_samples(self):
param = dict(matching_type='type_exact',
oid='1.3.6.1.4.1.2021.10.1.3.1',
type='lambda x: float(str(x))')
meter_def = generic.MeterDefinition(dict(type='gauge',
name='hardware.test1',
unit='process',
snmp_inspector=param))
self._check_get_samples('hardware.test1',
meter_def,
0.99, sample.TYPE_GAUGE,
expected_unit='process')
def test_get_pollsters_extensions(self):
param = dict(matching_type='type_exact',
oid='1.3.6.1.4.1.2021.10.1.3.1',
type='lambda x: float(str(x))')
meter_cfg = yaml.dump(
{'metric': [dict(type='gauge',
name='hardware.test1',
unit='process',
snmp_inspector=param),
dict(type='gauge',
name='hardware.test2.abc',
unit='process',
snmp_inspector=param)]})
self._setup_meter_def_file(meter_cfg)
pollster = generic.GenericHardwareDeclarativePollster
# Clear cached mapping
pollster.mapping = None
exts = pollster.get_pollsters_extensions(self.conf)
self.assertEqual(2, len(exts))
self.assertIn(exts[0].name, ['hardware.test1', 'hardware.test2.abc'])
self.assertIn(exts[1].name, ['hardware.test1', 'hardware.test2.abc'])

@@ -1,59 +0,0 @@
#
# Copyright 2013 Intel Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import netutils
from ceilometer.hardware.pollsters import util
from ceilometer import sample
from ceilometer.tests import base as test_base
class TestPollsterUtils(test_base.BaseTestCase):
def setUp(self):
super(TestPollsterUtils, self).setUp()
self.host_url = netutils.urlsplit("snmp://127.0.0.1:161")
def test_make_sample(self):
s = util.make_sample_from_host(self.host_url,
name='test',
sample_type=sample.TYPE_GAUGE,
unit='B',
volume=1,
res_metadata={
'metakey': 'metaval',
})
self.assertEqual('127.0.0.1', s.resource_id)
self.assertIn('snmp://127.0.0.1:161', s.resource_metadata.values())
self.assertIn('metakey', s.resource_metadata.keys())
def test_make_sample_extra(self):
extra = {
'project_id': 'project',
'resource_id': 'resource'
}
s = util.make_sample_from_host(self.host_url,
name='test',
sample_type=sample.TYPE_GAUGE,
unit='B',
volume=1,
extra=extra)
self.assertIsNone(s.user_id)
self.assertEqual('project', s.project_id)
self.assertEqual('resource', s.resource_id)
self.assertEqual({'resource_url': 'snmp://127.0.0.1:161',
'project_id': 'project',
'resource_id':
'resource'},
s.resource_metadata)

@@ -18,7 +18,6 @@ from unittest import mock
from oslotest import base
from ceilometer.hardware import discovery as hardware
from ceilometer.polling.discovery import endpoint
from ceilometer.polling.discovery import localnode
from ceilometer.polling.discovery import tenant as project
@@ -146,63 +145,3 @@ class TestProjectDiscovery(base.BaseTestCase):
result = self.discovery.discover(self.manager)
self.assertEqual(len(result), 3)
self.assertEqual(self.manager.keystone.projects.list.call_count, 2)
class TestHardwareDiscovery(base.BaseTestCase):
class MockInstance(object):
addresses = {'ctlplane': [
{'addr': '0.0.0.0',
'OS-EXT-IPS-MAC:mac_addr': '01-23-45-67-89-ab'}
]}
id = 'resource_id'
image = {'id': 'image_id'}
flavor = {'id': 'flavor_id'}
expected = {
'resource_id': 'resource_id',
'resource_url': 'snmp://ro_snmp_user:password@0.0.0.0',
'mac_addr': '01-23-45-67-89-ab',
'image_id': 'image_id',
'flavor_id': 'flavor_id',
}
expected_usm = {
'resource_id': 'resource_id',
'resource_url': ''.join(['snmp://ro_snmp_user:password@0.0.0.0',
'?priv_proto=aes192',
'&priv_password=priv_pass']),
'mac_addr': '01-23-45-67-89-ab',
'image_id': 'image_id',
'flavor_id': 'flavor_id',
}
def setUp(self):
super(TestHardwareDiscovery, self).setUp()
self.CONF = service.prepare_service([], [])
self.discovery = hardware.NodesDiscoveryTripleO(self.CONF)
self.discovery.nova_cli = mock.MagicMock()
self.manager = mock.MagicMock()
def test_hardware_discovery(self):
self.discovery.nova_cli.instance_get_all.return_value = [
self.MockInstance()]
resources = self.discovery.discover(self.manager)
self.assertEqual(1, len(resources))
self.assertEqual(self.expected, resources[0])
def test_hardware_discovery_without_flavor(self):
instance = self.MockInstance()
instance.flavor = {}
self.discovery.nova_cli.instance_get_all.return_value = [instance]
resources = self.discovery.discover(self.manager)
self.assertEqual(0, len(resources))
def test_hardware_discovery_usm(self):
self.CONF.set_override('readonly_user_priv_proto', 'aes192',
group='hardware')
self.CONF.set_override('readonly_user_priv_password', 'priv_pass',
group='hardware')
self.discovery.nova_cli.instance_get_all.return_value = [
self.MockInstance()]
resources = self.discovery.discover(self.manager)
self.assertEqual(self.expected_usm, resources[0])

@@ -25,7 +25,6 @@ from keystoneauth1 import exceptions as ka_exceptions
from stevedore import extension
from ceilometer.compute import discovery as nova_discover
from ceilometer.hardware import discovery
from ceilometer.polling.dynamic_pollster import DynamicPollster
from ceilometer.polling.dynamic_pollster import \
NonOpenStackApisPollsterDefinition
@@ -689,48 +688,6 @@ class TestPollingAgent(BaseAgent):
self.assertFalse(self.PollsterKeystone.samples)
self.assertFalse(self.notified_samples)
@mock.patch('ceilometer.polling.manager.LOG')
@mock.patch('ceilometer.nova_client.LOG')
def test_hardware_discover_fail_minimize_logs(self, novalog, baselog):
class PollsterHardware(TestPollster):
discovery = 'tripleo_overcloud_nodes'
class PollsterHardwareAnother(TestPollster):
discovery = 'tripleo_overcloud_nodes'
self.mgr.extensions.extend([
extension.Extension('testhardware',
None,
None,
PollsterHardware(self.CONF), ),
extension.Extension('testhardware2',
None,
None,
PollsterHardwareAnother(self.CONF), )
])
ext = extension.Extension('tripleo_overcloud_nodes',
None,
None,
discovery.NodesDiscoveryTripleO(self.CONF))
self.mgr.discoveries = (extension.ExtensionManager
.make_test_instance([ext]))
poll_cfg = {
'sources': [{
'name': "test_hardware",
'interval': 10,
'meters': ['testhardware', 'testhardware2'],
'sinks': ['test_sink']}],
'sinks': [{
'name': 'test_sink',
'publishers': ["test"]}]
}
self.setup_polling(poll_cfg)
polling_tasks = self.mgr.setup_polling_tasks()
self.mgr.interval_task(list(polling_tasks.values())[0])
self.assertEqual(1, novalog.exception.call_count)
self.assertFalse(baselog.exception.called)
@mock.patch('ceilometer.polling.manager.LOG')
def test_polling_exception(self, LOG):
source_name = 'test_pollingexception'

@@ -19,9 +19,8 @@ Notifications
messages from the configured message queue system.
Polling
Retrieve information directly from the hypervisor or from the host
machine using SNMP, or by using the APIs of other OpenStack
services.
Retrieve information directly from the hypervisor or by using the APIs of
other OpenStack services.
Notifications
=============
@@ -281,15 +280,13 @@ Central agent
~~~~~~~~~~~~~
This agent is responsible for polling public REST APIs to retrieve additional
information on OpenStack resources not already surfaced via notifications,
and also for polling hardware resources over SNMP.
information on OpenStack resources not already surfaced via notifications.
Some of the services polled with this agent are:
- OpenStack Networking
- OpenStack Object Storage
- OpenStack Block Storage
- Hardware resources via SNMP
To install and configure this service use the :ref:`install_rdo`
section in the Installation Tutorials and Guides.

@@ -398,116 +398,6 @@ meters are recorded from capable platform:
| | | | | | the system |
+---------------------+-------+------+----------+----------+------------------+
SNMP based meters
~~~~~~~~~~~~~~~~~
Telemetry supports gathering SNMP based generic host meters. In order to
be able to collect this data you need to run snmpd on each target host.
The following meters are available about the host machines by using
SNMP:
+---------------------+-------+------+----------+----------+------------------+
| Name | Type | Unit | Resource | Origin | Note |
+=====================+=======+======+==========+==========+==================+
| **Meters added in the Mitaka release or earlier** |
+---------------------+-------+------+----------+----------+------------------+
| hardware.cpu.load.\ | Gauge | proc\| host ID | Pollster | CPU load in the |
| 1min | | ess | | | past 1 minute |
+---------------------+-------+------+----------+----------+------------------+
| hardware.cpu.load.\ | Gauge | proc\| host ID | Pollster | CPU load in the |
| 5min | | ess | | | past 5 minutes |
+---------------------+-------+------+----------+----------+------------------+
| hardware.cpu.load.\ | Gauge | proc\| host ID | Pollster | CPU load in the |
| 15min | | ess | | | past 15 minutes |
+---------------------+-------+------+----------+----------+------------------+
| hardware.disk.size\ | Gauge | KB | disk ID | Pollster | Total disk size |
| .total | | | | | |
+---------------------+-------+------+----------+----------+------------------+
| hardware.disk.size\ | Gauge | KB | disk ID | Pollster | Used disk size |
| .used | | | | | |
+---------------------+-------+------+----------+----------+------------------+
| hardware.memory.to\ | Gauge | KB | host ID | Pollster | Total physical |
| tal | | | | | memory size |
+---------------------+-------+------+----------+----------+------------------+
| hardware.memory.us\ | Gauge | KB | host ID | Pollster | Used physical m\ |
| ed | | | | | emory size |
+---------------------+-------+------+----------+----------+------------------+
| hardware.memory.bu\ | Gauge | KB | host ID | Pollster | Physical memory |
| ffer | | | | | buffer size |
+---------------------+-------+------+----------+----------+------------------+
| hardware.memory.ca\ | Gauge | KB | host ID | Pollster | Cached physical |
| ched | | | | | memory size |
+---------------------+-------+------+----------+----------+------------------+
| hardware.memory.sw\ | Gauge | KB | host ID | Pollster | Total swap space |
| ap.total | | | | | size |
+---------------------+-------+------+----------+----------+------------------+
| hardware.memory.sw\ | Gauge | KB | host ID | Pollster | Available swap |
| ap.avail | | | | | space size |
+---------------------+-------+------+----------+----------+------------------+
| hardware.network.i\ | Cumul\| B | interface| Pollster | Bytes received |
| ncoming.bytes | ative | | ID | | by network inte\ |
| | | | | | rface |
+---------------------+-------+------+----------+----------+------------------+
| hardware.network.o\ | Cumul\| B | interface| Pollster | Bytes sent by n\ |
| utgoing.bytes | ative | | ID | | etwork interface |
+---------------------+-------+------+----------+----------+------------------+
| hardware.network.o\ | Cumul\| pack\| interface| Pollster | Sending error o\ |
| utgoing.errors | ative | et | ID | | f network inter\ |
| | | | | | face |
+---------------------+-------+------+----------+----------+------------------+
| hardware.network.i\ | Cumul\| data\| host ID | Pollster | Number of recei\ |
| p.incoming.datagra\ | ative | grams| | | ved datagrams |
| ms | | | | | |
+---------------------+-------+------+----------+----------+------------------+
| hardware.network.i\ | Cumul\| data\| host ID | Pollster | Number of sent |
| p.outgoing.datagra\ | ative | grams| | | datagrams |
| ms | | | | | |
+---------------------+-------+------+----------+----------+------------------+
| hardware.system_st\ | Cumul\| bloc\| host ID | Pollster | Aggregated numb\ |
| ats.io.incoming.bl\ | ative | ks | | | er of blocks re\ |
| ocks | | | | | ceived to block |
| | | | | | device |
+---------------------+-------+------+----------+----------+------------------+
| hardware.system_st\ | Cumul\| bloc\| host ID | Pollster | Aggregated numb\ |
| ats.io.outgoing.bl\ | ative | ks | | | er of blocks se\ |
| ocks | | | | | nt to block dev\ |
| | | | | | ice |
+---------------------+-------+------+----------+----------+------------------+
| **Meters added in the Queens release** |
+---------------------+-------+------+----------+----------+------------------+
| hardware.disk.read.\| Gauge | B | disk ID | Pollster | Bytes read from |
| bytes | | | | | device since boot|
+---------------------+-------+------+----------+----------+------------------+
| hardware.disk.write\| Gauge | B | disk ID | Pollster | Bytes written to |
| .bytes | | | | | device since boot|
+---------------------+-------+------+----------+----------+------------------+
| hardware.disk.read.\| Gauge | requ\| disk ID | Pollster | Read requests to |
| requests | | ests | | | device since boot|
+---------------------+-------+------+----------+----------+------------------+
| hardware.disk.write\| Gauge | requ\| disk ID | Pollster | Write requests to|
| .requests | | ests | | | device since boot|
+---------------------+-------+------+----------+----------+------------------+
| **Meters added in the Stein release** |
+---------------------+-------+------+----------+----------+------------------+
| hardware.cpu.user | Gauge | tick | host ID | Pollster | CPU user in tick |
+---------------------+-------+------+----------+----------+------------------+
| hardware.cpu.system | Gauge | tick | host ID | Pollster | CPU system in t\ |
| | | | | | ick |
+---------------------+-------+------+----------+----------+------------------+
| hardware.cpu.nice | Gauge | tick | host ID | Pollster | CPU nice in tick |
+---------------------+-------+------+----------+----------+------------------+
| hardware.cpu.idle | Gauge | tick | host ID | Pollster | CPU idle in tick |
+---------------------+-------+------+----------+----------+------------------+
| hardware.cpu.wait | Gauge | tick | host ID | Pollster | CPU wait in tick |
+---------------------+-------+------+----------+----------+------------------+
| hardware.cpu.kernel | Gauge | tick | host ID | Pollster | CPU kernel in t\ |
| | | | | | ick |
+---------------------+-------+------+----------+----------+------------------+
| hardware.cpu.inter\ | Gauge | tick | host ID | Pollster | CPU interrupt i\ |
| rupt | | | | | n tick |
+---------------------+-------+------+----------+----------+------------------+
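As a point of reference for the meters above, collecting them meant querying each host's snmpd over the network with pysnmp (the dependency dropped from requirements.txt further down). The following is only a rough, hypothetical sketch of such a query, not the pollster's actual code path: the host name and community string are placeholders, and the OID is assumed to be UCD-SNMP-MIB::laLoad.1, which backed hardware.cpu.load.1min.

    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, getCmd)

    # One SNMPv2c GET for the 1-minute load average of a monitored host.
    error_indication, error_status, error_index, var_binds = next(getCmd(
        SnmpEngine(),
        CommunityData('public', mpModel=1),                  # placeholder community
        UdpTransportTarget(('overcloud-node.example.org', 161)),
        ContextData(),
        ObjectType(ObjectIdentity('1.3.6.1.4.1.2021.10.1.3.1'))))

    if error_indication:
        print('SNMP query failed: %s' % error_indication)
    else:
        for name, value in var_binds:
            print('%s = %s' % (name, value))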
OpenStack Image service
~~~~~~~~~~~~~~~~~~~~~~~

View File

@ -69,7 +69,7 @@ The Ceilometer project created 2 methods to collect data:
The first method is supported by the ceilometer-notification agent, which
monitors the message queues for notifications. Polling agents can be configured
either to poll the local hypervisor or remote APIs (public REST APIs exposed by
-services and host-level SNMP/IPMI daemons).
+services and host-level IPMI daemons).
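As a small illustration of the first method, the sketch below shows a bare oslo.messaging notification listener of the kind the ceilometer-notification agent builds on. It is a hypothetical minimal example, not Ceilometer's actual wiring: the transport URL, topic, and endpoint behaviour are placeholder assumptions.

    import oslo_messaging
    from oslo_config import cfg

    # Connect to the message bus (placeholder URL) and listen on the
    # 'notifications' topic, which OpenStack services publish to by default.
    transport = oslo_messaging.get_notification_transport(
        cfg.CONF, url='rabbit://guest:guest@localhost:5672/')
    targets = [oslo_messaging.Target(topic='notifications')]

    class SampleEndpoint(object):
        def info(self, ctxt, publisher_id, event_type, payload, metadata):
            # Ceilometer would turn the payload into samples; here we just print it.
            print(event_type, publisher_id)

    listener = oslo_messaging.get_notification_listener(
        transport, targets, [SampleEndpoint()], executor='threading')
    listener.start()
    listener.wait()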
Notification Agent: Listening for data
---------------------------------------

View File

@ -0,0 +1,8 @@
---
upgrade:
- |
``GenericHardwareDeclarativePollster`` has been removed. As a result, all
metrics gathered via the SNMP daemon have been removed as well.
- |
The ``NodesDiscoveryTripleO`` discovery plugin has been removed.

View File

@ -23,7 +23,6 @@ oslo.messaging>=10.3.0 # Apache-2.0
oslo.upgradecheck>=0.1.1 # Apache-2.0
oslo.utils>=4.7.0 # Apache-2.0
oslo.privsep>=1.32.0 # Apache-2.0
-pysnmp<5.0.0,>=4.2.3 # BSD
python-glanceclient>=2.8.0 # Apache-2.0
python-keystoneclient>=3.18.0 # Apache-2.0
keystoneauth1>=3.18.0 # Apache-2.0

View File

@ -60,7 +60,6 @@ ceilometer.discover.central =
ipsec_connections = ceilometer.network.services.discovery:IPSecConnectionsDiscovery
fw_services = ceilometer.network.services.discovery:FirewallDiscovery
fw_policy = ceilometer.network.services.discovery:FirewallPolicyDiscovery
-tripleo_overcloud_nodes = ceilometer.hardware.discovery:NodesDiscoveryTripleO
fip_services = ceilometer.network.services.discovery:FloatingIPDiscovery
images = ceilometer.image.discovery:ImagesDiscovery
volumes = ceilometer.volume.discovery:VolumeDiscovery
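The groups above are stevedore plugin namespaces: each name on the left becomes a loadable extension. Below is a small sketch, assuming only that stevedore is installed (it already is, as a Ceilometer dependency), of how the discovery namespace edited in this hunk can be enumerated; after this change the tripleo_overcloud_nodes name no longer shows up in the listing.

    from stevedore import extension

    # List the discovery plugins registered under ceilometer.discover.central.
    # invoke_on_load=False imports the plugin classes without instantiating them.
    mgr = extension.ExtensionManager(namespace='ceilometer.discover.central',
                                     invoke_on_load=False)
    print(sorted(mgr.names()))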
@ -187,17 +186,11 @@ ceilometer.poll.central =
volume.snapshot.size = ceilometer.volume.cinder:VolumeSnapshotSize
volume.backup.size = ceilometer.volume.cinder:VolumeBackupSize
ceilometer.builder.poll.central =
hardware.snmp = ceilometer.hardware.pollsters.generic:GenericHardwareDeclarativePollster
ceilometer.compute.virt =
libvirt = ceilometer.compute.virt.libvirt.inspector:LibvirtInspector
hyperv = ceilometer.compute.virt.hyperv.inspector:HyperVInspector
vsphere = ceilometer.compute.virt.vmware.inspector:VsphereInspector
ceilometer.hardware.inspectors =
snmp = ceilometer.hardware.inspector.snmp:SNMPInspector
ceilometer.sample.publisher =
test = ceilometer.publisher.test:TestPublisher
notifier = ceilometer.publisher.messaging:SampleNotifierPublisher