Add tox configuration

Change-Id: Ic4051617b5757c649cd59dacf8e9dd9d7e9819a6
Signed-off-by: Julien Danjou <julien.danjou@enovance.com>
Author: Doug Hellmann, 2012-05-29 17:57:58 -04:00; committed by Julien Danjou
parent a49e59b115
commit 5e0a32f475
11 changed files with 107 additions and 28 deletions

.gitignore

@@ -4,3 +4,4 @@ TAGS
 *.egg-info
 build
 .coverage
+.tox

bin/ceilometer-agent

@@ -32,17 +32,18 @@ if __name__ == '__main__':
     flags.FLAGS(sys.argv)
     logging.setup()
     utils.monkey_patch()
-    server = service.Service.create(binary='ceilometer-agent',
-                                    topic='ceilometer.agent',
-                                    manager='ceilometer.agent.manager.AgentManager',
-                                    # FIXME(dhellmann): The
-                                    # periodic_interval is set very
-                                    # short for development. After we
-                                    # fix the configuration loading we
-                                    # can use the config file to
-                                    # adjust it and remove this
-                                    # hard-coded value.
-                                    periodic_interval=10,
-                                    )
+    server = \
+        service.Service.create(binary='ceilometer-agent',
+                               topic='ceilometer.agent',
+                               manager='ceilometer.agent.manager.AgentManager',
+                               # FIXME(dhellmann): The
+                               # periodic_interval is set very
+                               # short for development. After we
+                               # fix the configuration loading we
+                               # can use the config file to
+                               # adjust it and remove this
+                               # hard-coded value.
+                               periodic_interval=10,
+                               )
     service.serve(server)
     service.wait()

bin/ceilometer-collector

@@ -32,8 +32,10 @@ if __name__ == '__main__':
     flags.FLAGS(sys.argv)
     logging.setup()
     utils.monkey_patch()
-    server = service.Service.create(binary='ceilometer-collector',
-                                    topic='ceilometer.collector',
-                                    manager='ceilometer.collector.manager.CollectorManager')
+    server = \
+        service.Service.create(binary='ceilometer-collector',
+                               topic='ceilometer.collector',
+                               manager='ceilometer.collector.'
+                               'manager.CollectorManager')
     service.serve(server)
     service.wait()

ceilometer/compute/libvirt.py

@@ -78,14 +78,17 @@ class DiskIOPollster(plugin.PollsterBase):
             try:
                 disks = self._get_disks(conn, instance.name)
             except Exception as err:
-                self.LOG.warning('Ignoring instance %s: %s', instance.name, err)
+                self.LOG.warning('Ignoring instance %s: %s', \
+                                 instance.name, err)
                 self.LOG.exception(err)
                 continue
             bytes = 0
             for disk in disks:
                 stats = conn.block_stats(instance.name, disk)
-                self.LOG.info("DISKIO USAGE: %s %s: read-requests=%d read-bytes=%d write-requests=%d write-bytes=%d errors=%d",
-                              instance, disk, stats[0], stats[1], stats[2], stats[3], stats[4])
+                self.LOG.info("DISKIO USAGE: %s %s:"
+                              "read-requests=%d read-bytes=%d write-requests=%d write-bytes=%d errors=%d",
+                              instance, disk, stats[0], stats[1],
+                              stats[2], stats[3], stats[4])
                 bytes += stats[1] + stats[3]  # combine read and write
             yield make_counter_from_instance(instance,
                                              type='disk',
@@ -101,11 +104,13 @@ class CPUPollster(plugin.PollsterBase):
         conn = nova.virt.connection.get_connection(read_only=True)
         # FIXME(dhellmann): How do we get a list of instances without
         # talking directly to the database?
-        for instance in manager.db.instance_get_all_by_host(context, manager.host):
+        for instance in manager.db.instance_get_all_by_host(context,
+                                                            manager.host):
             self.LOG.info('checking instance %s', instance.uuid)
             try:
                 cpu_info = conn.get_info(instance)
-                self.LOG.info("CPUTIME USAGE: %s %d", instance, cpu_info['cpu_time'])
+                self.LOG.info("CPUTIME USAGE: %s %d",
+                              instance, cpu_info['cpu_time'])
                 yield make_counter_from_instance(instance,
                                                  type='cpu',
                                                  volume=cpu_info['cpu_time'],

ceilometer/compute/network.py

@@ -43,8 +43,10 @@ class FloatingIPPollster(plugin.PollsterBase):
                     resource_id=ip.id,
                     datetime=None,
                     duration=None,
-                    resource_metadata={'address': ip.address,
-                                       'fixed_ip_id': ip.fixed_ip_id,
-                                       'host': ip.host,
-                                       'pool': ip.pool,
-                                       'auto_assigned': ip.auto_assigned})
+                    resource_metadata={
+                        'address': ip.address,
+                        'fixed_ip_id': ip.fixed_ip_id,
+                        'host': ip.host,
+                        'pool': ip.pool,
+                        'auto_assigned': ip.auto_assigned
+                        })

ceilometer/plugin.py

@@ -43,4 +43,5 @@ class PollsterBase(object):
     @abc.abstractmethod
     def get_counters(self, manager, context):
-        """Return a sequence of Counter instances from polling the resources."""
+        """Return a sequence of Counter instances from polling the
+        resources."""

setup.cfg

@@ -0,0 +1,7 @@
+[nosetests]
+cover-package = ceilometer
+cover-html = true
+cover-erase = true
+cover-inclusive = true
+verbosity=2
+detailed-errors=1
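For reference (not part of the commit): the [nosetests] section supplies defaults that nose reads at startup, so a plain nosetests run from the project root should behave roughly like the explicit invocation below, assuming the usual nose coverage-plugin flags:

    nosetests --verbosity=2 --detailed-errors \
              --cover-package=ceilometer --cover-html \
              --cover-erase --cover-inclusive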

setup.py

@@ -33,12 +33,14 @@ setuptools.setup(
     py_modules=[],
     entry_points={
         'ceilometer.collector.compute': [
-            'instance_create = ceilometer.compute.notifications:InstanceCreate',
+            'instance_create'
+            '= ceilometer.compute.notifications:InstanceCreate',
         ],
         'ceilometer.poll.compute': [
             'libvirt_diskio = ceilometer.compute.libvirt:DiskIOPollster',
             'libvirt_cpu = ceilometer.compute.libvirt:CPUPollster',
-            'network_floatingip = ceilometer.compute.network:FloatingIPPollster',
+            'network_floatingip'
+            '= ceilometer.compute.network:FloatingIPPollster',
         ],
     },
 )

tools/pip-requires

@@ -0,0 +1,10 @@
+https://github.com/openstack/openstack-common/zipball/master#egg=openstack.common
+https://github.com/openstack/nova/zipball/master#egg=nova
+webob
+kombu
+iso8601
+lockfile
+netaddr
+argparse
+sqlalchemy
+anyjson==0.3.1

tools/test-requires

@@ -0,0 +1,6 @@
+nose
+openstack.nose_plugin
+coverage
+pep8>=1.0
+eventlet
+mox
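tox installs both requirements files automatically through the deps setting in tox.ini below; for a hand-built virtualenv the rough equivalent (standard pip usage, not part of this commit) would be:

    pip install -r tools/pip-requires -r tools/test-requires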

tox.ini

@@ -0,0 +1,42 @@
+[tox]
+envlist = py26,py27,pep8
+
+[testenv]
+setenv = VIRTUAL_ENV={envdir}
+         NOSE_WITH_OPENSTACK=1
+         NOSE_OPENSTACK_COLOR=1
+         NOSE_OPENSTACK_RED=0.05
+         NOSE_OPENSTACK_YELLOW=0.025
+         NOSE_OPENSTACK_SHOW_ELAPSED=1
+deps = -r{toxinidir}/tools/pip-requires
+       -r{toxinidir}/tools/test-requires
+commands = nosetests
+
+[testenv:pep8]
+deps = pep8
+commands = pep8 --repeat --show-source ceilometer setup.py bin/ceilometer-agent bin/ceilometer-collector
+
+[testenv:venv]
+commands = {posargs}
+
+[testenv:cover]
+commands = nosetests --cover-erase --cover-package=ceilometer --with-xcoverage
+
+[tox:jenkins]
+downloadcache = ~/cache/pip
+
+[testenv:jenkins26]
+basepython = python2.6
+setenv = NOSE_WITH_XUNIT=1
+
+[testenv:jenkins27]
+basepython = python2.7
+setenv = NOSE_WITH_XUNIT=1
+
+[testenv:jenkinscover]
+setenv = NOSE_WITH_XUNIT=1
+commands = nosetests --cover-erase --cover-package=ceilometer --with-xcoverage
+
+[testenv:jenkinsvenv]
+setenv = NOSE_WITH_XUNIT=1
+commands = {posargs}
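For reference (standard tox usage, not part of this commit), the environments defined above are selected with tox's -e option:

    tox                                    # run the default envlist: py26, py27 and pep8
    tox -e pep8                            # style checks only
    tox -e cover                           # unit tests with coverage reporting
    tox -e venv -- python setup.py sdist   # run an arbitrary command via {posargs}

The jenkins* environments mirror the regular ones but set NOSE_WITH_XUNIT=1 so the CI jobs get XML test reports.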