diff --git a/plugin_source/deployment_scripts/compute_post_deployment.py b/plugin_source/deployment_scripts/compute_post_deployment.py
index 98469fe..d54f080 100755
--- a/plugin_source/deployment_scripts/compute_post_deployment.py
+++ b/plugin_source/deployment_scripts/compute_post_deployment.py
@@ -279,10 +279,14 @@ def patch_ceilometer():
 
     Order of patches applied:
         ceilometer-poll-cpu-util.patch
+        ceilometer-rates-always-zero.patch
+        ceilometer-support-network-bytes.patch
     """
     patchset_dir = sys.path[0]
     patchfile_list = [
         '%s/patchset/ceilometer-poll-cpu-util.patch' % patchset_dir,
+        '%s/patchset/ceilometer-rates-always-zero.patch' % patchset_dir,
+        '%s/patchset/ceilometer-support-network-bytes.patch' % patchset_dir,
     ]
     for patch_file in patchfile_list:
         utils.execute('patch', '-d', DIST_PACKAGES_DIR, '-p1', '-i',
diff --git a/plugin_source/deployment_scripts/patchset/bandwidth b/plugin_source/deployment_scripts/patchset/bandwidth
new file mode 100755
index 0000000..91256a1
--- /dev/null
+++ b/plugin_source/deployment_scripts/patchset/bandwidth
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace
+# which means the Nova xenapi plugins must use only Python 2.4 features
+
+"""Fetch Bandwidth data from VIF network devices."""
+
+import utils
+
+import pluginlib_nova
+
+import re
+
+
+pluginlib_nova.configure_logging('bandwidth')
+
+
+def _read_proc_net():
+    f = open('/proc/net/dev', 'r')
+    try:
+        return f.readlines()
+    finally:
+        f.close()
+
+
+def _get_bandwitdth_from_proc():
+    devs = [l.strip() for l in _read_proc_net()]
+    # ignore headers
+    devs = devs[2:]
+    vif_pattern = re.compile("^vif(\d+)\.(\d+)")
+    dlist = [d.split(':', 1) for d in devs if vif_pattern.match(d)]
+    devmap = dict()
+    for name, stats in dlist:
+        slist = stats.split()
+        dom, vifnum = name[3:].split('.', 1)
+        dev = devmap.get(dom, {})
+        # Note, we deliberately swap in and out, as instance traffic
+        # shows up inverted due to going though the bridge. (mdragon)
+        dev[vifnum] = dict(bw_in=int(slist[8]), bw_out=int(slist[0]))
+        devmap[dom] = dev
+    return devmap
+
+
+def fetch_all_bandwidth(session):
+    return _get_bandwitdth_from_proc()
+
+
+if __name__ == '__main__':
+    utils.register_plugin_calls(fetch_all_bandwidth)
diff --git a/plugin_source/deployment_scripts/patchset/ceilometer-rates-always-zero.patch b/plugin_source/deployment_scripts/patchset/ceilometer-rates-always-zero.patch
new file mode 100644
index 0000000..820fae2
--- /dev/null
+++ b/plugin_source/deployment_scripts/patchset/ceilometer-rates-always-zero.patch
@@ -0,0 +1,150 @@
+diff --git a/ceilometer/compute/virt/xenapi/inspector.py b/ceilometer/compute/virt/xenapi/inspector.py
+index 9632cba..18ed5d7 100644
+--- a/ceilometer/compute/virt/xenapi/inspector.py
++++ b/ceilometer/compute/virt/xenapi/inspector.py
+@@ -160,18 +160,19 @@ class XenapiInspector(virt_inspector.Inspector):
+         if vif_refs:
+             for vif_ref in vif_refs:
+                 vif_rec = self._call_xenapi("VIF.get_record", vif_ref)
+-                vif_metrics_ref = self._call_xenapi(
+-                    "VIF.get_metrics", vif_ref)
+-                vif_metrics_rec = self._call_xenapi(
+-                    "VIF_metrics.get_record", vif_metrics_ref)
++
++                rx_rate = float(self._call_xenapi(
++                    "VM.query_data_source", vm_ref,
++                    "vif_%s_rx" % vif_rec['device']))
++                tx_rate = float(self._call_xenapi(
++                    "VM.query_data_source", vm_ref,
++                    "vif_%s_tx" % vif_rec['device']))
+ 
+                 interface = virt_inspector.Interface(
+                     name=vif_rec['uuid'],
+                     mac=vif_rec['MAC'],
+                     fref=None,
+                     parameters=None)
+-                rx_rate = float(vif_metrics_rec['io_read_kbs']) * units.Ki
+-                tx_rate = float(vif_metrics_rec['io_write_kbs']) * units.Ki
+                 stats = virt_inspector.InterfaceRateStats(rx_rate, tx_rate)
+                 yield (interface, stats)
+ 
+@@ -182,16 +183,14 @@
+         if vbd_refs:
+             for vbd_ref in vbd_refs:
+                 vbd_rec = self._call_xenapi("VBD.get_record", vbd_ref)
+-                vbd_metrics_ref = self._call_xenapi("VBD.get_metrics",
+-                                                    vbd_ref)
+-                vbd_metrics_rec = self._call_xenapi("VBD_metrics.get_record",
+-                                                    vbd_metrics_ref)
+ 
+                 disk = virt_inspector.Disk(device=vbd_rec['device'])
+-                # Stats provided from XenServer are in KB/s,
+-                # converting it to B/s.
+-                read_rate = float(vbd_metrics_rec['io_read_kbs']) * units.Ki
+-                write_rate = float(vbd_metrics_rec['io_write_kbs']) * units.Ki
++                read_rate = float(self._call_xenapi(
++                    "VM.query_data_source", vm_ref,
++                    "vbd_%s_read" % vbd_rec['device']))
++                write_rate = float(self._call_xenapi(
++                    "VM.query_data_source", vm_ref,
++                    "vbd_%s_write" % vbd_rec['device']))
+                 disk_rate_info = virt_inspector.DiskRateStats(
+                     read_bytes_rate=read_rate,
+                     read_requests_rate=0,
+diff --git a/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py b/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py
+index caa1c93..7e8f827 100644
+--- a/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py
++++ b/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py
+@@ -142,75 +142,42 @@ class TestXenapiInspection(base.BaseTestCase):
+         fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name',
+                          'id': 'fake_instance_id'}
+ 
+-        def fake_xenapi_request(method, args):
+-            vif_rec = {
+-                'metrics': 'vif_metrics_ref',
+-                'uuid': 'vif_uuid',
+-                'MAC': 'vif_mac',
+-            }
+-
+-            vif_metrics_rec = {
+-                'io_read_kbs': '1',
+-                'io_write_kbs': '2',
+-            }
+-            if method == 'VM.get_by_name_label':
+-                return ['vm_ref']
+-            elif method == 'VM.get_VIFs':
+-                return ['vif_ref']
+-            elif method == 'VIF.get_record':
+-                return vif_rec
+-            elif method == 'VIF.get_metrics':
+-                return 'vif_metrics_ref'
+-            elif method == 'VIF_metrics.get_record':
+-                return vif_metrics_rec
+-            else:
+-                return None
++        vif_rec = {
++            'metrics': 'vif_metrics_ref',
++            'uuid': 'vif_uuid',
++            'MAC': 'vif_mac',
++            'device': '0',
++        }
++        side_effects = [['vm_ref'], ['vif_ref'], vif_rec, 1024.0, 2048.0]
+ 
+         session = self.inspector.session
+         with mock.patch.object(session, 'xenapi_request',
+-                               side_effect=fake_xenapi_request):
++                               side_effect=side_effects):
+             interfaces = list(self.inspector.inspect_vnic_rates(fake_instance))
+ 
+             self.assertEqual(1, len(interfaces))
+             vnic0, info0 = interfaces[0]
+             self.assertEqual('vif_uuid', vnic0.name)
+             self.assertEqual('vif_mac', vnic0.mac)
+-            self.assertEqual(1024, info0.rx_bytes_rate)
+-            self.assertEqual(2048, info0.tx_bytes_rate)
++            self.assertEqual(1024.0, info0.rx_bytes_rate)
++            self.assertEqual(2048.0, info0.tx_bytes_rate)
+ 
+     def test_inspect_disk_rates(self):
+         fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name',
+                          'id': 'fake_instance_id'}
+ 
+-        def fake_xenapi_request(method, args):
+-            vbd_rec = {
+-                'device': 'xvdd'
+-            }
+-
+-            vbd_metrics_rec = {
+-                'io_read_kbs': '1',
+-                'io_write_kbs': '2'
+-            }
+-            if method == 'VM.get_by_name_label':
+-                return ['vm_ref']
+-            elif method == 'VM.get_VBDs':
+-                return ['vbd_ref']
+-            elif method == 'VBD.get_record':
+-                return vbd_rec
+-            elif method == 'VBD.get_metrics':
+-                return 'vbd_metrics_ref'
+-            elif method == 'VBD_metrics.get_record':
+-                return vbd_metrics_rec
+-            else:
+-                return None
++        vbd_rec = {
++            'device': 'xvdd'
++        }
++        side_effects = [['vm_ref'], ['vbd_ref'], vbd_rec, 1024.0, 2048.0]
+ 
+         session = self.inspector.session
+         with mock.patch.object(session, 'xenapi_request',
+-                               side_effect=fake_xenapi_request):
++                               side_effect=side_effects):
+             disks = list(self.inspector.inspect_disk_rates(fake_instance))
+ 
+             self.assertEqual(1, len(disks))
+             disk0, info0 = disks[0]
+             self.assertEqual('xvdd', disk0.device)
+-            self.assertEqual(1024, info0.read_bytes_rate)
+-            self.assertEqual(2048, info0.write_bytes_rate)
++            self.assertEqual(1024.0, info0.read_bytes_rate)
++            self.assertEqual(2048.0, info0.write_bytes_rate)
diff --git a/plugin_source/deployment_scripts/patchset/ceilometer-support-network-bytes.patch b/plugin_source/deployment_scripts/patchset/ceilometer-support-network-bytes.patch
new file mode 100644
index 0000000..c00924c
--- /dev/null
+++ b/plugin_source/deployment_scripts/patchset/ceilometer-support-network-bytes.patch
@@ -0,0 +1,123 @@
+diff --git a/ceilometer/compute/virt/xenapi/inspector.py b/ceilometer/compute/virt/xenapi/inspector.py
+index 9632cba..bbd5dc2 100644
+--- a/ceilometer/compute/virt/xenapi/inspector.py
++++ b/ceilometer/compute/virt/xenapi/inspector.py
+@@ -21,6 +21,11 @@ try:
+ except ImportError:
+     api = None
+ 
++try:
++    import cPickle as pickle
++except ImportError:
++    import pickle
++
+ from ceilometer.compute.pollsters import util
+ from ceilometer.compute.virt import inspector as virt_inspector
+ from ceilometer.i18n import _
+@@ -97,14 +102,29 @@ class XenapiInspector(virt_inspector.Inspector):
+     def __init__(self):
+         super(XenapiInspector, self).__init__()
+         self.session = get_api_session()
++        self.host_ref = self._get_host_ref()
++        self.host_uuid = self._get_host_uuid()
+ 
+     def _get_host_ref(self):
+         """Return the xenapi host on which nova-compute runs on."""
+         return self.session.xenapi.session.get_this_host(self.session.handle)
+ 
++    def _get_host_uuid(self):
++        return self.session.xenapi.host.get_uuid(self.host_ref)
++
+     def _call_xenapi(self, method, *args):
+         return self.session.xenapi_request(method, args)
+ 
++    def _call_plugin(self, plugin, fn, args):
++        args['host_uuid'] = self.host_uuid
++        return self.session.xenapi.host.call_plugin(
++            self.host_ref, plugin, fn, args)
++
++    def _call_plugin_serialized(self, plugin, fn, *args, **kwargs):
++        params = {'params': pickle.dumps(dict(args=args, kwargs=kwargs))}
++        rv = self._call_plugin(plugin, fn, params)
++        return pickle.loads(rv)
++
+     def _lookup_by_name(self, instance_name):
+         vm_refs = self._call_xenapi("VM.get_by_name_label", instance_name)
+         n = len(vm_refs)
+@@ -153,6 +173,31 @@ class XenapiInspector(virt_inspector.Inspector):
+         memory_usage = (total_mem - free_mem * units.Ki) / units.Mi
+         return virt_inspector.MemoryUsageStats(usage=memory_usage)
+ 
++    def inspect_vnics(self, instance):
++        instance_name = util.instance_name(instance)
++        vm_ref = self._lookup_by_name(instance_name)
++        dom_id = self._call_xenapi("VM.get_domid", vm_ref)
++        vif_refs = self._call_xenapi("VM.get_VIFs", vm_ref)
++        bw_all = self._call_plugin_serialized('bandwidth',
++                                              'fetch_all_bandwidth')
++        if vif_refs:
++            for vif_ref in vif_refs:
++                vif_rec = self._call_xenapi("VIF.get_record", vif_ref)
++
++                interface = virt_inspector.Interface(
++                    name=vif_rec['uuid'],
++                    mac=vif_rec['MAC'],
++                    fref=None,
++                    parameters=None)
++                bw_vif = bw_all[dom_id][vif_rec['device']]
++
++                # Todo : Currently the plugin can't support
++                # rx_packets and tx_packets, temporarily set them as -1.
++                stats = virt_inspector.InterfaceStats(
++                    rx_bytes=bw_vif['bw_in'], rx_packets='-1',
++                    tx_bytes=bw_vif['bw_out'], tx_packets='-1')
++                yield (interface, stats)
++
+     def inspect_vnic_rates(self, instance, duration=None):
+         instance_name = util.instance_name(instance)
+         vm_ref = self._lookup_by_name(instance_name)
+diff --git a/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py b/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py
+index caa1c93..fae1eef 100644
+--- a/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py
++++ b/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py
+@@ -138,6 +138,40 @@ class TestXenapiInspection(base.BaseTestCase):
+         memory_stat = self.inspector.inspect_memory_usage(fake_instance)
+         self.assertEqual(fake_stat, memory_stat)
+ 
++    def test_inspect_vnics(self):
++        fake_instance = {
++            'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name',
++            'id': 'fake_instance_id'}
++        vif_rec = {
++            'uuid': 'vif_uuid',
++            'MAC': 'vif_mac',
++            'device': '0',
++        }
++        request_returns = [['vm_ref'], '10', ['vif_ref'], vif_rec]
++        bandwidth_returns = [{
++            '10': {
++                '0': {
++                    'bw_in': 1024, 'bw_out': 2048
++                }
++            }
++        }]
++        session = self.inspector.session
++        with mock.patch.object(session, 'xenapi_request',
++                               side_effect=request_returns):
++            with mock.patch.object(self.inspector,
++                                   '_call_plugin_serialized',
++                                   side_effect=bandwidth_returns):
++
++                interfaces = list(
++                    self.inspector.inspect_vnics(fake_instance))
++
++                self.assertEqual(1, len(interfaces))
++                vnic0, info0 = interfaces[0]
++                self.assertEqual('vif_uuid', vnic0.name)
++                self.assertEqual('vif_mac', vnic0.mac)
++                self.assertEqual(1024, info0.rx_bytes)
++                self.assertEqual(2048, info0.tx_bytes)
++
+     def test_inspect_vnic_rates(self):
+         fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name',
+                          'id': 'fake_instance_id'}
diff --git a/suppack/build-xenserver-suppack.sh b/suppack/build-xenserver-suppack.sh
index f781aa7..7a6dab5 100755
--- a/suppack/build-xenserver-suppack.sh
+++ b/suppack/build-xenserver-suppack.sh
@@ -88,6 +88,8 @@ git clone -b $GITBRANCH --single-branch --depth 1 "$NOVA_GITREPO" nova
 pushd nova
 # patch xenhost as this file is not merged into this release
 cp $FUELPLUG_UTILS_ROOT/../plugin_source/deployment_scripts/patchset/xenhost plugins/xenserver/xenapi/etc/xapi.d/plugins/
+# patch bandwidth as this file is not merged into this release
+cp $FUELPLUG_UTILS_ROOT/../plugin_source/deployment_scripts/patchset/bandwidth plugins/xenserver/xenapi/etc/xapi.d/plugins/
 popd
 
 cp -r xenserver-nova-suppack-builder/plugins/xenserver/xenapi/* nova/plugins/xenserver/xenapi/
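Note on the data flow introduced above: the dom0 bandwidth plugin parses /proc/net/dev, keeps only the vifX.Y devices, and returns a dict keyed first by domain ID and then by VIF index, with bw_in/bw_out deliberately swapped because dom0 sees the guest's traffic from the bridge side. The following standalone sketch reproduces that parsing against a made-up /proc/net/dev snapshot; the sample text and the bandwidth_from_sample helper are illustrative only, while the real plugin reads the live file and must stay Python 2.4 compatible for dom0.

import re

SAMPLE_PROC_NET_DEV = """\
Inter-|   Receive                                                |  Transmit
 face |bytes    packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed
    lo:    1111      10    0    0    0     0          0         0     1111      10    0    0    0     0       0          0
vif10.0:    2048      20    0    0    0     0          0         0     1024      15    0    0    0     0       0          0
"""


def bandwidth_from_sample(text):
    devs = [l.strip() for l in text.splitlines()]
    # ignore the two header lines, exactly as the plugin does
    devs = devs[2:]
    vif_pattern = re.compile(r"^vif(\d+)\.(\d+)")
    dlist = [d.split(':', 1) for d in devs if vif_pattern.match(d)]
    devmap = {}
    for name, stats in dlist:
        slist = stats.split()
        dom, vifnum = name[3:].split('.', 1)
        dev = devmap.get(dom, {})
        # in and out are swapped on purpose: dom0 sees the guest's traffic
        # from the bridge side, so the vif's TX column is the guest's RX
        dev[vifnum] = dict(bw_in=int(slist[8]), bw_out=int(slist[0]))
        devmap[dom] = dev
    return devmap


print(bandwidth_from_sample(SAMPLE_PROC_NET_DEV))
# -> {'10': {'0': {'bw_in': 1024, 'bw_out': 2048}}}, the same shape the
#    test_inspect_vnics fixture feeds to inspect_vnics() via bw_all.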
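The _call_plugin_serialized() helper added in ceilometer-support-network-bytes.patch defines the wire contract with the dom0 plugin: call arguments travel as a pickled dict under the 'params' key, and the plugin's return value comes back pickled. Below is a minimal sketch of that round trip under stated assumptions: fake_call_plugin is a stand-in for session.xenapi.host.call_plugin(), the host_uuid injection done by _call_plugin() is left out, and the dom0-side unpickling is assumed to be handled by nova's utils.register_plugin_calls() wrapper.

import pickle

BW_FIXTURE = {'10': {'0': {'bw_in': 1024, 'bw_out': 2048}}}


def fake_call_plugin(plugin, fn, args):
    # Stand-in for session.xenapi.host.call_plugin(): unpack the pickled
    # arguments and return a pickled result, as the dom0 side is expected to.
    assert (plugin, fn) == ('bandwidth', 'fetch_all_bandwidth')
    unpacked = pickle.loads(args['params'])
    assert unpacked == {'args': (), 'kwargs': {}}
    return pickle.dumps(BW_FIXTURE)


# Mirrors _call_plugin_serialized('bandwidth', 'fetch_all_bandwidth')
params = {'params': pickle.dumps(dict(args=(), kwargs={}))}
bw_all = pickle.loads(fake_call_plugin('bandwidth', 'fetch_all_bandwidth', params))

# inspect_vnics() then indexes the result by domain id and VIF device number
dom_id, device = '10', '0'
print(bw_all[dom_id][device])   # {'bw_in': 1024, 'bw_out': 2048}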