Merge "Remove bundled intree ceilometer tempest plugin"

This commit is contained in:
Zuul 2018-02-06 07:59:09 +00:00 committed by Gerrit Code Review
commit d86651edc0
22 changed files with 0 additions and 1488 deletions

View File

@ -1,309 +0,0 @@
#
# Tests for gnocchi-threshold-alarm
#
# user_id : c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
# project_id : 709f6ed6-bfb3-4649-b303-0019a7f6aef2
# alarm name : gabbi-gnocchi-threshold-resource-alarm
# resource_id : gabbi-mock-resource
# archive policy : gabbiliveceph
#
defaults:
request_headers:
x-auth-token: $ENVIRON['ADMIN_TOKEN']
tests:
#
# Setup gnocchi archive policy/resource type/resource/metrics
#
- name: SETUP. create archive policy gabbiliveceph
desc: create archve policy 'gabbiliveceph' for tests
POST: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/archive_policy
status: 201
request_headers:
content-type: application/json
data:
name: gabbiliveceph
back_window: 0
definition:
- granularity: 1 second
points: 60
- granularity: 20 second
timespan: 1 minute
- points: 5
timespan: 5 minute
aggregation_methods:
- mean
- min
- max
response_json_paths:
$.name: gabbiliveceph
$.back_window: 0
$.aggregation_methods.`len`: 3
- name: create resource type ceph_account
desc: needed to create a resource
POST: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/resource_type
status: 201 || 409
request_headers:
content-type: application/json
data:
name: ceph_account
- name: create resource of ceph_account type
POST: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/resource/ceph_account
request_headers:
content-type: application/json
data:
id: 662e46f3-8c06-430c-8a9c-adcaedd1272c
user_id: 27e342e4-4489-424f-a7e4-ba8ed9ad729c
project_id: d6d32769-c351-4758-b0a2-458fa1a065a3
metrics:
radosgw.objects:
archive_policy_name: gabbiliveceph
radosgw.objects.size:
archive_policy_name: gabbiliveceph
radosgw.objects.containers:
archive_policy_name: gabbiliveceph
radosgw.api.request:
archive_policy_name: gabbiliveceph
radosgw.containers.objects:
archive_policy_name: gabbiliveceph
radosgw.containers.objects.size:
archive_policy_name: gabbiliveceph
status: 201
response_json_paths:
$.user_id: 27e342e4-4489-424f-a7e4-ba8ed9ad729c
$.project_id: d6d32769-c351-4758-b0a2-458fa1a065a3
$.metrics.`len`: 6
#
# Actual tests
#
- name: search 'gabbi-gnocchi-threshold-resource-alarm' alarm doesnt exist
desc: search for alarm using user_id, project_id, alarm_name
GET: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
query_parameters:
q.field:
- user_id
- project_id
- name
q.op:
- eq
- eq
- eq
q.value:
- c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
- 709f6ed6-bfb3-4649-b303-0019a7f6aef2
- gabbi-gnocchi-threshold-resource-alarm
method: GET
response_json_paths:
$.`len`: 0
- name: create alarm 'gabbi-gnocchi-threshold-resource-alarm'
desc: create a threshold alarm gabbi-gnocchi-threshold-resource-alarm
POST: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
request_headers:
content-type: application/json
data:
alarm_actions:
- "http://site:8000/gnocchi-threshold-resource"
alarm_id: null
description: An gabbilive threshold based alarm
enabled: true
insufficient_data_actions:
- "http://site:8000/nodata-gnocchi-threshold-resource"
name: "gabbi-gnocchi-threshold-resource-alarm"
ok_actions:
- "http://site:8000/ok-gnocchi-threshold-resource"
project_id: 709f6ed6-bfb3-4649-b303-0019a7f6aef2
repeat_actions: false
severity: moderate
state: "insufficient data"
type: gnocchi_resources_threshold
gnocchi_resources_threshold_rule:
evaluation_periods: 3
metric: "radosgw.objects.size"
resource_id: "662e46f3-8c06-430c-8a9c-adcaedd1272c"
aggregation_method: "mean"
granularity: 60
threshold: 5.0
comparison_operator: "ge"
resource_type: ceph_account
user_id: c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
status: 201
response_json_paths:
$.name: gabbi-gnocchi-threshold-resource-alarm
$.type: gnocchi_resources_threshold
$.user_id: c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
$.project_id: 709f6ed6-bfb3-4649-b303-0019a7f6aef2
$.severity: moderate
- name: retrieve history about 'gabbi-gnocchi-threshold-resource-alarm' creation
desc: get history about alarm creation
GET: $ENVIRON['AODH_SERVICE_URL']/v2/alarms/$RESPONSE['$.alarm_id']/history
request_headers:
content-type: application/json
poll:
count: 5
delay: 2
response_json_paths:
$.`len`: 1
$.[0].type: creation
$.[0].alarm_id: $RESPONSE['$.alarm_id']
- name: update severity for alarm 'gabbi-gnocchi-threshold-resource-alarm'
desc: update severity for alarm gabbi-gnocchi-threshold-resource-alarm
PUT: $ENVIRON['AODH_SERVICE_URL']/v2/alarms/$RESPONSE['$.[0].alarm_id']
status: 200
request_headers:
content-type: application/json
data:
alarm_actions:
- "http://site:8000/gnocchi-threshold-resource"
alarm_id: null
description: An gabbilive threshold based alarm
enabled: true
insufficient_data_actions:
- "http://site:8000/nodata-gnocchi-threshold-resource"
name: "gabbi-gnocchi-threshold-resource-alarm"
ok_actions:
- "http://site:8000/ok-gnocchi-threshold-resource"
project_id: 709f6ed6-bfb3-4649-b303-0019a7f6aef2
repeat_actions: false
severity: low
state: "insufficient data"
type: gnocchi_resources_threshold
gnocchi_resources_threshold_rule:
evaluation_periods: 3
metric: "radosgw.objects.size"
resource_id: "662e46f3-8c06-430c-8a9c-adcaedd1272c"
aggregation_method: "mean"
granularity: 60
threshold: 5.0
comparison_operator: "ge"
resource_type: ceph_account
user_id: c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
response_json_paths:
$.name: gabbi-gnocchi-threshold-resource-alarm
$.type: gnocchi_resources_threshold
$.user_id: c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
$.project_id: 709f6ed6-bfb3-4649-b303-0019a7f6aef2
$.severity: low
$.state: "insufficient data"
- name: retrieve history for 'gabbi-gnocchi-threshold-resource-alarm'
desc: get history for rule_change
GET: $ENVIRON['AODH_SERVICE_URL']/v2/alarms/$RESPONSE['$.alarm_id']/history
request_headers:
content-type: application/json
poll:
count: 5
delay: 2
response_json_paths:
$.`len`: 2
$.[0].type: rule change
$.[0].alarm_id: $RESPONSE['$.alarm_id']
$.[0].detail: '{"severity": "low"}'
- name: update alarm state for 'gabbi-gnocchi-threshold-resource-alarm'
desc: update state for alarm
PUT: $ENVIRON['AODH_SERVICE_URL']/v2/alarms/$RESPONSE['$.[0].alarm_id']/state
request_headers:
content-type: application/json
data: '"ok"'
status: 200
response_strings:
- "ok"
- name: search 'gabbi-gnocchi-threshold-resource-alarm' alarm exist
desc: search for alarm using user_id, project_id, alarm_name
GET: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
query_parameters:
q.field:
- user_id
- project_id
- name
q.op:
- eq
- eq
- eq
q.value:
- c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
- 709f6ed6-bfb3-4649-b303-0019a7f6aef2
- gabbi-gnocchi-threshold-resource-alarm
poll:
count: 5
delay: 2
response_json_paths:
$.`len`: 1
- name: get info about 'gabbi-gnocchi-threshold-resource-alarm' alarm
desc: access alarm using its ID
GET: $ENVIRON['AODH_SERVICE_URL']/v2/alarms/$RESPONSE['$.[0].alarm_id']
response_json_paths:
$.alarm_id: $RESPONSE['$.[0].alarm_id']
$.alarm_actions: ["http://site:8000/gnocchi-threshold-resource"]
$.name: gabbi-gnocchi-threshold-resource-alarm
$.gnocchi_resources_threshold_rule.resource_id: "662e46f3-8c06-430c-8a9c-adcaedd1272c"
$.gnocchi_resources_threshold_rule.metric: "radosgw.objects.size"
$.gnocchi_resources_threshold_rule.resource_type: "ceph_account"
$.user_id: c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
- name: get alarm state for 'gabbi-gnocchi-threshold-resource-alarm'
desc: get state for alarm
GET: $ENVIRON['AODH_SERVICE_URL']/v2/alarms/$RESPONSE['$.alarm_id']/state
request_headers:
content-type: application/json
status: 200
response_strings:
- "ok"
#
# Teardown
#
- name: CLEANUP. search 'gabbi-gnocchi-threshold-resource' alarm exist
desc: Find alarm id using user_id, project_id, alarm_name
GET: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
query_parameters:
q.field:
- user_id
- project_id
- name
q.op:
- eq
- eq
- eq
q.value:
- c8ecb587-d38c-426f-a58d-22b8d4a9a1d3
- 709f6ed6-bfb3-4649-b303-0019a7f6aef2
- gabbi-gnocchi-threshold-resource-alarm
response_json_paths:
$.`len`: 1
- name: CLEANUP. delete threshold alarm 'gabbi-gnocchi-threshold-resource'
DELETE: $ENVIRON['AODH_SERVICE_URL']/v2/alarms/$RESPONSE['$.[0].alarm_id']
status: 204
- name: CLEANUP. Get resource by name '662e46f3-8c06-430c-8a9c-adcaedd1272c'
desc: retrieve resource by 662e46f3-8c06-430c-8a9c-adcaedd1272c to get its ID
GET: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/resource/generic/662e46f3-8c06-430c-8a9c-adcaedd1272c
status: 200
- name: CLEANUP. delete test ceph_resource '662e46f3-8c06-430c-8a9c-adcaedd1272c'
desc: delete ceph_account resource 662e46f3-8c06-430c-8a9c-adcaedd1272c
DELETE: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/resource/generic/$RESPONSE['$.id']
status: 204
- name: CLEANUP. delete resource type ceph_account
DELETE: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/resource_type/ceph_account
status: 204
- name: CLEANUP. delete archive
DELETE: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/archive_policy/gabbiliveceph
status: 204
xfail: True

View File

@ -1,175 +0,0 @@
defaults:
request_headers:
x-auth-token: $ENVIRON['USER_TOKEN']
tests:
- name: list alarms none
desc: Lists alarms, none yet exist
url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
method: GET
response_strings:
- "[]"
- name: list servers none
desc: List servers, none yet exists
url: $ENVIRON['NOVA_SERVICE_URL']/servers
method: GET
response_strings:
- "[]"
- name: create stack
desc: Create an autoscaling stack
url: $ENVIRON['HEAT_SERVICE_URL']/stacks
method: POST
request_headers:
content-type: application/json
data: <@create_stack.json
status: 201
- name: control stack status
desc: Checks the stack have been created successfully
url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
redirects: true
method: GET
status: 200
poll:
count: 300
delay: 1
response_json_paths:
$.stack.stack_status: "CREATE_COMPLETE"
- name: list servers grow
desc: Wait the autoscaling stack grow to two servers
url: $ENVIRON['NOVA_SERVICE_URL']/servers/detail
method: GET
poll:
count: 600
delay: 1
response_json_paths:
$.servers[0].metadata.'metering.server_group': $RESPONSE['$.stack.id']
$.servers[1].metadata.'metering.server_group': $RESPONSE['$.stack.id']
$.servers[0].status: ACTIVE
$.servers[1].status: ACTIVE
$.servers.`len`: 2
- name: check gnocchi resources
desc: Check the gnocchi resources for this two servers exists
url: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/resource/instance
method: GET
poll:
count: 30
delay: 1
response_strings:
- '"id": "$RESPONSE["$.servers[0].id"]"'
- '"id": "$RESPONSE["$.servers[1].id"]"'
- name: check event
desc: Check panko for new instance.create.end event
url: $ENVIRON['PANKO_SERVICE_URL']/v2/events
method: GET
request_headers:
content-type: application/json
data:
q:
- field: event_type
op: eq
type: string
value: compute.instance.create.end
- field: resource_id
op: eq
type: string
value: $HISTORY['list servers grow'].$RESPONSE['$.servers[0].id']
poll:
count: 30
delay: 1
response_json_paths:
$.`len`: 1
$[0].event_type: compute.instance.create.end
$[0].traits[?(@.name='resource_id')].value: $HISTORY['list servers grow'].$RESPONSE['$.servers[0].id']
- name: check alarm
desc: Check the aodh alarm and its state
url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
method: GET
poll:
count: 30
delay: 1
response_strings:
- "integration_test-cpu_alarm_high-"
response_json_paths:
$[0].state: alarm
- name: get stack location for update
desc: Get the stack location
url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
method: GET
status: 302
- name: update stack
desc: Update an autoscaling stack
url: $LOCATION
method: PUT
request_headers:
content-type: application/json
data: <@update_stack.json
status: 202
- name: control stack status
desc: Checks the stack have been created successfully
url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
redirects: true
method: GET
status: 200
poll:
count: 300
delay: 1
response_json_paths:
$.stack.stack_status: "UPDATE_COMPLETE"
- name: list servers shrink
desc: Wait the autoscaling stack shrink to one server
url: $ENVIRON['NOVA_SERVICE_URL']/servers/detail
method: GET
poll:
count: 600
delay: 1
response_json_paths:
$.servers[0].metadata.'metering.server_group': $RESPONSE['$.stack.id']
$.servers[0].status: ACTIVE
$.servers.`len`: 1
- name: get stack location
desc: Get the stack location
url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
method: GET
status: 302
- name: delete stack
desc: Delete the stack
url: $LOCATION
method: DELETE
status: 204
- name: get deleted stack
desc: Check the stack have been deleted
url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
redirects: true
method: GET
poll:
count: 300
delay: 1
status: 404
- name: list alarms deleted
desc: List alarms, no more exist
url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
method: GET
response_strings:
- "[]"
- name: list servers deleted
desc: List servers, no more exists
url: $ENVIRON['NOVA_SERVICE_URL']/servers
method: GET
response_strings:
- "[]"

View File

@ -1,74 +0,0 @@
{
"stack_name": "integration_test",
"template": {
"heat_template_version": "2013-05-23",
"description": "Integration Test AutoScaling with heat+ceilometer+gnocchi+aodh",
"resources": {
"asg": {
"type": "OS::Heat::AutoScalingGroup",
"properties": {
"min_size": 1,
"max_size": 2,
"resource": {
"type": "OS::Nova::Server",
"properties": {
"networks": [{ "network": "$ENVIRON['NEUTRON_NETWORK']" }],
"flavor": "$ENVIRON['NOVA_FLAVOR_REF']",
"image": "$ENVIRON['GLANCE_IMAGE_NAME']",
"metadata": {
"metering.server_group": { "get_param": "OS::stack_id" }
},
"user_data_format": "RAW",
"user_data": {"Fn::Join": ["", [
"#!/bin/sh\n",
"echo 'Loading CPU'\n",
"set -v\n",
"cat /dev/urandom > /dev/null\n"
]]}
}
}
}
},
"web_server_scaleup_policy": {
"type": "OS::Heat::ScalingPolicy",
"properties": {
"adjustment_type": "change_in_capacity",
"auto_scaling_group_id": { "get_resource": "asg" },
"cooldown": 2,
"scaling_adjustment": 1
}
},
"cpu_alarm_high": {
"type": "OS::Ceilometer::GnocchiAggregationByResourcesAlarm",
"properties": {
"description": "Scale-up if the mean CPU > 10% on 1 minute",
"metric": "cpu_util",
"aggregation_method": "mean",
"granularity": $ENVIRON["AODH_GRANULARITY"],
"evaluation_periods": 1,
"threshold": 10,
"comparison_operator": "gt",
"alarm_actions": [
{
"str_replace": {
"template": "trust+url",
"params": {
"url": { "get_attr": [ "web_server_scaleup_policy", "signal_url" ] }
}
}
}
],
"resource_type": "instance",
"query": {
"str_replace": {
"template": "{\"and\": [{\"=\": {\"server_group\": \"stack_id\"}}, {\"=\": {\"ended_at\": null}}]}",
"params": {
"stack_id": { "get_param": "OS::stack_id" }
}
}
}
}
}
}
}
}

View File

@ -1,73 +0,0 @@
{
"template": {
"heat_template_version": "2013-05-23",
"description": "Integration Test AutoScaling with heat+ceilometer+gnocchi+aodh",
"resources": {
"asg": {
"type": "OS::Heat::AutoScalingGroup",
"properties": {
"min_size": 1,
"max_size": 2,
"resource": {
"type": "OS::Nova::Server",
"properties": {
"networks": [{ "network": "$ENVIRON['NEUTRON_NETWORK']" }],
"flavor": "$ENVIRON['NOVA_FLAVOR_REF']",
"image": "$ENVIRON['GLANCE_IMAGE_NAME']",
"metadata": {
"metering.server_group": { "get_param": "OS::stack_id" }
},
"user_data_format": "RAW",
"user_data": {"Fn::Join": ["", [
"#!/bin/sh\n",
"echo 'Loading CPU'\n",
"set -v\n",
"cat /dev/urandom > /dev/null\n"
]]}
}
}
}
},
"web_server_scaledown_policy": {
"type": "OS::Heat::ScalingPolicy",
"properties": {
"adjustment_type": "change_in_capacity",
"auto_scaling_group_id": { "get_resource": "asg" },
"cooldown": 2,
"scaling_adjustment": -1
}
},
"cpu_alarm_high": {
"type": "OS::Ceilometer::GnocchiAggregationByResourcesAlarm",
"properties": {
"description": "Scale-down if the mean CPU > 10% on 1 minute",
"metric": "cpu_util",
"aggregation_method": "mean",
"granularity": $ENVIRON["AODH_GRANULARITY"],
"evaluation_periods": 1,
"threshold": 10,
"comparison_operator": "gt",
"alarm_actions": [
{
"str_replace": {
"template": "trust+url",
"params": {
"url": { "get_attr": [ "web_server_scaledown_policy", "signal_url" ] }
}
}
}
],
"resource_type": "instance",
"query": {
"str_replace": {
"template": "{\"and\": [{\"=\": {\"server_group\": \"stack_id\"}}, {\"=\": {\"ended_at\": null}}]}",
"params": {
"stack_id": { "get_param": "OS::stack_id" }
}
}
}
}
}
}
}
}

View File

@ -1,40 +0,0 @@
#
# Copyright 2015 Red Hat. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A test module to exercise the Gnocchi API with gabbi."""
import os
from gabbi import driver
TESTS_DIR = 'gabbits-live'
def load_tests(loader, tests, pattern):
    """Provide a TestSuite to the discovery process.

    Returns None (skipping the suite) when any required service
    environment variable is unset, unless GABBI_LIVE_FAIL_IF_NO_TEST
    is set, in which case the first missing variable is reported as an
    error instead.
    """
    required = ("AODH_SERVICE_URL", "GNOCCHI_SERVICE_URL",
                "HEAT_SERVICE_URL", "NOVA_SERVICE_URL",
                "PANKO_SERVICE_URL", "GLANCE_IMAGE_NAME", "ADMIN_TOKEN")
    missing = [name for name in required if not os.getenv(name)]
    if missing:
        if os.getenv("GABBI_LIVE_FAIL_IF_NO_TEST"):
            raise RuntimeError('%s is not set' % missing[0])
        return
    gabbits = os.path.join(os.path.dirname(__file__), TESTS_DIR)
    return driver.build_tests(gabbits, loader, host="localhost", port=8041)

View File

@ -1,64 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
import tempest.test
from ceilometer.tests.tempest.aodh.service import client
CONF = config.CONF
class BaseAlarmingTest(tempest.test.BaseTestCase):
    """Base test case class for all Alarming API tests."""
    # Only the default ('primary') credential set is required.
    credentials = ['primary']
    # Tempest builds service clients through this manager (see service.client).
    client_manager = client.Manager
    @classmethod
    def skip_checks(cls):
        # Skip the whole class when the aodh plugin is disabled in tempest.conf.
        super(BaseAlarmingTest, cls).skip_checks()
        if not CONF.service_available.aodh_plugin:
            raise cls.skipException("Aodh support is required")
    @classmethod
    def setup_clients(cls):
        # Expose the alarming REST client built by client.Manager.
        super(BaseAlarmingTest, cls).setup_clients()
        cls.alarming_client = cls.os_primary.alarming_client
    @classmethod
    def resource_setup(cls):
        # IDs of alarms created during the tests; deleted in resource_cleanup.
        super(BaseAlarmingTest, cls).resource_setup()
        cls.alarm_ids = []
    @classmethod
    def create_alarm(cls, **kwargs):
        # Create an alarm with a random name and record its id for cleanup.
        body = cls.alarming_client.create_alarm(
            name=data_utils.rand_name('telemetry_alarm'),
            type='gnocchi_aggregation_by_metrics_threshold', **kwargs)
        cls.alarm_ids.append(body['alarm_id'])
        return body
    @staticmethod
    def cleanup_resources(method, list_of_ids):
        # Best-effort deletion: a resource that is already gone is not an
        # error.
        for resource_id in list_of_ids:
            try:
                method(resource_id)
            except lib_exc.NotFound:
                pass
    @classmethod
    def resource_cleanup(cls):
        cls.cleanup_resources(cls.alarming_client.delete_alarm, cls.alarm_ids)
        super(BaseAlarmingTest, cls).resource_cleanup()

View File

@ -1,102 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from ceilometer.tests.tempest.aodh.api import base
class TelemetryAlarmingAPITest(base.BaseAlarmingTest):
    """Positive API tests for alarm list / CRUD / state operations."""
    @classmethod
    def resource_setup(cls):
        super(TelemetryAlarmingAPITest, cls).resource_setup()
        # Shared gnocchi aggregation-by-metrics rule used by every test below.
        cls.rule = {'metrics': ['c0d457b6-957e-41de-a384-d5eb0957de3b'],
                    'comparison_operator': 'gt',
                    'aggregation_method': 'mean',
                    'threshold': 80.0,
                    'granularity': 70}
        # Pre-create two alarms so the list test has known data to find.
        for i in range(2):
            cls.create_alarm(
                gnocchi_aggregation_by_metrics_threshold_rule=cls.rule)
    @decorators.idempotent_id('1c918e06-210b-41eb-bd45-14676dd77cd7')
    def test_alarm_list(self):
        """Every alarm created in resource_setup appears in the listing."""
        # List alarms
        alarm_list = self.alarming_client.list_alarms()
        # Verify created alarm in the list
        fetched_ids = [a['alarm_id'] for a in alarm_list]
        missing_alarms = [a for a in self.alarm_ids if a not in fetched_ids]
        self.assertEqual(0, len(missing_alarms),
                         "Failed to find the following created alarm(s)"
                         " in a fetched list: %s" %
                         ', '.join(str(a) for a in missing_alarms))
    @decorators.idempotent_id('1297b095-39c1-4e74-8a1f-4ae998cedd68')
    def test_create_update_get_delete_alarm(self):
        """Full lifecycle of one alarm: create, update, show, history, delete."""
        # Create an alarm
        alarm_name = data_utils.rand_name('telemetry_alarm')
        body = self.alarming_client.create_alarm(
            name=alarm_name, type='gnocchi_aggregation_by_metrics_threshold',
            gnocchi_aggregation_by_metrics_threshold_rule=self.rule)
        self.assertEqual(alarm_name, body['name'])
        alarm_id = body['alarm_id']
        self.assertDictContainsSubset(self.rule, body[
            'gnocchi_aggregation_by_metrics_threshold_rule'])
        # Update alarm with new rule and new name
        new_rule = {'metrics': ['c0d457b6-957e-41de-a384-d5eb0957de3b'],
                    'comparison_operator': 'eq',
                    'aggregation_method': 'mean',
                    'threshold': 70.0,
                    'granularity': 60}
        alarm_name_updated = data_utils.rand_name('telemetry-alarm-update')
        body = self.alarming_client.update_alarm(
            alarm_id,
            gnocchi_aggregation_by_metrics_threshold_rule=new_rule,
            name=alarm_name_updated,
            type='gnocchi_aggregation_by_metrics_threshold')
        self.assertEqual(alarm_name_updated, body['name'])
        self.assertDictContainsSubset(
            new_rule, body['gnocchi_aggregation_by_metrics_threshold_rule'])
        # Get and verify details of an alarm after update
        body = self.alarming_client.show_alarm(alarm_id)
        self.assertEqual(alarm_name_updated, body['name'])
        self.assertDictContainsSubset(
            new_rule, body['gnocchi_aggregation_by_metrics_threshold_rule'])
        # Get history for the alarm and verify the same
        # (entry [0] is the rule change just made, [1] the original creation).
        body = self.alarming_client.show_alarm_history(alarm_id)
        self.assertEqual("rule change", body[0]['type'])
        self.assertIn(alarm_name_updated, body[0]['detail'])
        self.assertEqual("creation", body[1]['type'])
        self.assertIn(alarm_name, body[1]['detail'])
        # Delete alarm and verify if deleted
        self.alarming_client.delete_alarm(alarm_id)
        self.assertRaises(lib_exc.NotFound,
                          self.alarming_client.show_alarm, alarm_id)
    @decorators.idempotent_id('aca49486-70bb-4016-87e0-f6131374f742')
    def test_set_get_alarm_state(self):
        """A state set via the API is echoed back and readable afterwards."""
        alarm_states = ['ok', 'alarm', 'insufficient data']
        alarm = self.create_alarm(
            gnocchi_aggregation_by_metrics_threshold_rule=self.rule)
        # Set alarm state and verify
        # Pick any valid state different from the alarm's current one.
        new_state =\
            [elem for elem in alarm_states if elem != alarm['state']][0]
        state = self.alarming_client.alarm_set_state(alarm['alarm_id'],
                                                     new_state)
        self.assertEqual(new_state, state.data)
        # Get alarm state and verify
        state = self.alarming_client.show_alarm_state(alarm['alarm_id'])
        self.assertEqual(new_state, state.data)

View File

@ -1,75 +0,0 @@
# Copyright 2015 GlobalLogic. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from ceilometer.tests.tempest.aodh.api import base
class TelemetryAlarmingNegativeTest(base.BaseAlarmingTest):
    """Negative tests for show_alarm, update_alarm, show_alarm_history tests

    ** show non-existent alarm
    ** show the deleted alarm
    ** delete deleted alarm
    ** update deleted alarm
    """
    @decorators.attr(type=['negative'])
    @decorators.idempotent_id('668743d5-08ad-4480-b2b8-15da34f81e7e')
    def test_get_non_existent_alarm(self):
        """Fetching a random, never-created alarm id raises NotFound."""
        # get the non-existent alarm
        non_existent_id = uuidutils.generate_uuid()
        self.assertRaises(lib_exc.NotFound, self.alarming_client.show_alarm,
                          non_existent_id)
    @decorators.attr(type=['negative'])
    @decorators.idempotent_id('ef45000d-0a72-4781-866d-4cb7bf2582ae')
    def test_get_update_show_history_delete_deleted_alarm(self):
        """Every operation on an already-deleted alarm raises NotFound."""
        # get, update and delete the deleted alarm
        alarm_name = data_utils.rand_name('telemetry_alarm')
        rule = {'metrics': ["c0d457b6-957e-41de-a384-d5eb0957de3b"],
                'aggregation_method': 'mean',
                'comparison_operator': 'eq',
                'threshold': 100.0,
                'granularity': 90}
        body = self.alarming_client.create_alarm(
            name=alarm_name,
            type='gnocchi_aggregation_by_metrics_threshold',
            gnocchi_aggregation_by_metrics_threshold_rule=rule)
        alarm_id = body['alarm_id']
        self.alarming_client.delete_alarm(alarm_id)
        # get the deleted alarm
        self.assertRaises(lib_exc.NotFound, self.alarming_client.show_alarm,
                          alarm_id)
        # update the deleted alarm
        updated_alarm_name = data_utils.rand_name('telemetry_alarm_updated')
        updated_rule = {'metrics': ["c0d457b6-957e-41de-a384-d5eb0957de3b"],
                        'comparison_operator': 'eq',
                        'aggregation_method': 'mean',
                        'threshold': 70,
                        'granularity': 50}
        self.assertRaises(
            lib_exc.NotFound, self.alarming_client.update_alarm,
            alarm_id,
            gnocchi_aggregation_by_metrics_threshold_rule=updated_rule,
            name=updated_alarm_name,
            type='gnocchi_aggregation_by_metrics_threshold')
        # delete the deleted alarm
        self.assertRaises(lib_exc.NotFound, self.alarming_client.delete_alarm,
                          alarm_id)

View File

@ -1,127 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from six.moves.urllib import parse as urllib
from tempest import config
from tempest.lib.common import rest_client
from tempest import manager
CONF = config.CONF
class AlarmingClient(rest_client.RestClient):
    """REST client for the aodh (alarming) v2 API."""
    version = '2'
    uri_prefix = "v2"
    def deserialize(self, body):
        # NOTE(review): stripping newlines assumes no JSON string value in
        # the payload contains a literal newline -- confirm before reusing
        # this client for other payloads.
        return json.loads(body.replace("\n", ""))
    def serialize(self, body):
        # Encode a request body as JSON text.
        return json.dumps(body)
    def list_alarms(self, query=None):
        """GET /v2/alarms, optionally filtered.

        :param query: optional triple (fields, ops, values) encoded as the
                      q.field / q.op / q.value query parameters.
        """
        uri = '%s/alarms' % self.uri_prefix
        uri_dict = {}
        if query:
            uri_dict = {'q.field': query[0],
                        'q.op': query[1],
                        'q.value': query[2]}
        if uri_dict:
            uri += "?%s" % urllib.urlencode(uri_dict)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = self.deserialize(body)
        return rest_client.ResponseBodyList(resp, body)
    def show_alarm(self, alarm_id):
        """GET a single alarm by id."""
        uri = '%s/alarms/%s' % (self.uri_prefix, alarm_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = self.deserialize(body)
        return rest_client.ResponseBody(resp, body)
    def show_alarm_history(self, alarm_id):
        """GET the change history of an alarm."""
        uri = "%s/alarms/%s/history" % (self.uri_prefix, alarm_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = self.deserialize(body)
        return rest_client.ResponseBodyList(resp, body)
    def delete_alarm(self, alarm_id):
        """DELETE an alarm; expects 204 No Content."""
        uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id)
        resp, body = self.delete(uri)
        self.expected_success(204, resp.status)
        if body:
            body = self.deserialize(body)
        return rest_client.ResponseBody(resp, body)
    def create_alarm(self, **kwargs):
        """POST a new alarm; kwargs form the JSON request body."""
        uri = "%s/alarms" % self.uri_prefix
        body = self.serialize(kwargs)
        resp, body = self.post(uri, body)
        self.expected_success(201, resp.status)
        body = self.deserialize(body)
        return rest_client.ResponseBody(resp, body)
    def update_alarm(self, alarm_id, **kwargs):
        """PUT an updated alarm definition; kwargs form the request body."""
        uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id)
        body = self.serialize(kwargs)
        resp, body = self.put(uri, body)
        self.expected_success(200, resp.status)
        body = self.deserialize(body)
        return rest_client.ResponseBody(resp, body)
    def show_alarm_state(self, alarm_id):
        """GET the current state string of an alarm."""
        uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = self.deserialize(body)
        return rest_client.ResponseBodyData(resp, body)
    def alarm_set_state(self, alarm_id, state):
        """PUT a new state for an alarm; returns the state echoed back."""
        uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
        body = self.serialize(state)
        resp, body = self.put(uri, body)
        self.expected_success(200, resp.status)
        body = self.deserialize(body)
        return rest_client.ResponseBodyData(resp, body)
class Manager(manager.Manager):
    """Service client manager that wires up the alarming client."""
    # TLS / tracing settings shared by every client this manager builds.
    default_params = {
        'disable_ssl_certificate_validation':
            CONF.identity.disable_ssl_certificate_validation,
        'ca_certs': CONF.identity.ca_certificates_file,
        'trace_requests': CONF.debug.trace_requests
    }
    # Endpoint selection for the alarming service, from the
    # [alarming_plugin] section of tempest.conf.
    alarming_params = {
        'service': CONF.alarming_plugin.catalog_type,
        'region': CONF.identity.region,
        'endpoint_type': CONF.alarming_plugin.endpoint_type,
    }
    alarming_params.update(default_params)
    def __init__(self, credentials=None, service=None):
        # `service` is accepted for interface compatibility but unused here.
        super(Manager, self).__init__(credentials)
        self.set_alarming_client()
    def set_alarming_client(self):
        self.alarming_client = AlarmingClient(self.auth_provider,
                                              **self.alarming_params)

View File

@ -1,77 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
# Boolean switches announcing which telemetry services the deployment
# provides; tempest skips the corresponding tests when a switch is False.
# Fix: the multi-line help strings previously concatenated without a space
# ("expected to" + "be available" rendered as "expected tobe available").
service_option = [cfg.BoolOpt('ceilometer',
                              default=True,
                              help="Whether or not Ceilometer is expected to "
                                   "be available"),
                  cfg.BoolOpt('panko',
                              default=True,
                              help="Whether or not Panko is expected to be "
                                   "available"),
                  cfg.BoolOpt("aodh_plugin",
                              default=True,
                              help="Whether or not Aodh is expected to be "
                                   "available")]
# Option groups registered by the plugin in tempest.conf.
telemetry_group = cfg.OptGroup(name='telemetry',
                               title='Telemetry Service Options')
event_group = cfg.OptGroup(name='event',
                           title='Event Service Options')
alarming_group = cfg.OptGroup(name='alarming_plugin',
                              title='Alarming Service Options')
# Options for the [telemetry] group: notification polling knobs and the
# alarm granularity (must match the configured Gnocchi archive policy).
TelemetryGroup = [
    cfg.IntOpt('notification_wait',
               default=120,
               help="The seconds to wait for notifications which "
                    "containers and objects sent to swift."),
    cfg.IntOpt('notification_sleep',
               default=1,
               help="The seconds to sleep after an unsuccessful "
                    "notification received."),
    cfg.IntOpt('alarm_granularity',
               default=300,
               help="Granularity to use for aodh alarms. This must match the "
                    "configured Gnocchi archive policy")
]
# Options for the [event] group: where to find the panko event service.
event_opts = [
    cfg.StrOpt('catalog_type',
               default='event',
               help="Catalog type of the Event service."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the event service."),
]
# Options for the [alarming_plugin] group: where to find the aodh service.
AlarmingGroup = [
    cfg.StrOpt('catalog_type',
               default='alarming',
               help="Catalog type of the Alarming service."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the alarming service."),
]

View File

@ -1,169 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
class TempestException(Exception):
    """Base Tempest Exception.

    To use this class correctly, inherit from it and define a ``message``
    class attribute.  That template is %-formatted with the keyword
    arguments passed to the constructor; any positional arguments are
    appended to the message as a "Details:" section.
    """
    message = "An unknown exception occurred"

    def __init__(self, *args, **kwargs):
        super(TempestException, self).__init__()
        try:
            formatted = self.message % kwargs
        except Exception:
            # Formatting failed (e.g. missing keys) -- at least keep the
            # raw message template so something useful is reported.
            formatted = self.message
        if args:
            # Non-kwarg parameters are assumed to be extra error details;
            # stringify each one and tack them on at the end.
            details = '\n'.join("%s" % arg for arg in args)
            formatted = "%s\nDetails: %s" % (formatted, details)
        self._error_string = formatted

    def __str__(self):
        return self._error_string
class RestClientException(TempestException,
                          testtools.TestCase.failureException):
    """REST client error that testtools also treats as a test failure."""
    pass


# Configuration / credential errors.
class InvalidConfiguration(TempestException):
    message = "Invalid Configuration"


class InvalidCredentials(TempestException):
    message = "Invalid Credentials"


class InvalidServiceTag(TempestException):
    message = "Invalid service tag"


class InvalidIdentityVersion(TempestException):
    message = "Invalid version %(identity_version)s of the identity service"


class TimeoutException(TempestException):
    message = "Request timed out"


# Resource lifecycle errors: raised while waiting for a resource to reach
# an expected state.
class BuildErrorException(TempestException):
    message = "Server %(server_id)s failed to build and is in ERROR status"


class ImageKilledException(TempestException):
    message = "Image %(image_id)s 'killed' while waiting for '%(status)s'"


class AddImageException(TempestException):
    message = "Image %(image_id)s failed to become ACTIVE in the allotted time"


class VolumeBuildErrorException(TempestException):
    message = "Volume %(volume_id)s failed to build and is in ERROR status"


class VolumeRestoreErrorException(TempestException):
    message = "Volume %(volume_id)s failed to restore and is in ERROR status"


class SnapshotBuildErrorException(TempestException):
    message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"


class VolumeBackupException(TempestException):
    message = "Volume backup %(backup_id)s failed and is in ERROR status"


class StackBuildErrorException(TempestException):
    message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
               "due to '%(stack_status_reason)s'")


class EndpointNotFound(TempestException):
    message = "Endpoint not found"


class IdentityError(TempestException):
    message = "Got identity error"


class ServerUnreachable(TempestException):
    message = "The server is not reachable via the configured network"


# NOTE(andreaf) This exception is added here to facilitate the migration
# of get_network_from_name and preprov_creds to tempest.lib, and it should
# be migrated along with them
class InvalidTestResource(TempestException):
    message = "%(name)s is not a valid %(type)s, or the name is ambiguous"


# HTTP protocol / RFC compliance errors (subclasses of RestClientException
# so they double as test failures).
class RFCViolation(RestClientException):
    message = "RFC Violation"


class InvalidHttpSuccessCode(RestClientException):
    message = "The success code is different than the expected one"


class BadRequest(RestClientException):
    message = "Bad request"


class ResponseWithNonEmptyBody(RFCViolation):
    message = ("RFC Violation! Response with %(status)d HTTP Status Code "
               "MUST NOT have a body")


class ResponseWithEntity(RFCViolation):
    message = ("RFC Violation! Response with 205 HTTP Status Code "
               "MUST NOT have an entity")


class InvalidHTTPResponseHeader(RestClientException):
    message = "HTTP response header is invalid"


class InvalidStructure(TempestException):
    message = "Invalid structure of table with details"
class CommandFailed(Exception):
    """Raised when a shell command exits with a non-zero return code.

    Captures the command line, its exit status and both output streams so
    the failure can be reported verbatim.
    """

    def __init__(self, returncode, cmd, output, stderr):
        super(CommandFailed, self).__init__()
        self.returncode = returncode
        self.cmd = cmd
        self.stdout = output
        self.stderr = stderr

    def __str__(self):
        template = ("Command '%s' returned non-zero exit status %d.\n"
                    "stdout:\n%s\n"
                    "stderr:\n%s")
        return template % (self.cmd, self.returncode,
                           self.stdout, self.stderr)

View File

@ -1,56 +0,0 @@
#
# Copyright 2015 NEC Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tempest import config
from tempest.test_discover import plugins
import ceilometer
from ceilometer.tests.tempest import config as tempest_config
class CeilometerTempestPlugin(plugins.TempestPlugin):
    """Tempest plugin entry point exposing the in-tree ceilometer tests."""

    def load_tests(self):
        """Return (test_dir, top_level_dir) for tempest's test discovery."""
        # The directory that contains the installed 'ceilometer' package.
        package_root = os.path.split(os.path.dirname(
            os.path.abspath(ceilometer.__file__)))[0]
        relative_test_dir = "ceilometer/tests/tempest"
        return os.path.join(package_root, relative_test_dir), package_root

    def register_opts(self, conf):
        """Register this plugin's option groups on the tempest config."""
        for group, opts in (
                (config.service_available_group,
                 tempest_config.service_option),
                (tempest_config.telemetry_group,
                 tempest_config.TelemetryGroup),
                (tempest_config.event_group,
                 tempest_config.event_opts),
                (tempest_config.alarming_group,
                 tempest_config.AlarmingGroup)):
            config.register_opt_group(conf, group, opts)

    def get_opt_lists(self):
        """Return (group name, options) pairs for config generation."""
        return [(tempest_config.telemetry_group.name,
                 tempest_config.TelemetryGroup),
                (tempest_config.event_group.name,
                 tempest_config.event_opts),
                (config.service_available_group.name,
                 tempest_config.service_option),
                (tempest_config.alarming_group.name,
                 tempest_config.AlarmingGroup)]

View File

@ -1,143 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import unittest
from gabbi import runner
from gabbi import suitemaker
from gabbi import utils
from tempest import config
from tempest.scenario import manager
# Directory containing the gabbi YAML scenario files, resolved relative to
# this module.
TEST_DIR = os.path.join(os.path.dirname(__file__), '..', '..',
                        'integration', 'gabbi', 'gabbits-live')
class TestTelemetryIntegration(manager.ScenarioTest):
    """Run the gabbi live-integration suites against real service endpoints.

    Test methods are attached to this class dynamically (one per YAML file
    in TEST_DIR) by the module-level loop below the class.
    """

    # Both an admin and a primary (non-admin) credential set are required.
    credentials = ['admin', 'primary']
    TIMEOUT_SCALING_FACTOR = 5

    @classmethod
    def skip_checks(cls):
        # Skip the whole class unless every service the scenarios touch is
        # marked available in the tempest configuration.
        super(TestTelemetryIntegration, cls).skip_checks()
        for name in ["aodh_plugin", "gnocchi", "nova", "heat", "panko",
                     "ceilometer", "glance"]:
            cls._check_service(name)

    @classmethod
    def _check_service(cls, name):
        # A missing attribute is treated the same as the service being
        # disabled.
        if not getattr(config.CONF.service_available, name, False):
            raise cls.skipException("%s support is required" %
                                    name.capitalize())

    @staticmethod
    def _get_endpoint(auth, service):
        """Resolve *service*'s endpoint URL from the auth service catalog.

        Handles both keystone v3 ('catalog') and v2 ('serviceCatalog')
        response shapes, converting the configured endpoint_type between
        the v2 'publicURL' style and the v3 'public' style as needed.
        Raises a plain Exception when no matching endpoint is found.
        """
        opt_section = getattr(config.CONF, service)
        endpoint_type = opt_section.endpoint_type
        # v3 auth payloads carry a 'catalog' key; v2 uses 'serviceCatalog'.
        is_keystone_v3 = 'catalog' in auth[1]

        if is_keystone_v3:
            # v3 interfaces are named 'public'/'admin'/'internal'.
            if endpoint_type.endswith("URL"):
                endpoint_type = endpoint_type[:-3]
            catalog = auth[1]['catalog']
            endpoints = [e['endpoints'] for e in catalog
                         if e['type'] == opt_section.catalog_type]
            if not endpoints:
                raise Exception("%s endpoint not found" %
                                opt_section.catalog_type)
            endpoints = [e['url'] for e in endpoints[0]
                         if e['interface'] == endpoint_type]
            if not endpoints:
                raise Exception("%s interface not found for endpoint %s" %
                                (endpoint_type,
                                 opt_section.catalog_type))
            return endpoints[0]

        else:
            # v2 endpoint keys are 'publicURL'/'adminURL'/'internalURL'.
            if not endpoint_type.endswith("URL"):
                endpoint_type += "URL"
            catalog = auth[1]['serviceCatalog']
            endpoints = [e for e in catalog
                         if e['type'] == opt_section.catalog_type]
            if not endpoints:
                raise Exception("%s endpoint not found" %
                                opt_section.catalog_type)
            return endpoints[0]['endpoints'][0][endpoint_type]

    def _do_test(self, filename):
        """Load one gabbi YAML suite and run it, failing on any error."""
        admin_auth = self.os_admin.auth_provider.get_auth()
        auth = self.os_primary.auth_provider.get_auth()
        networks = self.os_primary.networks_client.list_networks(
            **{'router:external': False, 'fields': 'id'})['networks']

        # The gabbi YAML files read these through $ENVIRON[...], so tokens
        # and endpoint URLs are handed over via the process environment.
        os.environ.update({
            "ADMIN_TOKEN": admin_auth[0],
            "USER_TOKEN": auth[0],
            "AODH_GRANULARITY": str(config.CONF.telemetry.alarm_granularity),
            "AODH_SERVICE_URL": self._get_endpoint(auth, "alarming_plugin"),
            "GNOCCHI_SERVICE_URL": self._get_endpoint(auth, "metric"),
            "PANKO_SERVICE_URL": self._get_endpoint(auth, "event"),
            "HEAT_SERVICE_URL": self._get_endpoint(auth, "orchestration"),
            "NOVA_SERVICE_URL": self._get_endpoint(auth, "compute"),
            "GLANCE_SERVICE_URL": self._get_endpoint(auth, "image"),
            "GLANCE_IMAGE_NAME": self.glance_image_create(),
            "NOVA_FLAVOR_REF": config.CONF.compute.flavor_ref,
            "NEUTRON_NETWORK": networks[0].get('id'),
        })

        with open(os.path.join(TEST_DIR, filename)) as f:
            test_suite = suitemaker.test_suite_from_dict(
                loader=unittest.defaultTestLoader,
                test_base_name="gabbi",
                suite_dict=utils.load_yaml(f),
                test_directory=TEST_DIR,
                host=None, port=None,
                fixture_module=None,
                intercept=None,
                handlers=runner.initialize_handlers([]),
                test_loader_name="tempest")

            # NOTE(sileht): We hide stdout/stderr and reraise the failure
            # manually, tempest will print it itself.
            with open(os.devnull, 'w') as stream:
                result = unittest.TextTestRunner(
                    stream=stream, verbosity=0, failfast=True,
                ).run(test_suite)

            if not result.wasSuccessful():
                failures = (result.errors + result.failures +
                            result.unexpectedSuccesses)
                if failures:
                    # Re-raise only the first failure; failfast=True means
                    # the run stopped there anyway.
                    test, bt = failures[0]
                    name = test.test_data.get('name', test.id())
                    msg = 'From test "%s" :\n%s' % (name, bt)
                    self.fail(msg)

            self.assertTrue(result.wasSuccessful())
def test_maker(name, filename):
def test(self):
self._do_test(filename)
test.__name__ = name
return test
# Create one scenario per yaml file
# At import time, attach one test method to TestTelemetryIntegration for
# each *.yaml file found in TEST_DIR, named after the file with dashes
# replaced by underscores (e.g. foo-bar.yaml -> test_foo_bar).
for filename in os.listdir(TEST_DIR):
    if not filename.endswith('.yaml'):
        continue
    name = "test_%s" % filename[:-5].lower().replace("-", "_")
    setattr(TestTelemetryIntegration, name,
            test_maker(name, filename))

View File

@ -275,9 +275,6 @@ oslo.config.opts =
ceilometer = ceilometer.opts:list_opts
ceilometer-auth = ceilometer.opts:list_keystoneauth_opts
tempest.test_plugins =
ceilometer_tests = ceilometer.tests.tempest.plugin:CeilometerTempestPlugin
[build_sphinx]
all_files = 1
build-dir = doc/build

View File

@ -19,5 +19,4 @@ testtools>=1.4.0 # MIT
gabbi>=1.30.0 # Apache-2.0
requests-aws>=0.1.4 # BSD License (3 clause)
os-testr>=0.4.1 # Apache-2.0
tempest>=14.0.0 # Apache-2.0
kafka-python>=1.3.2 # Apache-2.0