Add service cleanup command

Cinder volume services will perform cleanup on start, but when we have
multiple volume services grouped in a cluster, we may want to trigger
cleanup of services that are down.

This patch adds the `work-cleanup` command to trigger server cleanups. It
prints the service nodes that will be cleaned, as well as those that had no
alternative service in the cluster to perform the cleanup for them.

This command will only work on servers supporting API version 3.24 or
higher.

New command:

cinder work-cleanup [--cluster <cluster-name>] [--host <hostname>]
                    [--binary <binary>]
                    [--is-up <True|true|False|false>]
                    [--disabled <True|true|False|false>]
                    [--resource-id <resource-id>]
                    [--resource-type <Volume|Snapshot>]

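For example, with a server supporting microversion 3.24, an operator could
request cleanup of all down services in a given cluster (the cluster name
below is illustrative):

    cinder --os-volume-api-version 3.24 work-cleanup \
        --cluster mycluster --is-up false
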
Specs: https://specs.openstack.org/openstack/cinder-specs/specs/newton/ha-aa-cleanup.html

Change-Id: I1c33ffbffcb14f34ee2bda9042e706937b1147d7
Depends-On: If336b6569b171846954ed6eb73f5a4314c6c7e2e
Implements: blueprint cinder-volume-active-active-support
Gorka Eguileor 2016-08-26 13:56:18 +02:00
parent 1a4176ad87
commit 45eb51eb99
6 changed files with 129 additions and 0 deletions


@@ -563,6 +563,17 @@ class FakeHTTPClient(fake_v2.FakeHTTPClient):
            }
        }

    def post_workers_cleanup(self, **kw):
        response = {
            'cleaning': [{'id': '1', 'cluster_name': 'cluster1',
                          'host': 'host1', 'binary': 'binary'},
                         {'id': '3', 'cluster_name': 'cluster1',
                          'host': 'host3', 'binary': 'binary'}],
            'unavailable': [{'id': '2', 'cluster_name': 'cluster2',
                             'host': 'host2', 'binary': 'binary'}],
        }
        return 200, {}, response

    #
    # resource filters
    #


@@ -1168,3 +1168,23 @@ class ShellTest(utils.TestCase):
                         '--name foo --description bar --bootable '
                         '--volume-type baz --availability-zone az '
                         '--metadata k1=v1 k2=v2')

    def test_worker_cleanup_before_3_24(self):
        self.assertRaises(SystemExit,
                          self.run_command,
                          'work-cleanup fakehost')

    def test_worker_cleanup(self):
        self.run_command('--os-volume-api-version 3.24 '
                         'work-cleanup --cluster clustername --host hostname '
                         '--binary binaryname --is-up false --disabled true '
                         '--resource-id uuid --resource-type Volume')
        expected = {'cluster_name': 'clustername',
                    'host': 'hostname',
                    'binary': 'binaryname',
                    'is_up': 'false',
                    'disabled': 'true',
                    'resource_id': 'uuid',
                    'resource_type': 'Volume'}
        self.assert_called('POST', '/workers/cleanup', body=expected)


@@ -42,6 +42,7 @@ from cinderclient.v3 import volume_transfers
from cinderclient.v3 import volume_type_access
from cinderclient.v3 import volume_types
from cinderclient.v3 import volumes
from cinderclient.v3 import workers


class Client(object):

@@ -91,6 +92,7 @@ class Client(object):
        self.transfers = volume_transfers.VolumeTransferManager(self)
        self.services = services.ServiceManager(self)
        self.clusters = clusters.ClusterManager(self)
        self.workers = workers.WorkerManager(self)
        self.consistencygroups = consistencygroups.\
            ConsistencygroupManager(self)
        self.groups = groups.GroupManager(self)


@@ -1011,6 +1011,52 @@ def do_cluster_disable(cs, args):
    utils.print_dict(cluster.to_dict())


@api_versions.wraps('3.24')
@utils.arg('--cluster', metavar='<cluster-name>', default=None,
           help='Cluster name. Default=None.')
@utils.arg('--host', metavar='<hostname>', default=None,
           help='Service host name. Default=None.')
@utils.arg('--binary', metavar='<binary>', default=None,
           help='Service binary. Default=None.')
@utils.arg('--is-up', metavar='<True|true|False|false>', dest='is_up',
           default=None, choices=('True', 'true', 'False', 'false'),
           help='Filter by up/down status, if set to true services need to be'
                ' up, if set to false services need to be down. Default is '
                'None, which means up/down status is ignored.')
@utils.arg('--disabled', metavar='<True|true|False|false>', default=None,
           choices=('True', 'true', 'False', 'false'),
           help='Filter by disabled status. Default=None.')
@utils.arg('--resource-id', metavar='<resource-id>', default=None,
           help='UUID of a resource to cleanup. Default=None.')
@utils.arg('--resource-type', metavar='<Volume|Snapshot>', default=None,
           choices=('Volume', 'Snapshot'),
           help='Type of resource to cleanup.')
def do_work_cleanup(cs, args):
    """Request cleanup of services with optional filtering."""
    filters = dict(cluster_name=args.cluster, host=args.host,
                   binary=args.binary, is_up=args.is_up,
                   disabled=args.disabled, resource_id=args.resource_id,
                   resource_type=args.resource_type)

    filters = {k: v for k, v in filters.items() if v is not None}

    cleaning, unavailable = cs.workers.clean(**filters)

    columns = ('ID', 'Cluster Name', 'Host', 'Binary')
    if cleaning:
        print('Following services will be cleaned:')
        utils.print_list(cleaning, columns)

    if unavailable:
        print('There are no alternative nodes to do cleanup for the following '
              'services:')
        utils.print_list(unavailable, columns)

    if not (cleaning or unavailable):
        print('No cleanable services matched cleanup criteria.')


@utils.arg('host',
           metavar='<host>',
           help='Cinder host on which the existing volume resides; '

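Against the fake data used by the unit tests above, a cleanup request would
print output along the following lines (the table rendering produced by
utils.print_list is illustrative):

    Following services will be cleaned:
    +----+--------------+-------+--------+
    | ID | Cluster Name | Host  | Binary |
    +----+--------------+-------+--------+
    | 1  | cluster1     | host1 | binary |
    | 3  | cluster1     | host3 | binary |
    +----+--------------+-------+--------+
    There are no alternative nodes to do cleanup for the following services:
    +----+--------------+-------+--------+
    | ID | Cluster Name | Host  | Binary |
    +----+--------------+-------+--------+
    | 2  | cluster2     | host2 | binary |
    +----+--------------+-------+--------+
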

@@ -0,0 +1,44 @@
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Interface to workers API
"""

from cinderclient.apiclient import base as common_base
from cinderclient import base


class Service(base.Resource):
    def __repr__(self):
        return "<Service (%s): %s in cluster %s>" % (self.id, self.host,
                                                     self.cluster_name or '-')

    @classmethod
    def list_factory(cls, mngr, elements):
        return [cls(mngr, element, loaded=True) for element in elements]


class WorkerManager(base.Manager):
    base_url = '/workers'

    def clean(self, **filters):
        url = self.base_url + '/cleanup'
        resp, body = self.api.client.post(url, body=filters)
        cleaning = Service.list_factory(self, body['cleaning'])
        unavailable = Service.list_factory(self, body['unavailable'])
        result = common_base.TupleWithMeta((cleaning, unavailable), resp)
        return result

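A minimal sketch of driving the same cleanup from the Python API, assuming a
keystoneauth1 session is available (the auth values and cluster name below
are placeholders):

    from keystoneauth1 import loading
    from keystoneauth1 import session
    from cinderclient import client

    # Placeholder credentials; adapt to the target cloud.
    loader = loading.get_plugin_loader('password')
    auth = loader.load_from_options(auth_url='http://controller:5000/v3',
                                    username='admin', password='secret',
                                    project_name='admin',
                                    user_domain_id='default',
                                    project_domain_id='default')
    sess = session.Session(auth=auth)

    # Microversion 3.24 or higher is required for the cleanup call.
    cinder = client.Client('3.24', session=sess)

    # clean() POSTs the filters to /workers/cleanup and returns the
    # (cleaning, unavailable) lists of Service objects.
    cleaning, unavailable = cinder.workers.clean(cluster_name='cluster1',
                                                 is_up='false')
    for svc in cleaning:
        print('Cleaning %s on host %s' % (svc.binary, svc.host))
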

@@ -0,0 +1,6 @@
---
features:
  - |
    New ``work-cleanup`` command to trigger server cleanups by other nodes
    within a cluster on Active-Active deployments on microversion 3.24 and
    higher.