Add ClusterMetadata documentation

This commit is contained in:
Dana Powers 2017-03-07 11:33:23 -08:00
parent 1810816b00
commit a4338169d4
5 changed files with 34 additions and 0 deletions

View File

@ -0,0 +1,5 @@
ClusterMetadata
===============
.. autoclass:: kafka.cluster.ClusterMetadata
:members:

View File

@ -6,6 +6,7 @@ Subpackages
.. toctree::
kafka.cluster
kafka.consumer
kafka.partitioner
kafka.producer
@ -13,6 +14,15 @@ Subpackages
Submodules
----------
kafka.cluster module
--------------------
.. automodule:: kafka.cluster
:members:
:undoc-members:
:show-inheritance:
kafka.client module
-------------------

View File

@ -7,3 +7,4 @@ kafka-python API
KafkaProducer
KafkaClient
BrokerConnection
ClusterMetadata

View File

@ -48,6 +48,10 @@ class KafkaClient(object):
This class is not thread-safe!
Attributes:
cluster (:any:`ClusterMetadata`): Local cache of cluster metadata, retrieved
via MetadataRequests during :meth:`.poll`.
Keyword Arguments:
bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
strings) that the consumer should contact to bootstrap initial

View File

@ -16,6 +16,20 @@ log = logging.getLogger(__name__)
class ClusterMetadata(object):
"""
A class to manage kafka cluster metadata.
This class does not perform any IO. It simply updates internal state
given API responses (MetadataResponse, GroupCoordinatorResponse).
Keyword Arguments:
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
metadata_max_age_ms (int): The period of time in milliseconds after
which we force a refresh of metadata even if we haven't seen any
partition leadership changes to proactively discover any new
brokers or partitions. Default: 300000
"""
DEFAULT_CONFIG = {
'retry_backoff_ms': 100,
'metadata_max_age_ms': 300000,