Changes after running the hacking OpenStack code checks, except line length.

commit 06d7913248
parent 776f18b7a7
@@ -6,32 +6,31 @@
 import logging
 import logging.config
-from multiprocessing import Process, Queue
+import multiprocessing
 import signal
-from state_tracker import ZookeeperStateTracker
 import sys
 import yaml
 
+from state_tracker import ZookeeperStateTracker
-from processors.kafka_consumer import KafkaConsumer
 from processors.alarm_processor import AlarmProcessor
+from processors.kafka_consumer import KafkaConsumer
 from processors.notification_processor import NotificationProcessor
 from processors.sent_notification_processor import SentNotificationProcessor
 
 
 log = logging.getLogger(__name__)
 processors = []  # global list to facilitate clean signal handling
 
 
 def clean_exit(signum, frame=None):
-    """ Exit all processes cleanly
-    Can be called on an os signal or no zookeeper losing connection.
+    """Exit all processes cleanly
+    Can be called on an os signal or no zookeeper losing connection.
     """
     for process in processors:
         # Since this is set up as a handler for SIGCHLD when this kills one child it gets another signal, the result
         # everything comes crashing down with some exceptions thrown for already dead processes
         try:
             process.terminate()
-        except:
+        except Exception:
             pass
 
     sys.exit(0)
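Most of the churn in this hunk follows the OpenStack hacking guideline that imports should bring in modules, not names, so every call site stays traceable to its module. A minimal self-contained sketch of the before/after pattern (the worker function is invented for illustration):

    import multiprocessing


    def worker(queue):
        queue.put('done')  # trivial task so the sketch runs end to end


    if __name__ == '__main__':
        # Preferred by the hacking checks: import the module, qualify names at use.
        # The old style, 'from multiprocessing import Process, Queue', hid where
        # Process and Queue came from at their call sites.
        queue = multiprocessing.Queue(10)
        process = multiprocessing.Process(target=worker, args=(queue,))
        process.start()
        print(queue.get())
        process.join()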
@@ -43,8 +42,8 @@ def main(argv=None):
     if len(argv) == 2:
         config_file = argv[1]
     elif len(argv) > 2:
-        print "Usage: " + argv[0] + " <config_file>"
-        print "Config file defaults to /etc/mon/notification.yaml"
+        print("Usage: " + argv[0] + " <config_file>")
+        print("Config file defaults to /etc/mon/notification.yaml")
         return 1
     else:
         config_file = '/etc/mon/notification.yaml'
@@ -55,10 +54,10 @@ def main(argv=None):
     logging.config.dictConfig(config['logging'])
 
     #Create the queues
-    alarms = Queue(config['queues']['alarms_size'])
-    notifications = Queue(config['queues']['notifications_size'])  # data is a list of notification objects
-    sent_notifications = Queue(config['queues']['sent_notifications_size'])  # data is a list of notification objects
-    finished = Queue(config['queues']['finished_size'])  # Data is of the form (partition, offset)
+    alarms = multiprocessing.Queue(config['queues']['alarms_size'])
+    notifications = multiprocessing.Queue(config['queues']['notifications_size'])  # [notification_object, ]
+    sent_notifications = multiprocessing.Queue(config['queues']['sent_notifications_size'])  # [notification_object, ]
+    finished = multiprocessing.Queue(config['queues']['finished_size'])  # Data is of the form (partition, offset)
 
     #State Tracker - Used for tracking the progress of fully processed alarms and the zookeeper lock
     tracker = ZookeeperStateTracker(config['zookeeper']['url'], config['kafka']['alarm_topic'], finished)
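The sizes pulled from the YAML config matter because a bounded multiprocessing.Queue gives the pipeline backpressure: once maxsize items are waiting, put() blocks the producer instead of letting memory grow without limit. A small sketch, with a made-up size in place of the configured one:

    import multiprocessing

    # maxsize comes from config['queues'][...] in the real code; 2 is illustrative.
    finished = multiprocessing.Queue(2)
    finished.put((0, 41))  # (partition, offset) tuples, as the comment above notes
    finished.put((0, 42))
    # A third put() here would block until a consumer calls get().
    print(finished.get())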
@@ -66,7 +65,7 @@ def main(argv=None):
 
     ## Define processors
     #KafkaConsumer
-    kafka = Process(
+    kafka = multiprocessing.Process(
         target=KafkaConsumer(
             alarms,
             config['kafka']['url'],
@@ -79,8 +78,8 @@ def main(argv=None):
 
     #AlarmProcessors
     alarm_processors = []
-    for i in xrange(config['processors']['alarm']['number']):
-        alarm_processors.append(Process(
+    for i in range(config['processors']['alarm']['number']):
+        alarm_processors.append(multiprocessing.Process(
             target=AlarmProcessor(
                 alarms,
                 notifications,
@@ -95,8 +94,8 @@ def main(argv=None):
 
     #NotificationProcessors
     notification_processors = []
-    for i in xrange(config['processors']['notification']['number']):
-        notification_processors.append(Process(
+    for i in range(config['processors']['notification']['number']):
+        notification_processors.append(multiprocessing.Process(
             target=NotificationProcessor(
                 notifications,
                 sent_notifications,
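Both loops above have the same fan-out shape: N identical worker processes competing for items on a shared queue. A stripped-down sketch of that pattern (the sentinel shutdown is added so the demo terminates; the real processors run forever and are stopped via clean_exit):

    import multiprocessing


    def consume(queue):
        while True:
            item = queue.get()  # workers compete for items from the shared queue
            if item is None:    # sentinel so this demo can shut down cleanly
                break


    if __name__ == '__main__':
        queue = multiprocessing.Queue(10)
        workers = [multiprocessing.Process(target=consume, args=(queue,)) for _ in range(4)]
        for w in workers:
            w.start()
        for _ in workers:
            queue.put(None)
        for w in workers:
            w.join()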
@@ -107,7 +106,7 @@ def main(argv=None):
     processors.extend(notification_processors)
 
     #SentNotificationProcessor
-    sent_notification_processor = Process(
+    sent_notification_processor = multiprocessing.Process(
         target=SentNotificationProcessor(
             sent_notifications,
             finished,
@@ -125,7 +124,7 @@ def main(argv=None):
         for process in processors:
             process.start()
         tracker.run()  # Runs in the main process
-    except:
+    except Exception:
         log.exception('Error exiting!')
         for process in processors:
             process.terminate()
@@ -2,7 +2,7 @@ import json
 
 
 class Notification(object):
-    """ An abstract base class used to define the notification interface and common functions
+    """An abstract base class used to define the notification interface and common functions
     """
     __slots__ = (
         'address',

@@ -20,13 +20,13 @@ class Notification(object):
     )
 
     def __init__(self, ntype, src_partition, src_offset, name, address, alarm):
-        """ Setup the notification object
-        The src_partition and src_offset allow the notification to be linked to the alarm that it came from
-        ntype - The notification type
-        name - Name used in sending
-        address - to send the notification to
-        alarm_data - info that caused the notification
-        notifications that come after this one to remain uncommitted.
+        """Setup the notification object
+        The src_partition and src_offset allow the notification to be linked to the alarm that it came from
+        ntype - The notification type
+        name - Name used in sending
+        address - to send the notification to
+        alarm_data - info that caused the notification
+        notifications that come after this one to remain uncommitted.
         """
         self.address = address
         self.name = name

@@ -44,7 +44,7 @@ class Notification(object):
         self.notification_timestamp = None  # to be updated on actual notification send time
 
     def to_json(self):
-        """ Return json representation
+        """Return json representation
         """
         notification_fields = [
             'address',
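For context on the class being touched: __slots__ fixes the attribute set, which also gives to_json a natural field list to walk. A toy version of the idea, with only two fields where the real class carries the full notification state:

    import json


    class Notification(object):
        __slots__ = ('address', 'name')  # fixed attribute set, no per-instance __dict__

        def __init__(self, address, name):
            self.address = address
            self.name = name

        def to_json(self):
            # Serialize by walking the declared slots.
            return json.dumps({field: getattr(self, field) for field in self.__slots__})


    print(Notification('root@localhost', 'default').to_json())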
@@ -1,6 +1,7 @@
 
 class NotificationException(Exception):
-    """ Base exception class """
+    """Base exception class.
+    """
     pass
 
 
@@ -6,7 +6,7 @@ log = logging.getLogger(__name__)
 class BaseProcessor(object):
     @staticmethod
     def _add_to_queue(queue, queue_name, msg):
-        """ Warns on full queue then does a blocking push to the queue.
+        """Warns on full queue then does a blocking push to the queue.
         """
         if queue.full():
             log.warn('Queue %s is full, publishing is blocked' % queue_name)
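A self-contained rendering of the method this hunk touches, assuming the blocking put() its docstring describes (the put itself sits below the displayed context):

    import logging
    import multiprocessing

    log = logging.getLogger(__name__)


    def add_to_queue(queue, queue_name, msg):
        # full() is only advisory for a multiprocessing.Queue, so this check can
        # race, but the blocking put() below is correct either way; the warning
        # just flags that the pipeline is backed up.
        if queue.full():
            log.warn('Queue %s is full, publishing is blocked' % queue_name)
        queue.put(msg)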
@@ -2,9 +2,9 @@ import json
 import logging
 import MySQLdb
 
-from . import BaseProcessor
-from ..notification_exceptions import AlarmFormatError
-from ..notification import Notification
+from mon_notification.processors import BaseProcessor
+from mon_notification.notification import Notification
+from mon_notification.notification_exceptions import AlarmFormatError
 
 
 log = logging.getLogger(__name__)

@@ -20,7 +20,7 @@ class AlarmProcessor(BaseProcessor):
 
     @staticmethod
     def _parse_alarm(alarm_data):
-        """ Parse the alarm message making sure it matches the expected format.
+        """Parse the alarm message making sure it matches the expected format.
         """
         expected_fields = [
             'alarmId',

@@ -43,8 +43,8 @@ class AlarmProcessor(BaseProcessor):
         return alarm
 
     def run(self):
-        """ Check the notification setting for this project in mysql then create the appropriate notification or
-        add to the finished_queue
+        """Check the notification setting for this project in mysql then create the appropriate notification or
+        add to the finished_queue
         """
         cur = self.mysql.cursor()
         while True:

@@ -53,7 +53,7 @@ class AlarmProcessor(BaseProcessor):
             offset = raw_alarm[1].offset
             try:
                 alarm = self._parse_alarm(raw_alarm[1].message.value)
-            except Exception, e:  # This is general because of a lack of json exception base class
+            except Exception as e:  # This is general because of a lack of json exception base class
                 log.error("Invalid Alarm format skipping partition %d, offset %d\nErrror%s" % (partition, offset, e))
                 self._add_to_queue(self.finished_queue, 'finished', (partition, offset))
                 continue

@@ -81,4 +81,3 @@ class AlarmProcessor(BaseProcessor):
                 self._add_to_queue(self.finished_queue, 'finished', (partition, offset))
             else:
                 self._add_to_queue(self.notification_queue, 'notifications', notifications)
-
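The except change above is more than style: the comma form is Python-2-only syntax, while 'as' parses on both Python 2.6+ and Python 3, which is why the hacking checks flag it. A tiny demonstration:

    import json

    try:
        json.loads('not valid json')
    except Exception as e:  # 'except Exception, e:' would be a syntax error on Python 3
        print('Invalid alarm format: %s' % e)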
@@ -1,30 +1,30 @@
+import kafka.client
+import kafka.consumer
 import logging
 
-from . import BaseProcessor
-from kafka.client import KafkaClient
-from kafka.consumer import SimpleConsumer
+from mon_notification.processors import BaseProcessor
 
 log = logging.getLogger(__name__)
 
 
 class KafkaConsumer(BaseProcessor):
-    """ Pull from the alarm topic and place alarm objects on the sent_queue.
-    No commit is being done until processing is finished and as the processing can take some time it is done in
-    another step.
+    """Pull from the alarm topic and place alarm objects on the sent_queue.
+    No commit is being done until processing is finished and as the processing can take some time it is done in
+    another step.
 
-    Unfortunately at this point the python-kafka client does not handle multiple consumers seamlessly.
-    For more information see, https://github.com/mumrah/kafka-python/issues/112
+    Unfortunately at this point the python-kafka client does not handle multiple consumers seamlessly.
+    For more information see, https://github.com/mumrah/kafka-python/issues/112
     """
     def __init__(self, queue, kafka_url, group, topic, initial_offsets=None):
-        """
-        kafka_url, group, topic - kafka connection details
-        sent_queue - a sent_queue to publish log entries to
+        """Init
+        kafka_url, group, topic - kafka connection details
+        sent_queue - a sent_queue to publish log entries to
         """
         self.queue = queue
 
-        self.kafka = KafkaClient(kafka_url)
+        self.kafka = kafka.client.KafkaClient(kafka_url)
         # No autocommit, it does not work with kafka 0.8.0 - see https://github.com/mumrah/kafka-python/issues/118
-        self.consumer = SimpleConsumer(self.kafka, group, topic, auto_commit=False)
+        self.consumer = kafka.consumer.SimpleConsumer(self.kafka, group, topic, auto_commit=False)
         self.consumer.provide_partition_info()  # Without this the partition is not provided in the response
         if initial_offsets is not None:
             # Set initial offsets directly in the consumer, there is no method for this so I have to do it here

@@ -34,7 +34,7 @@ class KafkaConsumer(BaseProcessor):
             self.consumer.fetch_offsets[partition] = initial_offsets[partition] + 1
 
     def run(self):
-        """ Consume from kafka and place alarm objects on the sent_queue
+        """Consume from kafka and place alarm objects on the sent_queue
        """
         for message in self.consumer:
             log.debug("Consuming message from kafka, partition %d, offset %d" % (message[0], message[1].offset))
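Pulling the setup out of the diff, this is roughly what the class wires together (0.8-era kafka-python API, as used by this code; the broker address, group, and topic below are placeholders):

    import kafka.client
    import kafka.consumer

    client = kafka.client.KafkaClient('localhost:9092')  # placeholder broker address
    # auto_commit=False because offsets are committed separately, after processing.
    consumer = kafka.consumer.SimpleConsumer(client, 'mon-notification', 'alarms',
                                             auto_commit=False)
    consumer.provide_partition_info()  # messages now arrive as (partition, message) pairs
    for partition, message in consumer:
        print(partition, message.offset, message.message.value)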
@@ -1,9 +1,9 @@
-from email.mime.text import MIMEText
+import email.mime.text
 import logging
 import smtplib
 import time
 
-from . import BaseProcessor
+from mon_notification.processors import BaseProcessor
 
 log = logging.getLogger(__name__)

@@ -23,36 +23,37 @@ class NotificationProcessor(BaseProcessor):
         self.notification_types = {'email': self._send_email}
 
     def _send_email(self, notification):
-        """ Send the notification via email
-        Returns the notification upon success, None upon failure
+        """Send the notification via email
+        Returns the notification upon success, None upon failure
         """
+        msg = email.mime.text.MIMEText("%s\nAlarm %s transitioned to the %s state at %s\nFull Data:\n%s"
+                                       % (notification.message,
+                                          notification.alarm_name,
+                                          notification.state,
+                                          notification.alarm_timestamp,
+                                          notification.to_json()))
+        msg['Subject'] = '%s: %s' % (notification.state, notification.alarm_name)
+        msg['From'] = self.email_config['from_addr']
+        msg['To'] = notification.address
 
         try:
-            msg = MIMEText("%s\nAlarm %s transitioned to the %s state at %s\nFull Data:\n%s"
-                           % (notification.message,
-                              notification.alarm_name,
-                              notification.state,
-                              notification.alarm_timestamp,
-                              notification.to_json()))
-            msg['Subject'] = '%s: %s' % (notification.state, notification.alarm_name)
-            msg['From'] = self.email_config['from_addr']
-            msg['To'] = notification.address
             self.smtp.sendmail(self.email_config['from_addr'], notification.address, msg.as_string())
-            log.debug('Sent email to %s, notification %s' % (self.email_config['from_addr'], notification.to_json()))
-        except smtplib.SMTPServerDisconnected, e:
+            log.debug('Sent email to %s, notification %s' % (notification.address, notification.to_json()))
+        except smtplib.SMTPServerDisconnected as e:
             log.debug('SMTP server disconnected. Will reconnect and retry message.')
             self._smtp_connect()
             try:
                 self.smtp.sendmail(self.email_config['from_addr'], notification.address, msg.as_string())
-                log.debug('Sent email to %s, notification %s' % (self.email_config['from_addr'], notification.to_json()))
-            except smtplib.SMTPException, e:
+                log.debug('Sent email to %s, notification %s' % (notification.address, notification.to_json()))
+            except smtplib.SMTPException as e:
                 log.error("Error sending Email Notification:%s\nError:%s" % (notification.to_json(), e))
-        except smtplib.SMTPException, e:
+        except smtplib.SMTPException as e:
             log.error("Error sending Email Notification:%s\nError:%s" % (notification.to_json(), e))
         else:
             return notification
 
     def _smtp_connect(self):
-        """ Connect to the smtp server
+        """Connect to the smtp server
         """
         log.info('Connecting to Email Server %s' % self.email_config['server'])
         smtp = smtplib.SMTP(

@@ -64,9 +65,9 @@ class NotificationProcessor(BaseProcessor):
         self.smtp = smtp
 
     def run(self):
-        """ Send the notifications
-        For each notification in a message it is sent according to its type.
-        If all notifications fail the alarm partition/offset are added to the the finished queue
+        """Send the notifications
+        For each notification in a message it is sent according to its type.
+        If all notifications fail the alarm partition/offset are added to the the finished queue
         """
         while True:
             notifications = self.notification_queue.get()
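The control flow in the _send_email hunk boils down to one reconnect-and-retry on a dropped connection, with any other SMTP failure logged and the message dropped. The same shape, condensed (names are illustrative):

    import smtplib


    def send_with_retry(smtp, reconnect, from_addr, to_addr, payload):
        try:
            smtp.sendmail(from_addr, to_addr, payload)
        except smtplib.SMTPServerDisconnected:
            smtp = reconnect()  # re-establish the session once, then retry
            try:
                smtp.sendmail(from_addr, to_addr, payload)
            except smtplib.SMTPException as e:
                print('send failed after reconnect: %s' % e)
        except smtplib.SMTPException as e:
            print('send failed: %s' % e)
        return smtp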
@@ -1,40 +1,40 @@
+import kafka.client
+import kafka.producer
 import logging
 
-from . import BaseProcessor
-from kafka.client import KafkaClient
-from kafka.producer import SimpleProducer
+from mon_notification.processors import BaseProcessor
 
 log = logging.getLogger(__name__)
 
 
 class SentNotificationProcessor(BaseProcessor):
-    """ Processes notifications which have been sent
-    This involves adding them into a kafka topic for persisting by another process and adding the alarm
-    to the finished queue.
+    """Processes notifications which have been sent
+    This involves adding them into a kafka topic for persisting by another process and adding the alarm
+    to the finished queue.
     """
 
     def __init__(self, sent_queue, finished_queue, url, topic):
-        """
-        url, group - kafka connection details
-        topic - kafka topic to publish notifications to
-        finished_queue - queue written to when notifications are fully finished.
-        sent_queue - the sent_notifications queue notifications are read from
+        """Init
+        url, group - kafka connection details
+        topic - kafka topic to publish notifications to
+        finished_queue - queue written to when notifications are fully finished.
+        sent_queue - the sent_notifications queue notifications are read from
         """
         self.topic = topic
         self.finished_queue = finished_queue
         self.sent_queue = sent_queue
 
-        self.kafka = KafkaClient(url)
-        self.producer = SimpleProducer(
+        self.kafka = kafka.client.KafkaClient(url)
+        self.producer = kafka.producer.SimpleProducer(
             self.kafka,
             async=False,
-            req_acks=SimpleProducer.ACK_AFTER_LOCAL_WRITE,
+            req_acks=kafka.producer.SimpleProducer.ACK_AFTER_LOCAL_WRITE,
             ack_timeout=2000
         )
 
     def run(self):
-        """ Takes messages from the sent_queue, puts them on the kafka notification topic and then adds
-        partition/offset to the finished queue
+        """Takes messages from the sent_queue, puts them on the kafka notification topic and then adds
+        partition/offset to the finished queue
         """
         while True:
             notifications = self.sent_queue.get()
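The producer half mirrors the consumer: synchronous sends with ACK_AFTER_LOCAL_WRITE mean send_messages() returns only after the lead broker has written the message, so a crash cannot silently drop a sent notification. A sketch with placeholder values (same 0.8-era kafka-python; async= was a valid keyword argument then, before async became a reserved word in Python 3.7):

    import kafka.client
    import kafka.producer

    client = kafka.client.KafkaClient('localhost:9092')  # placeholder broker address
    producer = kafka.producer.SimpleProducer(
        client,
        async=False,  # block until the broker acknowledges
        req_acks=kafka.producer.SimpleProducer.ACK_AFTER_LOCAL_WRITE,
        ack_timeout=2000)
    producer.send_messages('notifications', 'payload')  # placeholder topic and message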
@@ -1,32 +1,31 @@
-from collections import defaultdict
+import collections
+import kazoo.client
+import kazoo.exceptions
 import logging
 import time
 
-from kazoo.client import KazooClient
-from kazoo.exceptions import KazooException
-
-from notification_exceptions import NotificationException
+from mon_notification import notification_exceptions
 
 log = logging.getLogger(__name__)
 
 
 class ZookeeperStateTracker(object):
-    """ Tracks message offsets for a kafka topic and partitions.
-    Uses zookeeper to keep track of the last committed offset.
-    As messages are finished with processing the committed offset is updated periodically.
-    The messages are not necessarily finished in order, but the committed offset includes
-    all previous messages so this object tracks any gaps updating as needed.
+    """Tracks message offsets for a kafka topic and partitions.
+    Uses zookeeper to keep track of the last committed offset.
+    As messages are finished with processing the committed offset is updated periodically.
+    The messages are not necessarily finished in order, but the committed offset includes
+    all previous messages so this object tracks any gaps updating as needed.
     """
     def __init__(self, url, topic, finished_queue):
-        """ Setup the finished_queue
-        url is the zookeeper hostname:port
-        topic is the kafka topic to track
+        """Setup the finished_queue
+        url is the zookeeper hostname:port
+        topic is the kafka topic to track
         """
         self.finished_queue = finished_queue
         self.topic = topic
         self.has_lock = False
 
-        self.zookeeper = KazooClient(url)
+        self.zookeeper = kazoo.client.KazooClient(url)
         self.zookeeper.start()
         self.topic_path = '/consumers/mon-notification/%s' % topic
@@ -36,11 +35,11 @@ class ZookeeperStateTracker(object):
         self._offsets = None
         # This is a dictionary of sets used for tracking finished offsets when there is a gap and the committed offset
         # can not yet be advanced
-        self._uncommitted_offsets = defaultdict(set)
+        self._uncommitted_offsets = collections.defaultdict(set)
 
     def _get_offsets(self):
-        """ Read the initial offsets from zookeeper or set defaults
-        The return is a dictionary with key name being partition # and value the offset
+        """Read the initial offsets from zookeeper or set defaults
+        The return is a dictionary with key name being partition # and value the offset
         """
         if not self.has_lock:
             log.warn('Reading offsets before the tracker has the lock, they could change')

@@ -54,11 +53,11 @@ class ZookeeperStateTracker(object):
         else:
             self.zookeeper.ensure_path(self.topic_path)
             return {}
-        except KazooException:
+        except kazoo.exceptions.KazooException:
             log.exception('Error retrieving the committed offset in zookeeper')
 
     def _update_zk_offsets(self):
-        """ Update zookeepers stored offset numbers to the values in self.offsets
+        """Update zookeepers stored offset numbers to the values in self.offsets
         """
         try:
             for partition, value in self._offsets.iteritems():
@@ -66,12 +65,12 @@ class ZookeeperStateTracker(object):
                 self.zookeeper.ensure_path(partition_path)
                 self.zookeeper.set(partition_path, str(value))
             log.debug('Updated committed offsets at path %s, offsets %s' % (self.topic_path, self._offsets))
-        except KazooException:
+        except kazoo.exceptions.KazooException:
             log.exception('Error updating the committed offset in zookeeper')
 
     @property
     def offsets(self):
-        """ Generally only initialize the offsets after the lock has been pulled
+        """Generally only initialize the offsets after the lock has been pulled
         """
         if self._offsets is None:
             self._offsets = self._get_offsets()

@@ -79,8 +78,8 @@ class ZookeeperStateTracker(object):
         return self._offsets
 
     def get_offsets(self, partitions=None):
-        """ Return the offsets for specified partitions or all if partitions is None
-        The return is a dictionary with key name being partition # and value the offset
+        """Return the offsets for specified partitions or all if partitions is None
+        The return is a dictionary with key name being partition # and value the offset
         """
         if partitions is None:
             return self.offsets

@@ -88,7 +87,7 @@ class ZookeeperStateTracker(object):
         return {k: self.offsets[k] for k in partitions}
 
     def lock(self, exit_method):
-        """ Grab a lock within zookeeper, if not available retry.
+        """Grab a lock within zookeeper, if not available retry.
         """
         while True:
             # The path is ephemeral so if it exists wait then cycle again
@@ -99,7 +98,7 @@ class ZookeeperStateTracker(object):
 
             try:
                 self.zookeeper.create(self.lock_path, ephemeral=True, makepath=True)
-            except KazooException, e:
+            except kazoo.exceptions.KazooException as e:
                 # If creating the path fails something beat us to it most likely, try again
                 log.warn('Error creating lock path %s\n%s' % (self.lock_path, e))
                 continue
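The lock here is the classic ZooKeeper ephemeral-node pattern: whoever creates the node owns the lock, and the node disappears with the session, so a crashed owner cannot wedge the cluster. A standalone sketch (address and path are placeholders; kazoo also ships a ready-made Lock recipe, which this code does not use):

    import kazoo.client
    import kazoo.exceptions

    zk = kazoo.client.KazooClient('localhost:2181')  # placeholder address
    zk.start()
    try:
        zk.create('/mon-notification/lock', ephemeral=True, makepath=True)
        print('lock acquired; it vanishes automatically if this session dies')
    except kazoo.exceptions.NodeExistsError:
        print('another engine holds the lock; wait and retry')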
@@ -114,12 +113,12 @@ class ZookeeperStateTracker(object):
         self.zookeeper.add_listener(exit_method)
 
     def run(self):
-        """ Mark a message as finished and where possible commit the new offset to zookeeper.
-        There is no mechanism here to deal with the situation where a single alarm is extremely slow to finish
-        holding up all others behind it. It is assumed the notification job will time out allowing things to finish.
+        """Mark a message as finished and where possible commit the new offset to zookeeper.
+        There is no mechanism here to deal with the situation where a single alarm is extremely slow to finish
+        holding up all others behind it. It is assumed the notification will time out allowing things to finish.
         """
         if not self.has_lock:
-            raise NotificationException('Attempt to begin run without Zookeeper Lock')
+            raise notification_exceptions.NotificationException('Attempt to begin run without Zookeeper Lock')
 
         if self._offsets is None:  # Verify the offsets have been initialized
             self._offsets = self._get_offsets()
@@ -138,7 +137,7 @@ class ZookeeperStateTracker(object):
         elif self._offsets[partition] == offset - 1:
 
             new_offset = offset
-            for x in xrange(offset + 1, offset + 1 + len(self._uncommitted_offsets[partition])):
+            for x in range(offset + 1, offset + 1 + len(self._uncommitted_offsets[partition])):
                 if x in self._uncommitted_offsets[partition]:
                     new_offset = x
                     self._uncommitted_offsets[partition].remove(x)
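This final hunk is the heart of the tracker: when the message directly above the committed offset finishes, it walks forward through any out-of-order completions banked in the defaultdict of sets. A simplified standalone version of that bookkeeping (the real method also handles duplicates and writes the result back to zookeeper):

    import collections

    committed = {0: 4}                          # partition -> last committed offset
    uncommitted = collections.defaultdict(set)  # partition -> finished but gapped offsets


    def finish(partition, offset):
        if committed[partition] == offset - 1:
            new_offset = offset
            for x in range(offset + 1, offset + 1 + len(uncommitted[partition])):
                if x in uncommitted[partition]:
                    new_offset = x
                    uncommitted[partition].remove(x)
                else:
                    break  # stop at the first offset still outstanding
            committed[partition] = new_offset
        else:
            uncommitted[partition].add(offset)


    finish(0, 6)          # finished out of order: banked, commit stays at 4
    finish(0, 5)          # fills the gap: commit advances through 6
    print(committed[0])   # -> 6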
@@ -0,0 +1,5 @@
+[pep8]
+max-line-length = 120
+
+[flake8]
+max-line-length = 120
setup.py
@@ -1,9 +1,9 @@
-from setuptools import setup, find_packages
+import setuptools
 
-setup(
+setuptools.setup(
     name="mon-notification",
     version="0.1",
-    packages=find_packages(exclude=['tests']),
+    packages=setuptools.find_packages(exclude=['tests']),
     entry_points={
         'console_scripts': [
             'mon-notification = mon_notification.main:main'