Hack together the actual delta reporting, UNCLEAN CODE ALERT

Still very much in the experimental phase; KafkaPoller needs a lot of
refactoring at this point — deciding which code should live where — and
plenty of tests.
This commit is contained in:
R. Tyler Croy 2015-01-19 16:22:22 -08:00
parent 193b147064
commit 5fe4d8efaa
2 changed files with 31 additions and 10 deletions

View File

@ -24,6 +24,7 @@ class KafkaPoller extends Thread {
/* Broker id -> connected SimpleConsumer for that broker (looked up by partition leader id) */
private HashMap<Integer, SimpleConsumer> brokerConsumerMap = [:]
/* Known brokers; presumably populated by a watcher elsewhere in the class — not visible in this chunk */
private List<Broker> brokers = []
/* (topic, partition) -> consumer offsets; supplied by the caller via the constructor */
private AbstractMap<TopicPartition, List<zk.ConsumerOffset>> consumersMap
/* Callbacks invoked as callback(groupName, TopicPartition, delta) when a consumer group lags the leader */
private List<Closure> onDelta = []
KafkaPoller(AbstractMap map) {
this.consumersMap = map
@ -37,6 +38,10 @@ class KafkaPoller extends Thread {
reconnect()
}
if (this.consumersMap.size() > 0) {
dumpMetadata()
}
Thread.sleep(1 * 1000)
}
}
@ -52,21 +57,33 @@ class KafkaPoller extends Thread {
withScalaCollection(metadata.topicsMetadata).each { kafka.api.TopicMetadata f ->
withScalaCollection(f.partitionsMetadata).each { p ->
println "Consumer for ${f.topic}:${p.partitionId}"
SimpleConsumer consumer = this.brokerConsumerMap[p.leader.get().id]
println consumer
TopicAndPartition topicAndPart = new TopicAndPartition(f.topic, p.partitionId)
println consumer.earliestOrLatestOffset(
topicAndPart,
-1,
0)
Long offset = latestFromLeader(p.leader.get()?.id, f.topic, p.partitionId)
TopicPartition tp = new TopicPartition(f.topic, p.partitionId)
print "Consumer for ${f.topic}:${p.partitionId}"
println " latest: ${offset}"
this.consumersMap[tp].each { zk.ConsumerOffset c ->
Long delta = offset - c.offset
if (delta > 0) {
this.onDelta.each { Closure callback ->
callback.call(c.groupName, tp, delta)
}
}
}
}
}
println 'dumped'
}
/**
 * Ask the partition leader's broker for the latest (tail) offset of the
 * given topic/partition.
 *
 * @param leaderId broker id of the partition leader; key into brokerConsumerMap
 * @param topic Kafka topic name
 * @param partition partition number within the topic
 * @return the latest offset the leader reports for the partition
 * @throws IllegalStateException if no consumer connection exists for leaderId
 */
Long latestFromLeader(Integer leaderId, String topic, Integer partition) {
    SimpleConsumer consumer = this.brokerConsumerMap[leaderId]
    if (consumer == null) {
        /* Guard: the caller may pass null (it uses leader.get()?.id) or an id we
         * never connected to; fail with context instead of a bare NPE below.
         */
        throw new IllegalStateException("No SimpleConsumer connected for broker id ${leaderId} (${topic}:${partition})")
    }
    TopicAndPartition topicAndPart = new TopicAndPartition(topic, partition)
    /* -1 requests the latest offset (kafka.api.OffsetRequest.LatestTime).
     * XXX: A zero clientId into this method might not be right
     */
    return consumer.earliestOrLatestOffset(topicAndPart, -1, 0)
}
/**
 * Bridge a Scala Iterable into a Java Iterable so Groovy's each {}
 * iteration (and other Java-side consumers) can walk it directly.
 */
Iterable withScalaCollection(scala.collection.Iterable scalaIterable) {
    JavaConversions.asJavaIterable(scalaIterable)
}

View File

@ -28,7 +28,6 @@ class Main {
StandardTreeWatcher consumerWatcher = new StandardTreeWatcher(consumers)
consumerWatcher.onInitComplete << {
println "standard consumers initialized to ${consumers.size()} (topic, partition) tuples"
poller.dumpMetadata()
}
BrokerTreeWatcher brokerWatcher = new BrokerTreeWatcher(client)
@ -38,15 +37,20 @@ class Main {
cache.listenable.addListener(consumerWatcher)
poller.onDelta << { String groupName, TopicPartition tp, Long delta ->
println "${groupName} ${tp} -- ${delta}"
}
poller.start()
brokerWatcher.start()
cache.start()
println 'started..'
Thread.sleep(10 * 1000)
Thread.sleep(5 * 1000)
println 'exiting..'
poller.die()
poller.join()
return
}
}