Kafka Consumer没有显示消费的分区

时间:2017-03-04 07:45:05

标签: apache-kafka kafka-consumer-api

我正在使用Kafka 0.10.2单个代理和zookeeper 3.4.9单个代理。我有三个主题(t1,t2,t3),每个主题有10个分区。t1中的消息需要20秒才能处理,t2中的消息需要10分钟才能处理,而t3包含大约需要90分钟处理的消息。现在,当我运行我的消费者并为主题t1添加额外消费者以提高并行度时,一切工作正常。但对于主题t2和t3,有时没有任何消费者显示被分配到分区。于是我修改了以下三个消费者属性:

"max.poll.records",10
"max.poll.interval.ms",36000000
"request.timeout.ms",36000000
之后,消费者对所有主题都正常工作了。但是,一旦我向主题t1,t2,t3添加更多消费者,它们就会停止提交偏移量(offset)并陷入反复的重新平衡(rebalance)。我该如何解决这个问题?

我的消费者代码:

class KafkaPollingConsumer implements Runnable {
private static final Logger logger = LoggerFactory.getLogger(KafkaPollingConsumer.class)
    private static final String TAG = "[KafkaPollingConsumer]"
    private final KafkaConsumer<String, byte []> kafkaConsumer
    private Map<TopicPartition,OffsetAndMetadata> currentOffsetsMap = new HashMap<>()
    List topicNameList
    Map kafkaTopicConfigMap = new HashMap<String,Object>()
    Map kafkaTopicMessageListMap = new HashMap<String,List>()

    /**
     * Builds a KafkaConsumer configured for either the Priority or Bulk Kafka node.
     *
     * @param serverType      selects the config branch (Priority or Bulk) — any other value is rejected
     * @param groupName       consumer group id
     * @param topicNameRegex  '|'-separated list of topic names (split literally, not as a regex)
     * @throws IllegalArgumentException if serverType matches neither known node type
     */
    public KafkaPollingConsumer(String serverType, String groupName, String topicNameRegex){
        // Fix: the original format string had only two '{}' placeholders for five arguments,
        // so serverType/groupName/topicNameRegex were silently dropped from the log line.
        logger.debug("{} [Constructor] [Enter] Thread Name {} serverType {} group Name {} TopicNameRegex {}",TAG,Thread.currentThread().getName(),serverType,groupName,topicNameRegex)
        logger.debug("Populating Property for kafak consumer")
        Properties kafkaConsumerProperties = new Properties()
        kafkaConsumerProperties.put("group.id", groupName)
        kafkaConsumerProperties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
        kafkaConsumerProperties.put("value.deserializer", "com.custom.kafkaconsumer.deserializer.CustomObjectDeserializer")
        switch(serverType){
            case KafkaTopicConfigEntity.KAFKA_NODE_TYPE_ENUM.Priority.toString() :
                kafkaConsumerProperties.put("bootstrap.servers",ConfigLoader.conf.kafkaServer.priority.kafkaNode)
                kafkaConsumerProperties.put("enable.auto.commit",ConfigLoader.conf.kafkaServer.priority.consumer.enable.auto.commit)
                kafkaConsumerProperties.put("auto.offset.reset",ConfigLoader.conf.kafkaServer.priority.consumer.auto.offset.reset)
                break
            case KafkaTopicConfigEntity.KAFKA_NODE_TYPE_ENUM.Bulk.toString() :
                kafkaConsumerProperties.put("bootstrap.servers",ConfigLoader.conf.kafkaServer.bulk.kafkaNode)
                kafkaConsumerProperties.put("enable.auto.commit",ConfigLoader.conf.kafkaServer.bulk.consumer.enable.auto.commit)
                kafkaConsumerProperties.put("auto.offset.reset",ConfigLoader.conf.kafkaServer.bulk.consumer.auto.offset.reset)
                // Bulk topics carry slow jobs: cap records per poll and widen the poll/request
                // windows so a long-running batch does not trigger a group rebalance.
                kafkaConsumerProperties.put("max.poll.records",10)
                kafkaConsumerProperties.put("max.poll.interval.ms",900000)
                kafkaConsumerProperties.put("request.timeout.ms",900000)
                break
            default :
                // Fix: 'throw "Invalid server type"' is invalid — Groovy can only throw Throwables.
                throw new IllegalArgumentException("Invalid server type: " + serverType)
        }
        // Fix: this line referenced an undefined variable 'properties' (field is
        // kafkaConsumerProperties) and was missing the TAG argument for the first '{}'.
        logger.debug("{} [Constructor] KafkaConsumer Property Populated {}",TAG,kafkaConsumerProperties.toString())
        kafkaConsumer = new KafkaConsumer<String, byte []>(kafkaConsumerProperties)
        // Pattern.quote: split on the literal '|' character, not the regex alternation operator.
        topicNameList = topicNameRegex.split(Pattern.quote('|'))
        logger.debug("{} [Constructor] Kafkatopic List {}",TAG,topicNameList.toString())
        logger.debug("{} [Constructor] Exit",TAG)
    }

    /**
     * Rebalance hook: commits the offsets tracked so far before partitions are taken away,
     * so the consumer that inherits them does not re-process already-handled records.
     */
    private class HandleRebalance implements ConsumerRebalanceListener {
        // Nothing to do when partitions are handed to this consumer.
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        }

        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            boolean hasTrackedOffsets = currentOffsetsMap != null && !currentOffsetsMap.isEmpty()
            if (hasTrackedOffsets) {
                logger.debug("{} In onPartitionsRevoked Rebalanced ",TAG)
                kafkaConsumer.commitSync(currentOffsetsMap)
            }
        }
    }

    /**
     * Main poll loop. Subscribes to all configured topics, dispatches each record to its
     * DB-configured consumer class/method (batched or not), tracks per-partition offsets
     * and commits them synchronously after every poll. Runs until wakeup() is called from
     * the JVM shutdown hook, then flushes pending batches, commits, and closes the consumer.
     */
    @Override
    void run() {
        logger.debug("{} Starting Thread ThreadName {}",TAG,Thread.currentThread().getName())
        populateKafkaConfigMap()
        initializeKafkaTopicMessageListMap()
        String topicName
        String consumerClassName
        String consumerMethodName
        Boolean isBatchJob
        Integer batchSize = 0
        final Thread mainThread = Thread.currentThread()
        // Shutdown hook: wakeup() makes a blocked poll() throw WakeupException so the loop
        // exits; join() then waits for the finally block (flush + commit + close) to finish.
        Runtime.getRuntime().addShutdownHook(new Thread() {
            public void run() {
                logger.error("{},gracefully shutdowning thread {}",TAG,mainThread.getName())
                kafkaConsumer.wakeup()
                try {
                    mainThread.join()
                } catch (InterruptedException exception) {
                    // Restore the interrupt flag so the JVM's shutdown machinery can see it.
                    Thread.currentThread().interrupt()
                    logger.error("{} Error : {}",TAG,exception.getStackTrace().join("\n"))
                }
            }
        })
        kafkaConsumer.subscribe(topicNameList , new HandleRebalance())
        try{
            while(true){
                logger.debug("{} Starting Consumer with polling time in ms 100",TAG)
                ConsumerRecords kafkaRecords = kafkaConsumer.poll(100)
                for(ConsumerRecord record: kafkaRecords){
                    topicName = record.topic()
                    DBObject kafkaTopicConfigDBObject = kafkaTopicConfigMap.get(topicName)
                    consumerClassName = kafkaTopicConfigDBObject.get(KafkaTopicConfigEntity.CLASS_NAME_KEY)
                    consumerMethodName = kafkaTopicConfigDBObject.get(KafkaTopicConfigEntity.METHOD_NAME_KEY)
                    isBatchJob = kafkaTopicConfigDBObject.get(KafkaTopicConfigEntity.IS_BATCH_JOB_KEY)
                    logger.debug("Details about Message")
                    logger.debug("Thread {}",mainThread.getName())
                    logger.debug("Topic {}",topicName)
                    logger.debug("Partition {}",record.partition().toString())
                    logger.debug("Offset {}",record.offset().toString())
                    logger.debug("clasName {}",consumerClassName)
                    logger.debug("methodName {}",consumerMethodName)
                    logger.debug("isBatchJob {}",isBatchJob.toString())
                    if(isBatchJob == true){
                        batchSize = Integer.parseInt(kafkaTopicConfigDBObject.get(KafkaTopicConfigEntity.BATCH_SIZE_KEY).toString())
                        logger.debug("batchSize {}",batchSize.toString())
                    }
                    Object message = record.value()
                    logger.debug("message {}",message.toString())
                    publishMessageToConsumers(consumerClassName,consumerMethodName,isBatchJob,batchSize,message,topicName)
                    // NOTE(review): sleeping 60s PER RECORD means one poll cycle can take up to
                    // max.poll.records * 60s (10 minutes at max.poll.records=10). If that ever
                    // exceeds max.poll.interval.ms the broker evicts this consumer and the group
                    // rebalances repeatedly — the most likely cause of the stuck-rebalance
                    // symptom described above. Confirm this delay is really intended.
                    Thread.sleep(60000)
                    // Track offset+1 (the NEXT record to read) for this partition, per the
                    // Kafka commit convention.
                    currentOffsetsMap.put(new TopicPartition(record.topic(), record.partition()),new OffsetAndMetadata(record.offset() +1))
                }
                logger.debug("{} Commiting Messages to Kafka",TAG)
                kafkaConsumer.commitSync(currentOffsetsMap)
            }
        }
        catch(InterruptException exception){
            // Fix: Kafka's (unchecked) InterruptException was swallowed without restoring
            // the thread's interrupt status; re-interrupt so callers can still observe it.
            Thread.currentThread().interrupt()
            logger.error("{} In InterruptException",TAG)
            logger.error("{} Exception {}",TAG,exception.getStackTrace().join("\n"))
        }
        catch (WakeupException exception) {
            // Expected path during graceful shutdown (raised by wakeup() in the hook).
            logger.error("{} In WakeUp Exception",TAG)
            logger.error("{} Exception {}",TAG,exception.getStackTrace().join("\n"))
        }
        catch(Exception exception){
            logger.error("{} In Exception",TAG)
            logger.error("{} Exception {}",TAG,exception.getStackTrace().join("\n"))
        }
        finally {
            logger.error("{} In finally commiting remaining offset ",TAG)
            try {
                publishAllKafkaTopicBatchMessages()
                kafkaConsumer.commitSync(currentOffsetsMap)
            } catch (Exception exception) {
                // Fix: after wakeup(), commitSync() here can throw WakeupException; previously
                // that escaped the finally block and skipped close(), leaking the consumer and
                // delaying the group rebalance until the session timed out.
                logger.error("{} Exception while committing in finally {}",TAG,exception.getStackTrace().join("\n"))
            } finally {
                kafkaConsumer.close()
            }
            logger.error("{} Exiting Consumer",TAG)
        }
    }

    /**
     * Routes one record either to the batching path (accumulate until batchSize) or
     * straight to the configured consumer method.
     */
    private void publishMessageToConsumers(String consumerClassName,String consumerMethodName,Boolean isBatchJob,Integer batchSize,Object message, String topicName){
        logger.debug("{} [publishMessageToConsumer] Enter",TAG)
        boolean useBatchPath = (isBatchJob == true)
        if (useBatchPath) {
            publishMessageToBatchConsumer(consumerClassName, consumerMethodName, batchSize, message, topicName)
        } else {
            publishMessageToNonBatchConsumer(consumerClassName, consumerMethodName, message)
        }
        logger.debug("{} [publishMessageToConsumer] Exit",TAG)
    }

    /**
     * Single-message path: invokes the configured consumer method immediately,
     * without any batching.
     */
    private void publishMessageToNonBatchConsumer(String consumerClassName, String consumerMethodName, message){
        logger.debug("{} [publishMessageToNonBatchConsumer] Enter",TAG)
        executeConsumerMethod(consumerClassName, consumerMethodName, message)
        logger.debug("{} [publishMessageToNonBatchConsumer] Exit",TAG)
    }

    /**
     * Batching path: buffers the message in the per-topic list and flushes the whole
     * batch to the configured consumer method once it reaches batchSize.
     */
    private void publishMessageToBatchConsumer(String consumerClassName, String consumerMethodName, Integer batchSize, Object message, String topicName){
        logger.debug("{} [publishMessageToBatchConsumer] Enter",TAG)
        List consumerMessageList = kafkaTopicMessageListMap.get(topicName)
        consumerMessageList.add(message)
        // Fix: was '== batchSize' — if the list ever grows past the threshold (e.g. the
        // configured batch size shrinks between calls), equality never triggers and the
        // batch is never flushed until shutdown. '>=' flushes in that case too.
        if(consumerMessageList.size() >= batchSize){
            logger.debug("{} [publishMessageToBatchConsumer] Pushing Messages In Batches",TAG)
            executeConsumerMethod(consumerClassName, consumerMethodName, consumerMessageList)
            consumerMessageList.clear()
        }
        // Redundant (same list reference is already in the map) but kept for clarity.
        kafkaTopicMessageListMap.put(topicName,consumerMessageList)
        logger.debug("{} [publishMessageToBatchConsumer] Exit",TAG)
    }

    /**
     * Loads each topic's dispatch configuration (consumer class/method, batch settings)
     * from the DB into kafkaTopicConfigMap, keyed by topic name.
     */
    private void populateKafkaConfigMap(){
        logger.debug("{} [populateKafkaConfigMap] Enter",TAG)
        KafkaTopicConfigDBService configService = KafkaTopicConfigDBService.getInstance()
        for (topicName in topicNameList) {
            kafkaTopicConfigMap.put(topicName, configService.findByTopicName(topicName))
        }
        logger.debug("{} [populateKafkaConfigMap] kafkaConfigMap {}",TAG,kafkaTopicConfigMap.toString())
        logger.debug("{} [populateKafkaConfigMap] Exit",TAG)
    }

    /**
     * Seeds kafkaTopicMessageListMap with an empty batch buffer per subscribed topic,
     * so the batching path can always assume a list exists.
     */
    private void initializeKafkaTopicMessageListMap(){
        logger.debug("{} [initializeKafkaTopicMessageListMap] Enter",TAG)
        topicNameList.each { topicName ->
            kafkaTopicMessageListMap.put(topicName,[])
        }
        // Fix: log line carried the wrong method tag '[populateKafkaConfigMap]' (copy-paste).
        logger.debug("{} [initializeKafkaTopicMessageListMap] kafkaTopicMessageListMap {}",TAG,kafkaTopicMessageListMap.toString())
        logger.debug("{} [initializeKafkaTopicMessageListMap] Exit",TAG)
    }

    /**
     * Dynamically invokes the configured consumer method (looked up by class and method
     * name) with the given message(s). Any failure is logged with full context rather
     * than propagated, so one bad message cannot kill the poll loop.
     */
    private void executeConsumerMethod(String className, String methodName, def messages){
        try{
            logger.debug("{} [executeConsumerMethod] Enter",TAG)
            logger.debug("{} [executeConsumerMethod] className  {} methodName {} messages {}",TAG,className,methodName,messages.toString())
            def targetClass = Class.forName(className)
            targetClass."$methodName"(messages)
        } catch (Exception exception){
            logger.error("{} [{}] Error while executing method : {} of class: {} with params : {} - {}", TAG, Thread.currentThread().getName(), methodName,
                    className, messages.toString(), exception.getStackTrace().join("\n"))
        }
        logger.debug("{} [executeConsumerMethod] Exit",TAG)
    }

    /**
     * Flushes every partially-filled batch buffer (called during shutdown) so buffered
     * messages are delivered before the final offset commit.
     */
    private void publishAllKafkaTopicBatchMessages(){
        logger.debug("{} [publishAllKafkaTopicBatchMessages] Enter",TAG)
        kafkaTopicMessageListMap.each { topicName, messageList ->
            DBObject topicConfig = kafkaTopicConfigMap.get(topicName)
            String className = topicConfig.get(KafkaTopicConfigEntity.CLASS_NAME_KEY)
            String methodName = topicConfig.get(KafkaTopicConfigEntity.METHOD_NAME_KEY)
            logger.debug("{} Pushing message in topic {} className {} methodName {} ",TAG,topicName,className,methodName)
            boolean hasPending = messageList != null && !messageList.isEmpty()
            if (hasPending) {
                executeConsumerMethod(className, methodName, messageList)
                messageList.clear()
                kafkaTopicMessageListMap.put(topicName, messageList)
            }
        }
        logger.debug("{} [publishAllKafkaTopicBatchMessages] Exit",TAG)
    }

消费者属性是:

auto.commit.interval.ms = 5000 auto.offset.reset = earliest bootstrap.servers = [localhost:9092] check.crcs = true client.id = consumer-1 connections.max.idle.ms = 540000 enable.auto.commit = false exclude.internal.topics = true fetch.max.bytes = 52428800 fetch.max.wait.ms = 500 fetch.min.bytes = 1 group.id = t1 heartbeat.interval.ms = 3000 interceptor.classes = null key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer max.partition.fetch.bytes = 1048576 max.poll.interval.ms = 36000000 max.poll.records = 10 metadata.max.age.ms = 300000 metric.reporters = [] metrics.num.samples = 2 metrics.recording.level = INFO metrics.sample.window.ms = 30000 partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor] receive.buffer.bytes = 65536 reconnect.backoff.ms = 50 request.timeout.ms = 36000000 retry.backoff.ms = 100 sasl.jaas.config
= null sasl.kerberos.kinit.cmd = /usr/bin/kinit sasl.kerberos.min.time.before.relogin = 60000 sasl.kerberos.service.name = null sasl.kerberos.ticket.renew.jitter =
0.05 sasl.kerberos.ticket.renew.window.factor = 0.8 sasl.mechanism = GSSAPI security.protocol = PLAINTEXT send.buffer.bytes = 131072 session.timeout.ms = 10000 ssl.cipher.suites = null ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1] ssl.endpoint.identification.algorithm = null ssl.key.password = null ssl.keymanager.algorithm = SunX509 ssl.keystore.location = null ssl.keystore.password = null ssl.keystore.type = JKS ssl.protocol = TLS ssl.provider = null ssl.secure.random.implementation = null ssl.trustmanager.algorithm = PKIX ssl.truststore.location = null ssl.truststore.password = null ssl.truststore.type = JKS value.deserializer = class com.custom.kafkaconsumer.deserializer.CustomObjectDeserializer

注意:对于所有三个主题(t1,t2,t3),group.id 都是不同的。

消费者状态:

TOPIC                          PARTITION  CURRENT-OFFSET  LOG-END-OFFSET  LAG        CONSUMER-ID                                       HOST                           CLIENT-ID
-                              -          -               -               -          consumer-1-393fe17d-514e-49b7-a421-ce449af73d14   /127.0.0.1                     consumer-1
-                              -          -               -               -          consumer-1-7b9a53a3-4c82-4e79-9a63-36566ae3f71f   /127.0.0.1                     consumer-1
-                              -          -               -               -          consumer-2-997eb524-71d4-48da-813f-178e95c01edb   /127.0.0.1                     consumer-2
-                              -          -               -               -          consumer-2-8fb34ec9-338c-4f53-91ca-fb4d24586a4c   /127.0.0.1                     consumer-2
t2        8          3219            3515            296        -                                                 -                              -
t2        1          3904            4202            298        -                                                 -                              -
t2        6          3207            3490            283        -                                                 -                              -
t2        0          38661           39055           394        -                                                 -                              -
t2        5          3181            3485            304        -                                                 -                              -
t2        7          3285            3581            296        -                                                 -                              -
t2        4          3914            4223            309        -                                                 -                              -
t2        3          3719            4022            303        -                                                 -                              -
t2        2          3882            4202            320        -                                                 -                              -
t2        9          3354            3626            272        -                                                 -

0 个答案:

没有答案