(window.webpackJsonp=window.webpackJsonp||[]).push([[90],{520:function(e,t,a){"use strict";a.r(t);var n=a(56),r=Object(n.a)({},(function(){var e=this,t=e.$createElement,a=e._self._c||t;return a("ContentSlotsDistributor",{attrs:{"slot-key":e.$parent.slotKey}},[a("h1",{attrs:{id:"spring-for-apache-kafka"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#spring-for-apache-kafka"}},[e._v("#")]),e._v(" Spring for Apache Kafka")]),e._v(" "),a("h2",{attrs:{id:"_1-preface"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_1-preface"}},[e._v("#")]),e._v(" 1. Preface")]),e._v(" "),a("p",[e._v("The Spring for Apache Kafka project applies core Spring concepts to the development of Kafka-based messaging solutions.\nWe provide a “template” as a high-level abstraction for sending messages.\nWe also provide support for Message-driven POJOs.")]),e._v(" "),a("h2",{attrs:{id:"_2-what-s-new"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_2-what-s-new"}},[e._v("#")]),e._v(" 2. What’s new?")]),e._v(" "),a("h3",{attrs:{id:"_2-1-what-s-new-in-2-8-since-2-7"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_2-1-what-s-new-in-2-8-since-2-7"}},[e._v("#")]),e._v(" 2.1. What’s New in 2.8 Since 2.7")]),e._v(" "),a("p",[e._v("This section covers the changes made from version 2.7 to version 2.8.\nFor changes in earlier version, see "),a("a",{attrs:{href:"#history"}},[e._v("[history]")]),e._v(".")]),e._v(" "),a("h4",{attrs:{id:"_2-1-1-kafka-client-version"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_2-1-1-kafka-client-version"}},[e._v("#")]),e._v(" 2.1.1. Kafka Client Version")]),e._v(" "),a("p",[e._v("This version requires the 3.0.0 "),a("code",[e._v("kafka-clients")])]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("When using transactions, "),a("code",[e._v("kafka-clients")]),e._v(" 3.0.0 and later no longer support "),a("code",[e._v("EOSMode.V2")]),e._v(" (aka "),a("code",[e._v("BETA")]),e._v(") (and automatic fallback to "),a("code",[e._v("V1")]),e._v(" - aka "),a("code",[e._v("ALPHA")]),e._v(") with brokers earlier than 2.5; you must therefore override the default "),a("code",[e._v("EOSMode")]),e._v(" ("),a("code",[e._v("V2")]),e._v(") with "),a("code",[e._v("V1")]),e._v(" if your brokers are older (or upgrade your brokers).")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#exactly-once"}},[e._v("Exactly Once Semantics")]),e._v(" and "),a("a",{attrs:{href:"https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics",target:"_blank",rel:"noopener noreferrer"}},[e._v("KIP-447"),a("OutboundLink")],1),e._v(" for more information.")]),e._v(" "),a("h4",{attrs:{id:"_2-1-2-package-changes"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_2-1-2-package-changes"}},[e._v("#")]),e._v(" 2.1.2. Package Changes")]),e._v(" "),a("p",[e._v("Classes and interfaces related to type mapping have been moved from "),a("code",[e._v("…​support.converter")]),e._v(" to "),a("code",[e._v("…​support.mapping")]),e._v(".")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("AbstractJavaTypeMapper")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ClassMapper")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("DefaultJackson2JavaTypeMapper")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("Jackson2JavaTypeMapper")])])])]),e._v(" "),a("h4",{attrs:{id:"_2-1-3-out-of-order-manual-commits"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_2-1-3-out-of-order-manual-commits"}},[e._v("#")]),e._v(" 2.1.3. Out of Order Manual Commits")]),e._v(" "),a("p",[e._v("The listener container can now be configured to accept manual offset commits out of order (usually asynchronously).\nThe container will defer the commit until the missing offset is acknowledged.\nSee "),a("a",{attrs:{href:"#ooo-commits"}},[e._v("Manually Committing Offsets")]),e._v(" for more information.")]),e._v(" "),a("h4",{attrs:{id:"_2-1-4-kafkalistener-changes"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_2-1-4-kafkalistener-changes"}},[e._v("#")]),e._v(" 2.1.4. "),a("code",[e._v("@KafkaListener")]),e._v(" Changes")]),e._v(" "),a("p",[e._v("It is now possible to specify whether the listener method is a batch listener on the method itself.\nThis allows the same container factory to be used for both record and batch listeners.")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#batch-listeners"}},[e._v("Batch Listeners")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("Batch listeners can now handle conversion exceptions.")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#batch-listener-conv-errors"}},[e._v("Conversion Errors with Batch Error Handlers")]),e._v(" for more information.")]),e._v(" "),a("p",[a("code",[e._v("RecordFilterStrategy")]),e._v(", when used with batch listeners, can now filter the entire batch in one call.\nSee the note at the end of "),a("a",{attrs:{href:"#batch-listeners"}},[e._v("Batch Listeners")]),e._v(" for more information.")]),e._v(" "),a("h4",{attrs:{id:"_2-1-5-kafkatemplate-changes"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_2-1-5-kafkatemplate-changes"}},[e._v("#")]),e._v(" 2.1.5. "),a("code",[e._v("KafkaTemplate")]),e._v(" Changes")]),e._v(" "),a("p",[e._v("You can now receive a single record, given the topic, partition and offset.\nSee "),a("a",{attrs:{href:"#kafka-template-receive"}},[e._v("Using "),a("code",[e._v("KafkaTemplate")]),e._v(" to Receive")]),e._v(" for more information.")]),e._v(" "),a("h4",{attrs:{id:"_2-1-6-commonerrorhandler-added"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_2-1-6-commonerrorhandler-added"}},[e._v("#")]),e._v(" 2.1.6. "),a("code",[e._v("CommonErrorHandler")]),e._v(" Added")]),e._v(" "),a("p",[e._v("The legacy "),a("code",[e._v("GenericErrorHandler")]),e._v(" and its sub-interface hierarchies for record an batch listeners have been replaced by a new single interface "),a("code",[e._v("CommonErrorHandler")]),e._v(" with implementations corresponding to most legacy implementations of "),a("code",[e._v("GenericErrorHandler")]),e._v(".\nSee "),a("a",{attrs:{href:"#error-handlers"}},[e._v("Container Error Handlers")]),e._v(" for more information.")]),e._v(" "),a("h4",{attrs:{id:"_2-1-7-listener-container-changes"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_2-1-7-listener-container-changes"}},[e._v("#")]),e._v(" 2.1.7. Listener Container Changes")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("interceptBeforeTx")]),e._v(" container property is now "),a("code",[e._v("true")]),e._v(" by default.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("authorizationExceptionRetryInterval")]),e._v(" property has been renamed to "),a("code",[e._v("authExceptionRetryInterval")]),e._v(" and now applies to "),a("code",[e._v("AuthenticationException")]),e._v(" s in addition to "),a("code",[e._v("AuthorizationException")]),e._v(" s previously.\nBoth exceptions are considered fatal and the container will stop by default, unless this property is set.")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#kafka-container"}},[e._v("Using "),a("code",[e._v("KafkaMessageListenerContainer")])]),e._v(" and "),a("a",{attrs:{href:"#container-props"}},[e._v("Listener Container Properties")]),e._v(" for more information.")]),e._v(" "),a("h4",{attrs:{id:"_2-1-8-serializer-deserializer-changes"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_2-1-8-serializer-deserializer-changes"}},[e._v("#")]),e._v(" 2.1.8. Serializer/Deserializer Changes")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("DelegatingByTopicSerializer")]),e._v(" and "),a("code",[e._v("DelegatingByTopicDeserializer")]),e._v(" are now provided.\nSee "),a("a",{attrs:{href:"#delegating-serialization"}},[e._v("Delegating Serializer and Deserializer")]),e._v(" for more information.")]),e._v(" "),a("h4",{attrs:{id:"_2-1-9-deadletterpublishingrecover-changes"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_2-1-9-deadletterpublishingrecover-changes"}},[e._v("#")]),e._v(" 2.1.9. "),a("code",[e._v("DeadLetterPublishingRecover")]),e._v(" Changes")]),e._v(" "),a("p",[e._v("The property "),a("code",[e._v("stripPreviousExceptionHeaders")]),e._v(" is now "),a("code",[e._v("true")]),e._v(" by default.")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#dlpr-headers"}},[e._v("Managing Dead Letter Record Headers")]),e._v(" for more information.")]),e._v(" "),a("h4",{attrs:{id:"_2-1-10-retryable-topics-changes"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_2-1-10-retryable-topics-changes"}},[e._v("#")]),e._v(" 2.1.10. Retryable Topics Changes")]),e._v(" "),a("p",[e._v("Now you can use the same factory for retryable and non-retryable topics.\nSee "),a("a",{attrs:{href:"#retry-topic-lcf"}},[e._v("Specifying a ListenerContainerFactory")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("There’s now a manageable global list of fatal exceptions that will make the failed record go straight to the DLT.\nRefer to "),a("a",{attrs:{href:"#retry-topic-ex-classifier"}},[e._v("Exception Classifier")]),e._v(" to see how to manage it.")]),e._v(" "),a("p",[e._v("The KafkaBackOffException thrown when using the retryable topics feature is now logged at DEBUG level.\nSee "),a("a",{attrs:{href:"#change-kboe-logging-level"}},[e._v("[change-kboe-logging-level]")]),e._v(" if you need to change the logging level back to WARN or set it to any other level.")]),e._v(" "),a("h2",{attrs:{id:"_3-introduction"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_3-introduction"}},[e._v("#")]),e._v(" 3. Introduction")]),e._v(" "),a("p",[e._v("This first part of the reference documentation is a high-level overview of Spring for Apache Kafka and the underlying concepts and some code snippets that can help you get up and running as quickly as possible.")]),e._v(" "),a("h3",{attrs:{id:"_3-1-quick-tour"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_3-1-quick-tour"}},[e._v("#")]),e._v(" 3.1. Quick Tour")]),e._v(" "),a("p",[e._v("Prerequisites: You must install and run Apache Kafka.\nThen you must put the Spring for Apache Kafka ("),a("code",[e._v("spring-kafka")]),e._v(") JAR and all of its dependencies on your class path.\nThe easiest way to do that is to declare a dependency in your build tool.")]),e._v(" "),a("p",[e._v("If you are not using Spring Boot, declare the "),a("code",[e._v("spring-kafka")]),e._v(" jar as a dependency in your project.")]),e._v(" "),a("p",[e._v("Maven")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("\n org.springframework.kafka\n spring-kafka\n 2.8.3\n\n")])])]),a("p",[e._v("Gradle")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("compile 'org.springframework.kafka:spring-kafka:2.8.3'\n")])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("When using Spring Boot, (and you haven’t used start.spring.io to create your project), omit the version and Boot will automatically bring in the correct version that is compatible with your Boot version:")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Maven")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("\n org.springframework.kafka\n spring-kafka\n\n")])])]),a("p",[e._v("Gradle")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("compile 'org.springframework.kafka:spring-kafka'\n")])])]),a("p",[e._v("However, the quickest way to get started is to use "),a("a",{attrs:{href:"https://start.spring.io",target:"_blank",rel:"noopener noreferrer"}},[e._v("start.spring.io"),a("OutboundLink")],1),e._v(" (or the wizards in Spring Tool Suits and Intellij IDEA) and create a project, selecting 'Spring for Apache Kafka' as a dependency.")]),e._v(" "),a("h4",{attrs:{id:"_3-1-1-compatibility"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_3-1-1-compatibility"}},[e._v("#")]),e._v(" 3.1.1. Compatibility")]),e._v(" "),a("p",[e._v("This quick tour works with the following versions:")]),e._v(" "),a("ul",[a("li",[a("p",[e._v("Apache Kafka Clients 3.0.0")])]),e._v(" "),a("li",[a("p",[e._v("Spring Framework 5.3.x")])]),e._v(" "),a("li",[a("p",[e._v("Minimum Java version: 8")])])]),e._v(" "),a("h4",{attrs:{id:"_3-1-2-getting-started"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_3-1-2-getting-started"}},[e._v("#")]),e._v(" 3.1.2. Getting Started")]),e._v(" "),a("p",[e._v("The simplest way to get started is to use "),a("a",{attrs:{href:"https://start.spring.io",target:"_blank",rel:"noopener noreferrer"}},[e._v("start.spring.io"),a("OutboundLink")],1),e._v(" (or the wizards in Spring Tool Suits and Intellij IDEA) and create a project, selecting 'Spring for Apache Kafka' as a dependency.\nRefer to the "),a("a",{attrs:{href:"https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-kafka",target:"_blank",rel:"noopener noreferrer"}},[e._v("Spring Boot documentation"),a("OutboundLink")],1),e._v(" for more information about its opinionated auto configuration of the infrastructure beans.")]),e._v(" "),a("p",[e._v("Here is a minimal consumer application.")]),e._v(" "),a("h5",{attrs:{id:"spring-boot-consumer-app"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#spring-boot-consumer-app"}},[e._v("#")]),e._v(" Spring Boot Consumer App")]),e._v(" "),a("p",[e._v("Example 1. Application")]),e._v(" "),a("p",[e._v("Java")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@SpringBootApplication\npublic class Application {\n\n public static void main(String[] args) {\n SpringApplication.run(Application.class, args);\n }\n\n @Bean\n public NewTopic topic() {\n return TopicBuilder.name("topic1")\n .partitions(10)\n .replicas(1)\n .build();\n }\n\n @KafkaListener(id = "myId", topics = "topic1")\n public void listen(String in) {\n System.out.println(in);\n }\n\n}\n')])])]),a("p",[e._v("Kotlin")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@SpringBootApplication\nclass Application {\n\n @Bean\n fun topic() = NewTopic("topic1", 10, 1)\n\n @KafkaListener(id = "myId", topics = ["topic1"])\n fun listen(value: String?) {\n println(value)\n }\n\n}\n\nfun main(args: Array) = runApplication(*args)\n')])])]),a("p",[e._v("Example 2. application.properties")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("spring.kafka.consumer.auto-offset-reset=earliest\n")])])]),a("p",[e._v("The "),a("code",[e._v("NewTopic")]),e._v(" bean causes the topic to be created on the broker; it is not needed if the topic already exists.")]),e._v(" "),a("h5",{attrs:{id:"spring-boot-producer-app"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#spring-boot-producer-app"}},[e._v("#")]),e._v(" Spring Boot Producer App")]),e._v(" "),a("p",[e._v("Example 3. Application")]),e._v(" "),a("p",[e._v("Java")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@SpringBootApplication\npublic class Application {\n\n public static void main(String[] args) {\n SpringApplication.run(Application.class, args);\n }\n\n @Bean\n public NewTopic topic() {\n return TopicBuilder.name("topic1")\n .partitions(10)\n .replicas(1)\n .build();\n }\n\n @Bean\n public ApplicationRunner runner(KafkaTemplate template) {\n return args -> {\n template.send("topic1", "test");\n };\n }\n\n}\n')])])]),a("p",[e._v("Kotlin")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@SpringBootApplication\nclass Application {\n\n @Bean\n fun topic() = NewTopic("topic1", 10, 1)\n\n @Bean\n fun runner(template: KafkaTemplate) =\n ApplicationRunner { template.send("topic1", "test") }\n\n companion object {\n @JvmStatic\n fun main(args: Array) = runApplication(*args)\n }\n\n}\n')])])]),a("h5",{attrs:{id:""}},[a("a",{staticClass:"header-anchor",attrs:{href:"#"}},[e._v("#")])]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Spring for Apache Kafka is designed to be used in a Spring Application Context."),a("br"),e._v("For example, if you create the listener container yourself outside of a Spring context, not all functions will work unless you satisfy all of the "),a("code",[e._v("…​Aware")]),e._v(" interfaces that the container implements.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Here is an example of an application that does not use Spring Boot; it has both a "),a("code",[e._v("Consumer")]),e._v(" and "),a("code",[e._v("Producer")]),e._v(".")]),e._v(" "),a("p",[e._v("Example 4. Without Boot")]),e._v(" "),a("p",[e._v("Java")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('public class Sender {\n\n\tpublic static void main(String[] args) {\n\t\tAnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(Config.class);\n\t\tcontext.getBean(Sender.class).send("test", 42);\n\t}\n\n\tprivate final KafkaTemplate template;\n\n\tpublic Sender(KafkaTemplate template) {\n\t\tthis.template = template;\n\t}\n\n\tpublic void send(String toSend, int key) {\n\t\tthis.template.send("topic1", key, toSend);\n\t}\n\n}\n\npublic class Listener {\n\n @KafkaListener(id = "listen1", topics = "topic1")\n public void listen1(String in) {\n System.out.println(in);\n }\n\n}\n\n@Configuration\n@EnableKafka\npublic class Config {\n\n @Bean\n ConcurrentKafkaListenerContainerFactory\n kafkaListenerContainerFactory(ConsumerFactory consumerFactory) {\n ConcurrentKafkaListenerContainerFactory factory =\n new ConcurrentKafkaListenerContainerFactory<>();\n factory.setConsumerFactory(consumerFactory);\n return factory;\n }\n\n @Bean\n public ConsumerFactory consumerFactory() {\n return new DefaultKafkaConsumerFactory<>(consumerProps());\n }\n\n private Map consumerProps() {\n Map props = new HashMap<>();\n props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");\n props.put(ConsumerConfig.GROUP_ID_CONFIG, "group");\n props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);\n props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);\n props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");\n // ...\n return props;\n }\n\n @Bean\n public Sender sender(KafkaTemplate template) {\n return new Sender(template);\n }\n\n @Bean\n public Listener listener() {\n return new Listener();\n }\n\n @Bean\n public ProducerFactory producerFactory() {\n return new DefaultKafkaProducerFactory<>(senderProps());\n }\n\n private Map senderProps() {\n Map props = new HashMap<>();\n props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");\n props.put(ProducerConfig.LINGER_MS_CONFIG, 10);\n props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);\n props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);\n //...\n return props;\n }\n\n @Bean\n public KafkaTemplate kafkaTemplate(ProducerFactory producerFactory) {\n return new KafkaTemplate(producerFactory);\n }\n\n}\n')])])]),a("p",[e._v("Kotlin")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('class Sender(private val template: KafkaTemplate) {\n\n fun send(toSend: String, key: Int) {\n template.send("topic1", key, toSend)\n }\n\n}\n\nclass Listener {\n\n @KafkaListener(id = "listen1", topics = ["topic1"])\n fun listen1(`in`: String) {\n println(`in`)\n }\n\n}\n\n@Configuration\n@EnableKafka\nclass Config {\n\n @Bean\n fun kafkaListenerContainerFactory(consumerFactory: ConsumerFactory) =\n ConcurrentKafkaListenerContainerFactory().also { it.consumerFactory = consumerFactory }\n\n @Bean\n fun consumerFactory() = DefaultKafkaConsumerFactory(consumerProps)\n\n val consumerProps = mapOf(\n ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG to "localhost:9092",\n ConsumerConfig.GROUP_ID_CONFIG to "group",\n ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG to IntegerDeserializer::class.java,\n ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG to StringDeserializer::class.java,\n ConsumerConfig.AUTO_OFFSET_RESET_CONFIG to "earliest"\n )\n\n @Bean\n fun sender(template: KafkaTemplate) = Sender(template)\n\n @Bean\n fun listener() = Listener()\n\n @Bean\n fun producerFactory() = DefaultKafkaProducerFactory(senderProps)\n\n val senderProps = mapOf(\n ProducerConfig.BOOTSTRAP_SERVERS_CONFIG to "localhost:9092",\n ProducerConfig.LINGER_MS_CONFIG to 10,\n ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG to IntegerSerializer::class.java,\n ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG to StringSerializer::class.java\n )\n\n @Bean\n fun kafkaTemplate(producerFactory: ProducerFactory) = KafkaTemplate(producerFactory)\n\n}\n')])])]),a("p",[e._v("As you can see, you have to define several infrastructure beans when not using Spring Boot.")]),e._v(" "),a("h2",{attrs:{id:"_4-reference"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-reference"}},[e._v("#")]),e._v(" 4. Reference")]),e._v(" "),a("p",[e._v("This part of the reference documentation details the various components that comprise Spring for Apache Kafka.\nThe "),a("a",{attrs:{href:"#kafka"}},[e._v("main chapter")]),e._v(" covers the core classes to develop a Kafka application with Spring.")]),e._v(" "),a("h3",{attrs:{id:"_4-1-using-spring-for-apache-kafka"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-using-spring-for-apache-kafka"}},[e._v("#")]),e._v(" 4.1. Using Spring for Apache Kafka")]),e._v(" "),a("p",[e._v("This section offers detailed explanations of the various concerns that impact using Spring for Apache Kafka.\nFor a quick but less detailed introduction, see "),a("a",{attrs:{href:"#quick-tour"}},[e._v("Quick Tour")]),e._v(".")]),e._v(" "),a("h4",{attrs:{id:"_4-1-1-connecting-to-kafka"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-1-connecting-to-kafka"}},[e._v("#")]),e._v(" 4.1.1. Connecting to Kafka")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("KafkaAdmin")]),e._v(" - see "),a("a",{attrs:{href:"#configuring-topics"}},[e._v("Configuring Topics")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ProducerFactory")]),e._v(" - see "),a("a",{attrs:{href:"#sending-messages"}},[e._v("Sending Messages")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ConsumerFactory")]),e._v(" - see "),a("a",{attrs:{href:"#receiving-messages"}},[e._v("Receiving Messages")])])])]),e._v(" "),a("p",[e._v("Starting with version 2.5, each of these extends "),a("code",[e._v("KafkaResourceFactory")]),e._v(".\nThis allows changing the bootstrap servers at runtime by adding a "),a("code",[e._v("Supplier")]),e._v(" to their configuration: "),a("code",[e._v("setBootstrapServersSupplier(() → …​)")]),e._v(".\nThis will be called for all new connections to get the list of servers.\nConsumers and Producers are generally long-lived.\nTo close existing Producers, call "),a("code",[e._v("reset()")]),e._v(" on the "),a("code",[e._v("DefaultKafkaProducerFactory")]),e._v(".\nTo close existing Consumers, call "),a("code",[e._v("stop()")]),e._v(" (and then "),a("code",[e._v("start()")]),e._v(") on the "),a("code",[e._v("KafkaListenerEndpointRegistry")]),e._v(" and/or "),a("code",[e._v("stop()")]),e._v(" and "),a("code",[e._v("start()")]),e._v(" on any other listener container beans.")]),e._v(" "),a("p",[e._v("For convenience, the framework also provides an "),a("code",[e._v("ABSwitchCluster")]),e._v(" which supports two sets of bootstrap servers; one of which is active at any time.\nConfigure the "),a("code",[e._v("ABSwitchCluster")]),e._v(" and add it to the producer and consumer factories, and the "),a("code",[e._v("KafkaAdmin")]),e._v(", by calling "),a("code",[e._v("setBootstrapServersSupplier()")]),e._v(".\nWhen you want to switch, call "),a("code",[e._v("primary()")]),e._v(" or "),a("code",[e._v("secondary()")]),e._v(" and call "),a("code",[e._v("reset()")]),e._v(" on the producer factory to establish new connection(s); for consumers, "),a("code",[e._v("stop()")]),e._v(" and "),a("code",[e._v("start()")]),e._v(" all listener containers.\nWhen using "),a("code",[e._v("@KafkaListener")]),e._v(" s, "),a("code",[e._v("stop()")]),e._v(" and "),a("code",[e._v("start()")]),e._v(" the "),a("code",[e._v("KafkaListenerEndpointRegistry")]),e._v(" bean.")]),e._v(" "),a("p",[e._v("See the Javadocs for more information.")]),e._v(" "),a("h5",{attrs:{id:"factory-listeners"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#factory-listeners"}},[e._v("#")]),e._v(" Factory Listeners")]),e._v(" "),a("p",[e._v("Starting with version 2.5, the "),a("code",[e._v("DefaultKafkaProducerFactory")]),e._v(" and "),a("code",[e._v("DefaultKafkaConsumerFactory")]),e._v(" can be configured with a "),a("code",[e._v("Listener")]),e._v(" to receive notifications whenever a producer or consumer is created or closed.")]),e._v(" "),a("p",[e._v("Producer Factory Listener")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("interface Listener {\n\n default void producerAdded(String id, Producer producer) {\n }\n\n default void producerRemoved(String id, Producer producer) {\n }\n\n}\n")])])]),a("p",[e._v("Consumer Factory Listener")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("interface Listener {\n\n default void consumerAdded(String id, Consumer consumer) {\n }\n\n default void consumerRemoved(String id, Consumer consumer) {\n }\n\n}\n")])])]),a("p",[e._v("In each case, the "),a("code",[e._v("id")]),e._v(" is created by appending the "),a("code",[e._v("client-id")]),e._v(" property (obtained from the "),a("code",[e._v("metrics()")]),e._v(" after creation) to the factory "),a("code",[e._v("beanName")]),e._v(" property, separated by "),a("code",[e._v(".")]),e._v(".")]),e._v(" "),a("p",[e._v("These listeners can be used, for example, to create and bind a Micrometer "),a("code",[e._v("KafkaClientMetrics")]),e._v(" instance when a new client is created (and close it when the client is closed).")]),e._v(" "),a("p",[e._v("The framework provides listeners that do exactly that; see "),a("a",{attrs:{href:"#micrometer-native"}},[e._v("Micrometer Native Metrics")]),e._v(".")]),e._v(" "),a("h4",{attrs:{id:"_4-1-2-configuring-topics"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-2-configuring-topics"}},[e._v("#")]),e._v(" 4.1.2. Configuring Topics")]),e._v(" "),a("p",[e._v("If you define a "),a("code",[e._v("KafkaAdmin")]),e._v(" bean in your application context, it can automatically add topics to the broker.\nTo do so, you can add a "),a("code",[e._v("NewTopic")]),e._v(" "),a("code",[e._v("@Bean")]),e._v(" for each topic to the application context.\nVersion 2.3 introduced a new class "),a("code",[e._v("TopicBuilder")]),e._v(" to make creation of such beans more convenient.\nThe following example shows how to do so:")]),e._v(" "),a("p",[e._v("Java")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic KafkaAdmin admin() {\n Map configs = new HashMap<>();\n configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");\n return new KafkaAdmin(configs);\n}\n\n@Bean\npublic NewTopic topic1() {\n return TopicBuilder.name("thing1")\n .partitions(10)\n .replicas(3)\n .compact()\n .build();\n}\n\n@Bean\npublic NewTopic topic2() {\n return TopicBuilder.name("thing2")\n .partitions(10)\n .replicas(3)\n .config(TopicConfig.COMPRESSION_TYPE_CONFIG, "zstd")\n .build();\n}\n\n@Bean\npublic NewTopic topic3() {\n return TopicBuilder.name("thing3")\n .assignReplicas(0, Arrays.asList(0, 1))\n .assignReplicas(1, Arrays.asList(1, 2))\n .assignReplicas(2, Arrays.asList(2, 0))\n .config(TopicConfig.COMPRESSION_TYPE_CONFIG, "zstd")\n .build();\n}\n')])])]),a("p",[e._v("Kotlin")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\nfun admin() = KafkaAdmin(mapOf(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG to "localhost:9092"))\n\n@Bean\nfun topic1() =\n TopicBuilder.name("thing1")\n .partitions(10)\n .replicas(3)\n .compact()\n .build()\n\n@Bean\nfun topic2() =\n TopicBuilder.name("thing2")\n .partitions(10)\n .replicas(3)\n .config(TopicConfig.COMPRESSION_TYPE_CONFIG, "zstd")\n .build()\n\n@Bean\nfun topic3() =\n TopicBuilder.name("thing3")\n .assignReplicas(0, Arrays.asList(0, 1))\n .assignReplicas(1, Arrays.asList(1, 2))\n .assignReplicas(2, Arrays.asList(2, 0))\n .config(TopicConfig.COMPRESSION_TYPE_CONFIG, "zstd")\n .build()\n')])])]),a("p",[e._v("Starting with version 2.6, you can omit "),a("code",[e._v(".partitions()")]),e._v(" and/or "),a("code",[e._v("replicas()")]),e._v(" and the broker defaults will be applied to those properties.\nThe broker version must be at least 2.4.0 to support this feature - see "),a("a",{attrs:{href:"https://cwiki.apache.org/confluence/display/KAFKA/KIP-464%3A+Defaults+for+AdminClient%23createTopic",target:"_blank",rel:"noopener noreferrer"}},[e._v("KIP-464"),a("OutboundLink")],1),e._v(".")]),e._v(" "),a("p",[e._v("Java")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic NewTopic topic4() {\n return TopicBuilder.name("defaultBoth")\n .build();\n}\n\n@Bean\npublic NewTopic topic5() {\n return TopicBuilder.name("defaultPart")\n .replicas(1)\n .build();\n}\n\n@Bean\npublic NewTopic topic6() {\n return TopicBuilder.name("defaultRepl")\n .partitions(3)\n .build();\n}\n')])])]),a("p",[e._v("Kotlin")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\nfun topic4() = TopicBuilder.name("defaultBoth").build()\n\n@Bean\nfun topic5() = TopicBuilder.name("defaultPart").replicas(1).build()\n\n@Bean\nfun topic6() = TopicBuilder.name("defaultRepl").partitions(3).build()\n')])])]),a("p",[e._v("Starting with version 2.7, you can declare multiple "),a("code",[e._v("NewTopic")]),e._v(" s in a single "),a("code",[e._v("KafkaAdmin.NewTopics")]),e._v(" bean definition:")]),e._v(" "),a("p",[e._v("Java")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic KafkaAdmin.NewTopics topics456() {\n return new NewTopics(\n TopicBuilder.name("defaultBoth")\n .build(),\n TopicBuilder.name("defaultPart")\n .replicas(1)\n .build(),\n TopicBuilder.name("defaultRepl")\n .partitions(3)\n .build());\n}\n')])])]),a("p",[e._v("Kotlin")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\nfun topics456() = KafkaAdmin.NewTopics(\n TopicBuilder.name("defaultBoth")\n .build(),\n TopicBuilder.name("defaultPart")\n .replicas(1)\n .build(),\n TopicBuilder.name("defaultRepl")\n .partitions(3)\n .build()\n)\n')])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("When using Spring Boot, a "),a("code",[e._v("KafkaAdmin")]),e._v(" bean is automatically registered so you only need the "),a("code",[e._v("NewTopic")]),e._v(" (and/or "),a("code",[e._v("NewTopics")]),e._v(") "),a("code",[e._v("@Bean")]),e._v(" s.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("By default, if the broker is not available, a message is logged, but the context continues to load.\nYou can programmatically invoke the admin’s "),a("code",[e._v("initialize()")]),e._v(" method to try again later.\nIf you wish this condition to be considered fatal, set the admin’s "),a("code",[e._v("fatalIfBrokerNotAvailable")]),e._v(" property to "),a("code",[e._v("true")]),e._v(".\nThe context then fails to initialize.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If the broker supports it (1.0.0 or higher), the admin increases the number of partitions if it is found that an existing topic has fewer partitions than the "),a("code",[e._v("NewTopic.numPartitions")]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Starting with version 2.7, the "),a("code",[e._v("KafkaAdmin")]),e._v(" provides methods to create and examine topics at runtime.")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("createOrModifyTopics")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("describeTopics")])])])]),e._v(" "),a("p",[e._v("For more advanced features, you can use the "),a("code",[e._v("AdminClient")]),e._v(" directly.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Autowired\nprivate KafkaAdmin admin;\n\n...\n\n AdminClient client = AdminClient.create(admin.getConfigurationProperties());\n ...\n client.close();\n")])])]),a("h4",{attrs:{id:"_4-1-3-sending-messages"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-3-sending-messages"}},[e._v("#")]),e._v(" 4.1.3. Sending Messages")]),e._v(" "),a("p",[e._v("This section covers how to send messages.")]),e._v(" "),a("h5",{attrs:{id:"using-kafkatemplate"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#using-kafkatemplate"}},[e._v("#")]),e._v(" Using "),a("code",[e._v("KafkaTemplate")])]),e._v(" "),a("p",[e._v("This section covers how to use "),a("code",[e._v("KafkaTemplate")]),e._v(" to send messages.")]),e._v(" "),a("h6",{attrs:{id:"overview"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#overview"}},[e._v("#")]),e._v(" Overview")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("KafkaTemplate")]),e._v(" wraps a producer and provides convenience methods to send data to Kafka topics.\nThe following listing shows the relevant methods from "),a("code",[e._v("KafkaTemplate")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("ListenableFuture> sendDefault(V data);\n\nListenableFuture> sendDefault(K key, V data);\n\nListenableFuture> sendDefault(Integer partition, K key, V data);\n\nListenableFuture> sendDefault(Integer partition, Long timestamp, K key, V data);\n\nListenableFuture> send(String topic, V data);\n\nListenableFuture> send(String topic, K key, V data);\n\nListenableFuture> send(String topic, Integer partition, K key, V data);\n\nListenableFuture> send(String topic, Integer partition, Long timestamp, K key, V data);\n\nListenableFuture> send(ProducerRecord record);\n\nListenableFuture> send(Message message);\n\nMap metrics();\n\nList partitionsFor(String topic);\n\n T execute(ProducerCallback callback);\n\n// Flush the producer.\n\nvoid flush();\n\ninterface ProducerCallback {\n\n T doInKafka(Producer producer);\n\n}\n")])])]),a("p",[e._v("See the "),a("a",{attrs:{href:"https://docs.spring.io/spring-kafka/api/org/springframework/kafka/core/KafkaTemplate.html",target:"_blank",rel:"noopener noreferrer"}},[e._v("Javadoc"),a("OutboundLink")],1),e._v(" for more detail.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("sendDefault")]),e._v(" API requires that a default topic has been provided to the template.")]),e._v(" "),a("p",[e._v("The API takes in a "),a("code",[e._v("timestamp")]),e._v(" as a parameter and stores this timestamp in the record.\nHow the user-provided timestamp is stored depends on the timestamp type configured on the Kafka topic.\nIf the topic is configured to use "),a("code",[e._v("CREATE_TIME")]),e._v(", the user specified timestamp is recorded (or generated if not specified).\nIf the topic is configured to use "),a("code",[e._v("LOG_APPEND_TIME")]),e._v(", the user-specified timestamp is ignored and the broker adds in the local broker time.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("metrics")]),e._v(" and "),a("code",[e._v("partitionsFor")]),e._v(" methods delegate to the same methods on the underlying "),a("a",{attrs:{href:"https://kafka.apache.org/20/javadoc/org/apache/kafka/clients/producer/Producer.html",target:"_blank",rel:"noopener noreferrer"}},[a("code",[e._v("Producer")]),a("OutboundLink")],1),e._v(".\nThe "),a("code",[e._v("execute")]),e._v(" method provides direct access to the underlying "),a("a",{attrs:{href:"https://kafka.apache.org/20/javadoc/org/apache/kafka/clients/producer/Producer.html",target:"_blank",rel:"noopener noreferrer"}},[a("code",[e._v("Producer")]),a("OutboundLink")],1),e._v(".")]),e._v(" "),a("p",[e._v("To use the template, you can configure a producer factory and provide it in the template’s constructor.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic ProducerFactory producerFactory() {\n return new DefaultKafkaProducerFactory<>(producerConfigs());\n}\n\n@Bean\npublic Map producerConfigs() {\n Map props = new HashMap<>();\n props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");\n props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);\n props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);\n // See https://kafka.apache.org/documentation/#producerconfigs for more properties\n return props;\n}\n\n@Bean\npublic KafkaTemplate kafkaTemplate() {\n return new KafkaTemplate(producerFactory());\n}\n')])])]),a("p",[e._v("Starting with version 2.5, you can now override the factory’s "),a("code",[e._v("ProducerConfig")]),e._v(" properties to create templates with different producer configurations from the same factory.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic KafkaTemplate stringTemplate(ProducerFactory pf) {\n return new KafkaTemplate<>(pf);\n}\n\n@Bean\npublic KafkaTemplate bytesTemplate(ProducerFactory pf) {\n return new KafkaTemplate<>(pf,\n Collections.singletonMap(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class));\n}\n")])])]),a("p",[e._v("Note that a bean of type "),a("code",[e._v("ProducerFactory")]),e._v(" (such as the one auto-configured by Spring Boot) can be referenced with different narrowed generic types.")]),e._v(" "),a("p",[e._v("You can also configure the template by using standard "),a("code",[e._v("")]),e._v(" definitions.")]),e._v(" "),a("p",[e._v("Then, to use the template, you can invoke one of its methods.")]),e._v(" "),a("p",[e._v("When you use the methods with a "),a("code",[e._v("Message")]),e._v(" parameter, the topic, partition, and key information is provided in a message header that includes the following items:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("KafkaHeaders.TOPIC")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.PARTITION_ID")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.MESSAGE_KEY")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.TIMESTAMP")])])])]),e._v(" "),a("p",[e._v("The message payload is the data.")]),e._v(" "),a("p",[e._v("Optionally, you can configure the "),a("code",[e._v("KafkaTemplate")]),e._v(" with a "),a("code",[e._v("ProducerListener")]),e._v(" to get an asynchronous callback with the results of the send (success or failure) instead of waiting for the "),a("code",[e._v("Future")]),e._v(" to complete.\nThe following listing shows the definition of the "),a("code",[e._v("ProducerListener")]),e._v(" interface:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public interface ProducerListener {\n\n void onSuccess(ProducerRecord producerRecord, RecordMetadata recordMetadata);\n\n void onError(ProducerRecord producerRecord, RecordMetadata recordMetadata,\n Exception exception);\n\n}\n")])])]),a("p",[e._v("By default, the template is configured with a "),a("code",[e._v("LoggingProducerListener")]),e._v(", which logs errors and does nothing when the send is successful.")]),e._v(" "),a("p",[e._v("For convenience, default method implementations are provided in case you want to implement only one of the methods.")]),e._v(" "),a("p",[e._v("Notice that the send methods return a "),a("code",[e._v("ListenableFuture")]),e._v(".\nYou can register a callback with the listener to receive the result of the send asynchronously.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('ListenableFuture> future = template.send("myTopic", "something");\nfuture.addCallback(new ListenableFutureCallback>() {\n\n @Override\n public void onSuccess(SendResult result) {\n ...\n }\n\n @Override\n public void onFailure(Throwable ex) {\n ...\n }\n\n});\n')])])]),a("p",[a("code",[e._v("SendResult")]),e._v(" has two properties, a "),a("code",[e._v("ProducerRecord")]),e._v(" and "),a("code",[e._v("RecordMetadata")]),e._v(".\nSee the Kafka API documentation for information about those objects.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("Throwable")]),e._v(" in "),a("code",[e._v("onFailure")]),e._v(" can be cast to a "),a("code",[e._v("KafkaProducerException")]),e._v("; its "),a("code",[e._v("failedProducerRecord")]),e._v(" property contains the failed record.")]),e._v(" "),a("p",[e._v("Starting with version 2.5, you can use a "),a("code",[e._v("KafkaSendCallback")]),e._v(" instead of a "),a("code",[e._v("ListenableFutureCallback")]),e._v(", making it easier to extract the failed "),a("code",[e._v("ProducerRecord")]),e._v(", avoiding the need to cast the "),a("code",[e._v("Throwable")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('ListenableFuture> future = template.send("topic", 1, "thing");\nfuture.addCallback(new KafkaSendCallback() {\n\n @Override\n public void onSuccess(SendResult result) {\n ...\n }\n\n @Override\n public void onFailure(KafkaProducerException ex) {\n ProducerRecord failed = ex.getFailedProducerRecord();\n ...\n }\n\n});\n')])])]),a("p",[e._v("You can also use a pair of lambdas:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('ListenableFuture> future = template.send("topic", 1, "thing");\nfuture.addCallback(result -> {\n ...\n }, (KafkaFailureCallback) ex -> {\n ProducerRecord failed = ex.getFailedProducerRecord();\n ...\n });\n')])])]),a("p",[e._v("If you wish to block the sending thread to await the result, you can invoke the future’s "),a("code",[e._v("get()")]),e._v(" method; using the method with a timeout is recommended.\nYou may wish to invoke "),a("code",[e._v("flush()")]),e._v(" before waiting or, for convenience, the template has a constructor with an "),a("code",[e._v("autoFlush")]),e._v(" parameter that causes the template to "),a("code",[e._v("flush()")]),e._v(" on each send.\nFlushing is only needed if you have set the "),a("code",[e._v("linger.ms")]),e._v(" producer property and want to immediately send a partial batch.")]),e._v(" "),a("h6",{attrs:{id:"examples"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#examples"}},[e._v("#")]),e._v(" Examples")]),e._v(" "),a("p",[e._v("This section shows examples of sending messages to Kafka:")]),e._v(" "),a("p",[e._v("Example 5. Non Blocking (Async)")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public void sendToKafka(final MyOutputData data) {\n final ProducerRecord record = createRecord(data);\n\n ListenableFuture> future = template.send(record);\n future.addCallback(new KafkaSendCallback() {\n\n @Override\n public void onSuccess(SendResult result) {\n handleSuccess(data);\n }\n\n @Override\n public void onFailure(KafkaProducerException ex) {\n handleFailure(data, record, ex);\n }\n\n });\n}\n")])])]),a("p",[e._v("Blocking (Sync)")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public void sendToKafka(final MyOutputData data) {\n final ProducerRecord record = createRecord(data);\n\n try {\n template.send(record).get(10, TimeUnit.SECONDS);\n handleSuccess(data);\n }\n catch (ExecutionException e) {\n handleFailure(data, record, e.getCause());\n }\n catch (TimeoutException | InterruptedException e) {\n handleFailure(data, record, e);\n }\n}\n")])])]),a("p",[e._v("Note that the cause of the "),a("code",[e._v("ExecutionException")]),e._v(" is "),a("code",[e._v("KafkaProducerException")]),e._v(" with the "),a("code",[e._v("failedProducerRecord")]),e._v(" property.")]),e._v(" "),a("h5",{attrs:{id:"using-routingkafkatemplate"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#using-routingkafkatemplate"}},[e._v("#")]),e._v(" Using "),a("code",[e._v("RoutingKafkaTemplate")])]),e._v(" "),a("p",[e._v("Starting with version 2.5, you can use a "),a("code",[e._v("RoutingKafkaTemplate")]),e._v(" to select the producer at runtime, based on the destination "),a("code",[e._v("topic")]),e._v(" name.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The routing template does "),a("strong",[e._v("not")]),e._v(" support transactions, "),a("code",[e._v("execute")]),e._v(", "),a("code",[e._v("flush")]),e._v(", or "),a("code",[e._v("metrics")]),e._v(" operations because the topic is not known for those operations.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("The template requires a map of "),a("code",[e._v("java.util.regex.Pattern")]),e._v(" to "),a("code",[e._v("ProducerFactory")]),e._v(" instances.\nThis map should be ordered (e.g. a "),a("code",[e._v("LinkedHashMap")]),e._v(") because it is traversed in order; you should add more specific patterns at the beginning.")]),e._v(" "),a("p",[e._v("The following simple Spring Boot application provides an example of how to use the same template to send to different topics, each using a different value serializer.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@SpringBootApplication\npublic class Application {\n\n public static void main(String[] args) {\n SpringApplication.run(Application.class, args);\n }\n\n @Bean\n public RoutingKafkaTemplate routingTemplate(GenericApplicationContext context,\n ProducerFactory pf) {\n\n // Clone the PF with a different Serializer, register with Spring for shutdown\n Map configs = new HashMap<>(pf.getConfigurationProperties());\n configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);\n DefaultKafkaProducerFactory bytesPF = new DefaultKafkaProducerFactory<>(configs);\n context.registerBean(DefaultKafkaProducerFactory.class, "bytesPF", bytesPF);\n\n Map> map = new LinkedHashMap<>();\n map.put(Pattern.compile("two"), bytesPF);\n map.put(Pattern.compile(".+"), pf); // Default PF with StringSerializer\n return new RoutingKafkaTemplate(map);\n }\n\n @Bean\n public ApplicationRunner runner(RoutingKafkaTemplate routingTemplate) {\n return args -> {\n routingTemplate.send("one", "thing1");\n routingTemplate.send("two", "thing2".getBytes());\n };\n }\n\n}\n')])])]),a("p",[e._v("The corresponding "),a("code",[e._v("@KafkaListener")]),e._v(" s for this example are shown in "),a("a",{attrs:{href:"#annotation-properties"}},[e._v("Annotation Properties")]),e._v(".")]),e._v(" "),a("p",[e._v("For another technique to achieve similar results, but with the additional capability of sending different types to the same topic, see "),a("a",{attrs:{href:"#delegating-serialization"}},[e._v("Delegating Serializer and Deserializer")]),e._v(".")]),e._v(" "),a("h5",{attrs:{id:"using-defaultkafkaproducerfactory"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#using-defaultkafkaproducerfactory"}},[e._v("#")]),e._v(" Using "),a("code",[e._v("DefaultKafkaProducerFactory")])]),e._v(" "),a("p",[e._v("As seen in "),a("a",{attrs:{href:"#kafka-template"}},[e._v("Using "),a("code",[e._v("KafkaTemplate")])]),e._v(", a "),a("code",[e._v("ProducerFactory")]),e._v(" is used to create the producer.")]),e._v(" "),a("p",[e._v("When not using "),a("a",{attrs:{href:"#transactions"}},[e._v("Transactions")]),e._v(", by default, the "),a("code",[e._v("DefaultKafkaProducerFactory")]),e._v(" creates a singleton producer used by all clients, as recommended in the "),a("code",[e._v("KafkaProducer")]),e._v(" javadocs.\nHowever, if you call "),a("code",[e._v("flush()")]),e._v(" on the template, this can cause delays for other threads using the same producer.\nStarting with version 2.3, the "),a("code",[e._v("DefaultKafkaProducerFactory")]),e._v(" has a new property "),a("code",[e._v("producerPerThread")]),e._v(".\nWhen set to "),a("code",[e._v("true")]),e._v(", the factory will create (and cache) a separate producer for each thread, to avoid this issue.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("When "),a("code",[e._v("producerPerThread")]),e._v(" is "),a("code",[e._v("true")]),e._v(", user code "),a("strong",[e._v("must")]),e._v(" call "),a("code",[e._v("closeThreadBoundProducer()")]),e._v(" on the factory when the producer is no longer needed."),a("br"),e._v("This will physically close the producer and remove it from the "),a("code",[e._v("ThreadLocal")]),e._v("."),a("br"),e._v("Calling "),a("code",[e._v("reset()")]),e._v(" or "),a("code",[e._v("destroy()")]),e._v(" will not clean up these producers.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Also see "),a("a",{attrs:{href:"#tx-template-mixed"}},[a("code",[e._v("KafkaTemplate")]),e._v(" Transactional and non-Transactional Publishing")]),e._v(".")]),e._v(" "),a("p",[e._v("When creating a "),a("code",[e._v("DefaultKafkaProducerFactory")]),e._v(", key and/or value "),a("code",[e._v("Serializer")]),e._v(" classes can be picked up from configuration by calling the constructor that only takes in a Map of properties (see example in "),a("a",{attrs:{href:"#kafka-template"}},[e._v("Using "),a("code",[e._v("KafkaTemplate")])]),e._v("), or "),a("code",[e._v("Serializer")]),e._v(" instances may be passed to the "),a("code",[e._v("DefaultKafkaProducerFactory")]),e._v(" constructor (in which case all "),a("code",[e._v("Producer")]),e._v(" s share the same instances).\nAlternatively you can provide "),a("code",[e._v("Supplier")]),e._v(" s (starting with version 2.3) that will be used to obtain separate "),a("code",[e._v("Serializer")]),e._v(" instances for each "),a("code",[e._v("Producer")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic ProducerFactory producerFactory() {\n return new DefaultKafkaProducerFactory<>(producerConfigs(), null, () -> new CustomValueSerializer());\n}\n\n@Bean\npublic KafkaTemplate kafkaTemplate() {\n return new KafkaTemplate(producerFactory());\n}\n")])])]),a("p",[e._v("Starting with version 2.5.10, you can now update the producer properties after the factory is created.\nThis might be useful, for example, if you have to update SSL key/trust store locations after a credentials change.\nThe changes will not affect existing producer instances; call "),a("code",[e._v("reset()")]),e._v(" to close any existing producers so that new producers will be created using the new properties.\nNOTE: You cannot change a transactional producer factory to non-transactional, and vice-versa.")]),e._v(" "),a("p",[e._v("Two new methods are now provided:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("void updateConfigs(Map updates);\n\nvoid removeConfig(String configKey);\n")])])]),a("p",[e._v("Starting with version 2.8, if you provide serializers as objects (in the constructor or via the setters), the factory will invoke the "),a("code",[e._v("configure()")]),e._v(" method to configure them with the configuration properties.")]),e._v(" "),a("h5",{attrs:{id:"using-replyingkafkatemplate"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#using-replyingkafkatemplate"}},[e._v("#")]),e._v(" Using "),a("code",[e._v("ReplyingKafkaTemplate")])]),e._v(" "),a("p",[e._v("Version 2.1.3 introduced a subclass of "),a("code",[e._v("KafkaTemplate")]),e._v(" to provide request/reply semantics.\nThe class is named "),a("code",[e._v("ReplyingKafkaTemplate")]),e._v(" and has two additional methods; the following shows the method signatures:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("RequestReplyFuture sendAndReceive(ProducerRecord record);\n\nRequestReplyFuture sendAndReceive(ProducerRecord record,\n Duration replyTimeout);\n")])])]),a("p",[e._v("(Also see "),a("a",{attrs:{href:"#exchanging-messages"}},[e._v("Request/Reply with "),a("code",[e._v("Message")]),e._v(" s")]),e._v(").")]),e._v(" "),a("p",[e._v("The result is a "),a("code",[e._v("ListenableFuture")]),e._v(" that is asynchronously populated with the result (or an exception, for a timeout).\nThe result also has a "),a("code",[e._v("sendFuture")]),e._v(" property, which is the result of calling "),a("code",[e._v("KafkaTemplate.send()")]),e._v(".\nYou can use this future to determine the result of the send operation.")]),e._v(" "),a("p",[e._v("If the first method is used, or the "),a("code",[e._v("replyTimeout")]),e._v(" argument is "),a("code",[e._v("null")]),e._v(", the template’s "),a("code",[e._v("defaultReplyTimeout")]),e._v(" property is used (5 seconds by default).")]),e._v(" "),a("p",[e._v("The following Spring Boot application shows an example of how to use the feature:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@SpringBootApplication\npublic class KRequestingApplication {\n\n public static void main(String[] args) {\n SpringApplication.run(KRequestingApplication.class, args).close();\n }\n\n @Bean\n public ApplicationRunner runner(ReplyingKafkaTemplate template) {\n return args -> {\n ProducerRecord record = new ProducerRecord<>("kRequests", "foo");\n RequestReplyFuture replyFuture = template.sendAndReceive(record);\n SendResult sendResult = replyFuture.getSendFuture().get(10, TimeUnit.SECONDS);\n System.out.println("Sent ok: " + sendResult.getRecordMetadata());\n ConsumerRecord consumerRecord = replyFuture.get(10, TimeUnit.SECONDS);\n System.out.println("Return value: " + consumerRecord.value());\n };\n }\n\n @Bean\n public ReplyingKafkaTemplate replyingTemplate(\n ProducerFactory pf,\n ConcurrentMessageListenerContainer repliesContainer) {\n\n return new ReplyingKafkaTemplate<>(pf, repliesContainer);\n }\n\n @Bean\n public ConcurrentMessageListenerContainer repliesContainer(\n ConcurrentKafkaListenerContainerFactory containerFactory) {\n\n ConcurrentMessageListenerContainer repliesContainer =\n containerFactory.createContainer("kReplies");\n repliesContainer.getContainerProperties().setGroupId("repliesGroup");\n repliesContainer.setAutoStartup(false);\n return repliesContainer;\n }\n\n @Bean\n public NewTopic kRequests() {\n return TopicBuilder.name("kRequests")\n .partitions(10)\n .replicas(2)\n .build();\n }\n\n @Bean\n public NewTopic kReplies() {\n return TopicBuilder.name("kReplies")\n .partitions(10)\n .replicas(2)\n .build();\n }\n\n}\n')])])]),a("p",[e._v("Note that we can use Boot’s auto-configured container factory to create the reply container.")]),e._v(" "),a("p",[e._v("If a non-trivial deserializer is being used for replies, consider using an "),a("a",{attrs:{href:"#error-handling-deserializer"}},[a("code",[e._v("ErrorHandlingDeserializer")])]),e._v(" that delegates to your configured deserializer.\nWhen so configured, the "),a("code",[e._v("RequestReplyFuture")]),e._v(" will be completed exceptionally and you can catch the "),a("code",[e._v("ExecutionException")]),e._v(", with the "),a("code",[e._v("DeserializationException")]),e._v(" in its "),a("code",[e._v("cause")]),e._v(" property.")]),e._v(" "),a("p",[e._v("Starting with version 2.6.7, in addition to detecting "),a("code",[e._v("DeserializationException")]),e._v(" s, the template will call the "),a("code",[e._v("replyErrorChecker")]),e._v(" function, if provided.\nIf it returns an exception, the future will be completed exceptionally.")]),e._v(" "),a("p",[e._v("Here is an example:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('template.setReplyErrorChecker(record -> {\n Header error = record.headers().lastHeader("serverSentAnError");\n if (error != null) {\n return new MyException(new String(error.value()));\n }\n else {\n return null;\n }\n});\n\n...\n\nRequestReplyFuture future = template.sendAndReceive(record);\ntry {\n future.getSendFuture().get(10, TimeUnit.SECONDS); // send ok\n ConsumerRecord consumerRecord = future.get(10, TimeUnit.SECONDS);\n ...\n}\ncatch (InterruptedException e) {\n ...\n}\ncatch (ExecutionException e) {\n if (e.getCause instanceof MyException) {\n ...\n }\n}\ncatch (TimeoutException e) {\n ...\n}\n')])])]),a("p",[e._v("The template sets a header (named "),a("code",[e._v("KafkaHeaders.CORRELATION_ID")]),e._v(" by default), which must be echoed back by the server side.")]),e._v(" "),a("p",[e._v("In this case, the following "),a("code",[e._v("@KafkaListener")]),e._v(" application responds:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@SpringBootApplication\npublic class KReplyingApplication {\n\n public static void main(String[] args) {\n SpringApplication.run(KReplyingApplication.class, args);\n }\n\n @KafkaListener(id="server", topics = "kRequests")\n @SendTo // use default replyTo expression\n public String listen(String in) {\n System.out.println("Server received: " + in);\n return in.toUpperCase();\n }\n\n @Bean\n public NewTopic kRequests() {\n return TopicBuilder.name("kRequests")\n .partitions(10)\n .replicas(2)\n .build();\n }\n\n @Bean // not required if Jackson is on the classpath\n public MessagingMessageConverter simpleMapperConverter() {\n MessagingMessageConverter messagingMessageConverter = new MessagingMessageConverter();\n messagingMessageConverter.setHeaderMapper(new SimpleKafkaHeaderMapper());\n return messagingMessageConverter;\n }\n\n}\n')])])]),a("p",[e._v("The "),a("code",[e._v("@KafkaListener")]),e._v(" infrastructure echoes the correlation ID and determines the reply topic.")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#annotation-send-to"}},[e._v("Forwarding Listener Results using "),a("code",[e._v("@SendTo")])]),e._v(" for more information about sending replies.\nThe template uses the default header "),a("code",[e._v("KafKaHeaders.REPLY_TOPIC")]),e._v(" to indicate the topic to which the reply goes.")]),e._v(" "),a("p",[e._v("Starting with version 2.2, the template tries to detect the reply topic or partition from the configured reply container.\nIf the container is configured to listen to a single topic or a single "),a("code",[e._v("TopicPartitionOffset")]),e._v(", it is used to set the reply headers.\nIf the container is configured otherwise, the user must set up the reply headers.\nIn this case, an "),a("code",[e._v("INFO")]),e._v(" log message is written during initialization.\nThe following example uses "),a("code",[e._v("KafkaHeaders.REPLY_TOPIC")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('record.headers().add(new RecordHeader(KafkaHeaders.REPLY_TOPIC, "kReplies".getBytes()));\n')])])]),a("p",[e._v("When you configure with a single reply "),a("code",[e._v("TopicPartitionOffset")]),e._v(", you can use the same reply topic for multiple templates, as long as each instance listens on a different partition.\nWhen configuring with a single reply topic, each instance must use a different "),a("code",[e._v("group.id")]),e._v(".\nIn this case, all instances receive each reply, but only the instance that sent the request finds the correlation ID.\nThis may be useful for auto-scaling, but with the overhead of additional network traffic and the small cost of discarding each unwanted reply.\nWhen you use this setting, we recommend that you set the template’s "),a("code",[e._v("sharedReplyTopic")]),e._v(" to "),a("code",[e._v("true")]),e._v(", which reduces the logging level of unexpected replies to DEBUG instead of the default ERROR.")]),e._v(" "),a("p",[e._v("The following is an example of configuring the reply container to use the same shared reply topic:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic ConcurrentMessageListenerContainer replyContainer(\n ConcurrentKafkaListenerContainerFactory containerFactory) {\n\n ConcurrentMessageListenerContainer container = containerFactory.createContainer("topic2");\n container.getContainerProperties().setGroupId(UUID.randomUUID().toString()); // unique\n Properties props = new Properties();\n props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest"); // so the new group doesn\'t get old replies\n container.getContainerProperties().setKafkaConsumerProperties(props);\n return container;\n}\n')])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If you have multiple client instances and you do not configure them as discussed in the preceding paragraph, each instance needs a dedicated reply topic."),a("br"),e._v("An alternative is to set the "),a("code",[e._v("KafkaHeaders.REPLY_PARTITION")]),e._v(" and use a dedicated partition for each instance."),a("br"),e._v("The "),a("code",[e._v("Header")]),e._v(" contains a four-byte int (big-endian)."),a("br"),e._v("The server must use this header to route the reply to the correct partition ("),a("code",[e._v("@KafkaListener")]),e._v(" does this)."),a("br"),e._v("In this case, though, the reply container must not use Kafka’s group management feature and must be configured to listen on a fixed partition (by using a "),a("code",[e._v("TopicPartitionOffset")]),e._v(" in its "),a("code",[e._v("ContainerProperties")]),e._v(" constructor).")])])]),e._v(" "),a("tbody")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The "),a("code",[e._v("DefaultKafkaHeaderMapper")]),e._v(" requires Jackson to be on the classpath (for the "),a("code",[e._v("@KafkaListener")]),e._v(")."),a("br"),e._v("If it is not available, the message converter has no header mapper, so you must configure a "),a("code",[e._v("MessagingMessageConverter")]),e._v(" with a "),a("code",[e._v("SimpleKafkaHeaderMapper")]),e._v(", as shown earlier.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("By default, 3 headers are used:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("KafkaHeaders.CORRELATION_ID")]),e._v(" - used to correlate the reply to a request")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.REPLY_TOPIC")]),e._v(" - used to tell the server where to reply")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.REPLY_PARTITION")]),e._v(" - (optional) used to tell the server which partition to reply to")])])]),e._v(" "),a("p",[e._v("These header names are used by the "),a("code",[e._v("@KafkaListener")]),e._v(" infrastructure to route the reply.")]),e._v(" "),a("p",[e._v("Starting with version 2.3, you can customize the header names - the template has 3 properties "),a("code",[e._v("correlationHeaderName")]),e._v(", "),a("code",[e._v("replyTopicHeaderName")]),e._v(", and "),a("code",[e._v("replyPartitionHeaderName")]),e._v(".\nThis is useful if your server is not a Spring application (or does not use the "),a("code",[e._v("@KafkaListener")]),e._v(").")]),e._v(" "),a("h6",{attrs:{id:"request-reply-with-message-s"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#request-reply-with-message-s"}},[e._v("#")]),e._v(" Request/Reply with "),a("code",[e._v("Message")]),e._v(" s")]),e._v(" "),a("p",[e._v("Version 2.7 added methods to the "),a("code",[e._v("ReplyingKafkaTemplate")]),e._v(" to send and receive "),a("code",[e._v("spring-messaging")]),e._v(" 's "),a("code",[e._v("Message")]),e._v(" abstraction:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("RequestReplyMessageFuture sendAndReceive(Message message);\n\n

RequestReplyTypedMessageFuture sendAndReceive(Message message,\n ParameterizedTypeReference

returnType);\n")])])]),a("p",[e._v("These will use the template’s default "),a("code",[e._v("replyTimeout")]),e._v(", there are also overloaded versions that can take a timeout in the method call.")]),e._v(" "),a("p",[e._v("Use the first method if the consumer’s "),a("code",[e._v("Deserializer")]),e._v(" or the template’s "),a("code",[e._v("MessageConverter")]),e._v(" can convert the payload without any additional information, either via configuration or type metadata in the reply message.")]),e._v(" "),a("p",[e._v("Use the second method if you need to provide type information for the return type, to assist the message converter.\nThis also allows the same template to receive different types, even if there is no type metadata in the replies, such as when the server side is not a Spring application.\nThe following is an example of the latter:")]),e._v(" "),a("p",[e._v("Example 6. Template Bean")]),e._v(" "),a("p",[e._v("Java")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\nReplyingKafkaTemplate template(\n ProducerFactory pf,\n ConcurrentKafkaListenerContainerFactory factory) {\n\n ConcurrentMessageListenerContainer replyContainer =\n factory.createContainer("replies");\n replyContainer.getContainerProperties().setGroupId("request.replies");\n ReplyingKafkaTemplate template =\n new ReplyingKafkaTemplate<>(pf, replyContainer);\n template.setMessageConverter(new ByteArrayJsonMessageConverter());\n template.setDefaultTopic("requests");\n return template;\n}\n')])])]),a("p",[e._v("Kotlin")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\nfun template(\n pf: ProducerFactory?,\n factory: ConcurrentKafkaListenerContainerFactory\n): ReplyingKafkaTemplate {\n val replyContainer = factory.createContainer("replies")\n replyContainer.containerProperties.groupId = "request.replies"\n val template = ReplyingKafkaTemplate(pf, replyContainer)\n template.messageConverter = ByteArrayJsonMessageConverter()\n template.defaultTopic = "requests"\n return template\n}\n')])])]),a("p",[e._v("Example 7. Using the template")]),e._v(" "),a("p",[e._v("Java")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('RequestReplyTypedMessageFuture future1 =\n template.sendAndReceive(MessageBuilder.withPayload("getAThing").build(),\n new ParameterizedTypeReference() { });\nlog.info(future1.getSendFuture().get(10, TimeUnit.SECONDS).getRecordMetadata().toString());\nThing thing = future1.get(10, TimeUnit.SECONDS).getPayload();\nlog.info(thing.toString());\n\nRequestReplyTypedMessageFuture> future2 =\n template.sendAndReceive(MessageBuilder.withPayload("getThings").build(),\n new ParameterizedTypeReference>() { });\nlog.info(future2.getSendFuture().get(10, TimeUnit.SECONDS).getRecordMetadata().toString());\nList things = future2.get(10, TimeUnit.SECONDS).getPayload();\nthings.forEach(thing1 -> log.info(thing1.toString()));\n')])])]),a("p",[e._v("Kotlin")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('val future1: RequestReplyTypedMessageFuture? =\n template.sendAndReceive(MessageBuilder.withPayload("getAThing").build(),\n object : ParameterizedTypeReference() {})\nlog.info(future1?.sendFuture?.get(10, TimeUnit.SECONDS)?.recordMetadata?.toString())\nval thing = future1?.get(10, TimeUnit.SECONDS)?.payload\nlog.info(thing.toString())\n\nval future2: RequestReplyTypedMessageFuture?>? =\n template.sendAndReceive(MessageBuilder.withPayload("getThings").build(),\n object : ParameterizedTypeReference?>() {})\nlog.info(future2?.sendFuture?.get(10, TimeUnit.SECONDS)?.recordMetadata.toString())\nval things = future2?.get(10, TimeUnit.SECONDS)?.payload\nthings?.forEach(Consumer { thing1: Thing? -> log.info(thing1.toString()) })\n')])])]),a("h5",{attrs:{id:"reply-type-message"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#reply-type-message"}},[e._v("#")]),e._v(" Reply Type Message")]),e._v(" "),a("p",[e._v("When the "),a("code",[e._v("@KafkaListener")]),e._v(" returns a "),a("code",[e._v("Message")]),e._v(", with versions before 2.5, it was necessary to populate the reply topic and correlation id headers.\nIn this example, we use the reply topic header from the request:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "requestor", topics = "request")\n@SendTo\npublic Message messageReturn(String in) {\n return MessageBuilder.withPayload(in.toUpperCase())\n .setHeader(KafkaHeaders.TOPIC, replyTo)\n .setHeader(KafkaHeaders.MESSAGE_KEY, 42)\n .setHeader(KafkaHeaders.CORRELATION_ID, correlation)\n .build();\n}\n')])])]),a("p",[e._v("This also shows how to set a key on the reply record.")]),e._v(" "),a("p",[e._v("Starting with version 2.5, the framework will detect if these headers are missing and populate them with the topic - either the topic determined from the "),a("code",[e._v("@SendTo")]),e._v(" value or the incoming "),a("code",[e._v("KafkaHeaders.REPLY_TOPIC")]),e._v(" header (if present).\nIt will also echo the incoming "),a("code",[e._v("KafkaHeaders.CORRELATION_ID")]),e._v(" and "),a("code",[e._v("KafkaHeaders.REPLY_PARTITION")]),e._v(", if present.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "requestor", topics = "request")\n@SendTo // default REPLY_TOPIC header\npublic Message messageReturn(String in) {\n return MessageBuilder.withPayload(in.toUpperCase())\n .setHeader(KafkaHeaders.MESSAGE_KEY, 42)\n .build();\n}\n')])])]),a("h5",{attrs:{id:"aggregating-multiple-replies"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#aggregating-multiple-replies"}},[e._v("#")]),e._v(" Aggregating Multiple Replies")]),e._v(" "),a("p",[e._v("The template in "),a("a",{attrs:{href:"#replying-template"}},[e._v("Using "),a("code",[e._v("ReplyingKafkaTemplate")])]),e._v(" is strictly for a single request/reply scenario.\nFor cases where multiple receivers of a single message return a reply, you can use the "),a("code",[e._v("AggregatingReplyingKafkaTemplate")]),e._v(".\nThis is an implementation of the client-side of the "),a("a",{attrs:{href:"https://www.enterpriseintegrationpatterns.com/patterns/messaging/BroadcastAggregate.html",target:"_blank",rel:"noopener noreferrer"}},[e._v("Scatter-Gather Enterprise Integration Pattern"),a("OutboundLink")],1),e._v(".")]),e._v(" "),a("p",[e._v("Like the "),a("code",[e._v("ReplyingKafkaTemplate")]),e._v(", the "),a("code",[e._v("AggregatingReplyingKafkaTemplate")]),e._v(" constructor takes a producer factory and a listener container to receive the replies; it has a third parameter "),a("code",[e._v("BiPredicate>, Boolean> releaseStrategy")]),e._v(" which is consulted each time a reply is received; when the predicate returns "),a("code",[e._v("true")]),e._v(", the collection of "),a("code",[e._v("ConsumerRecord")]),e._v(" s is used to complete the "),a("code",[e._v("Future")]),e._v(" returned by the "),a("code",[e._v("sendAndReceive")]),e._v(" method.")]),e._v(" "),a("p",[e._v("There is an additional property "),a("code",[e._v("returnPartialOnTimeout")]),e._v(" (default false).\nWhen this is set to "),a("code",[e._v("true")]),e._v(", instead of completing the future with a "),a("code",[e._v("KafkaReplyTimeoutException")]),e._v(", a partial result completes the future normally (as long as at least one reply record has been received).")]),e._v(" "),a("p",[e._v("Starting with version 2.3.5, the predicate is also called after a timeout (if "),a("code",[e._v("returnPartialOnTimeout")]),e._v(" is "),a("code",[e._v("true")]),e._v(").\nThe first argument is the current list of records; the second is "),a("code",[e._v("true")]),e._v(" if this call is due to a timeout.\nThe predicate can modify the list of records.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("AggregatingReplyingKafkaTemplate template =\n new AggregatingReplyingKafkaTemplate<>(producerFactory, container,\n coll -> coll.size() == releaseSize);\n...\nRequestReplyFuture>> future =\n template.sendAndReceive(record);\nfuture.getSendFuture().get(10, TimeUnit.SECONDS); // send ok\nConsumerRecord>> consumerRecord =\n future.get(30, TimeUnit.SECONDS);\n")])])]),a("p",[e._v("Notice that the return type is a "),a("code",[e._v("ConsumerRecord")]),e._v(" with a value that is a collection of "),a("code",[e._v("ConsumerRecord")]),e._v(' s.\nThe "outer" '),a("code",[e._v("ConsumerRecord")]),e._v(' is not a "real" record, it is synthesized by the template, as a holder for the actual reply records received for the request.\nWhen a normal release occurs (release strategy returns true), the topic is set to '),a("code",[e._v("aggregatedResults")]),e._v("; if "),a("code",[e._v("returnPartialOnTimeout")]),e._v(" is true, and timeout occurs (and at least one reply record has been received), the topic is set to "),a("code",[e._v("partialResultsAfterTimeout")]),e._v('.\nThe template provides constant static variables for these "topic" names:')]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('/**\n * Pseudo topic name for the "outer" {@link ConsumerRecords} that has the aggregated\n * results in its value after a normal release by the release strategy.\n */\npublic static final String AGGREGATED_RESULTS_TOPIC = "aggregatedResults";\n\n/**\n * Pseudo topic name for the "outer" {@link ConsumerRecords} that has the aggregated\n * results in its value after a timeout.\n */\npublic static final String PARTIAL_RESULTS_AFTER_TIMEOUT_TOPIC = "partialResultsAfterTimeout";\n')])])]),a("p",[e._v("The real "),a("code",[e._v("ConsumerRecord")]),e._v(" s in the "),a("code",[e._v("Collection")]),e._v(" contain the actual topic(s) from which the replies are received.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The listener container for the replies MUST be configured with "),a("code",[e._v("AckMode.MANUAL")]),e._v(" or "),a("code",[e._v("AckMode.MANUAL_IMMEDIATE")]),e._v("; the consumer property "),a("code",[e._v("enable.auto.commit")]),e._v(" must be "),a("code",[e._v("false")]),e._v(" (the default since version 2.3)."),a("br"),e._v("To avoid any possibility of losing messages, the template only commits offsets when there are zero requests outstanding, i.e. when the last outstanding request is released by the release strategy."),a("br"),e._v("After a rebalance, it is possible for duplicate reply deliveries; these will be ignored for any in-flight requests; you may see error log messages when duplicate replies are received for already released replies.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If you use an "),a("a",{attrs:{href:"#error-handling-deserializer"}},[a("code",[e._v("ErrorHandlingDeserializer")])]),e._v(" with this aggregating template, the framework will not automatically detect "),a("code",[e._v("DeserializationException")]),e._v(" s."),a("br"),e._v("Instead, the record (with a "),a("code",[e._v("null")]),e._v(" value) will be returned intact, with the deserialization exception(s) in headers."),a("br"),e._v("It is recommended that applications call the utility method "),a("code",[e._v("ReplyingKafkaTemplate.checkDeserialization()")]),e._v(" method to determine if a deserialization exception occurred."),a("br"),e._v("See its javadocs for more information."),a("br"),e._v("The "),a("code",[e._v("replyErrorChecker")]),e._v(" is also not called for this aggregating template; you should perform the checks on each element of the reply.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h4",{attrs:{id:"_4-1-4-receiving-messages"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-4-receiving-messages"}},[e._v("#")]),e._v(" 4.1.4. Receiving Messages")]),e._v(" "),a("p",[e._v("You can receive messages by configuring a "),a("code",[e._v("MessageListenerContainer")]),e._v(" and providing a message listener or by using the "),a("code",[e._v("@KafkaListener")]),e._v(" annotation.")]),e._v(" "),a("h5",{attrs:{id:"message-listeners"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#message-listeners"}},[e._v("#")]),e._v(" Message Listeners")]),e._v(" "),a("p",[e._v("When you use a "),a("a",{attrs:{href:"#message-listener-container"}},[e._v("message listener container")]),e._v(", you must provide a listener to receive data.\nThere are currently eight supported interfaces for message listeners.\nThe following listing shows these interfaces:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public interface MessageListener { (1)\n\n void onMessage(ConsumerRecord data);\n\n}\n\npublic interface AcknowledgingMessageListener { (2)\n\n void onMessage(ConsumerRecord data, Acknowledgment acknowledgment);\n\n}\n\npublic interface ConsumerAwareMessageListener extends MessageListener { (3)\n\n void onMessage(ConsumerRecord data, Consumer consumer);\n\n}\n\npublic interface AcknowledgingConsumerAwareMessageListener extends MessageListener { (4)\n\n void onMessage(ConsumerRecord data, Acknowledgment acknowledgment, Consumer consumer);\n\n}\n\npublic interface BatchMessageListener { (5)\n\n void onMessage(List> data);\n\n}\n\npublic interface BatchAcknowledgingMessageListener { (6)\n\n void onMessage(List> data, Acknowledgment acknowledgment);\n\n}\n\npublic interface BatchConsumerAwareMessageListener extends BatchMessageListener { (7)\n\n void onMessage(List> data, Consumer consumer);\n\n}\n\npublic interface BatchAcknowledgingConsumerAwareMessageListener extends BatchMessageListener { (8)\n\n void onMessage(List> data, Acknowledgment acknowledgment, Consumer consumer);\n\n}\n")])])]),a("table",[a("thead",[a("tr",[a("th",[a("strong",[e._v("1")])]),e._v(" "),a("th",[e._v("Use this interface for processing individual "),a("code",[e._v("ConsumerRecord")]),e._v(" instances received from the Kafka consumer "),a("code",[e._v("poll()")]),e._v(" operation when using auto-commit or one of the container-managed "),a("a",{attrs:{href:"#committing-offsets"}},[e._v("commit methods")]),e._v(".")])])]),e._v(" "),a("tbody",[a("tr",[a("td",[a("strong",[e._v("2")])]),e._v(" "),a("td",[e._v("Use this interface for processing individual "),a("code",[e._v("ConsumerRecord")]),e._v(" instances received from the Kafka consumer "),a("code",[e._v("poll()")]),e._v(" operation when using one of the manual "),a("a",{attrs:{href:"#committing-offsets"}},[e._v("commit methods")]),e._v(".")])]),e._v(" "),a("tr",[a("td",[a("strong",[e._v("3")])]),e._v(" "),a("td",[e._v("Use this interface for processing individual "),a("code",[e._v("ConsumerRecord")]),e._v(" instances received from the Kafka consumer "),a("code",[e._v("poll()")]),e._v(" operation when using auto-commit or one of the container-managed "),a("a",{attrs:{href:"#committing-offsets"}},[e._v("commit methods")]),e._v("."),a("br"),e._v("Access to the "),a("code",[e._v("Consumer")]),e._v(" object is provided.")])]),e._v(" "),a("tr",[a("td",[a("strong",[e._v("4")])]),e._v(" "),a("td",[e._v("Use this interface for processing individual "),a("code",[e._v("ConsumerRecord")]),e._v(" instances received from the Kafka consumer "),a("code",[e._v("poll()")]),e._v(" operation when using one of the manual "),a("a",{attrs:{href:"#committing-offsets"}},[e._v("commit methods")]),e._v("."),a("br"),e._v("Access to the "),a("code",[e._v("Consumer")]),e._v(" object is provided.")])]),e._v(" "),a("tr",[a("td",[a("strong",[e._v("5")])]),e._v(" "),a("td",[e._v("Use this interface for processing all "),a("code",[e._v("ConsumerRecord")]),e._v(" instances received from the Kafka consumer "),a("code",[e._v("poll()")]),e._v(" operation when using auto-commit or one of the container-managed "),a("a",{attrs:{href:"#committing-offsets"}},[e._v("commit methods")]),e._v("."),a("code",[e._v("AckMode.RECORD")]),e._v(" is not supported when you use this interface, since the listener is given the complete batch.")])]),e._v(" "),a("tr",[a("td",[a("strong",[e._v("6")])]),e._v(" "),a("td",[e._v("Use this interface for processing all "),a("code",[e._v("ConsumerRecord")]),e._v(" instances received from the Kafka consumer "),a("code",[e._v("poll()")]),e._v(" operation when using one of the manual "),a("a",{attrs:{href:"#committing-offsets"}},[e._v("commit methods")]),e._v(".")])]),e._v(" "),a("tr",[a("td",[a("strong",[e._v("7")])]),e._v(" "),a("td",[e._v("Use this interface for processing all "),a("code",[e._v("ConsumerRecord")]),e._v(" instances received from the Kafka consumer "),a("code",[e._v("poll()")]),e._v(" operation when using auto-commit or one of the container-managed "),a("a",{attrs:{href:"#committing-offsets"}},[e._v("commit methods")]),e._v("."),a("code",[e._v("AckMode.RECORD")]),e._v(" is not supported when you use this interface, since the listener is given the complete batch."),a("br"),e._v("Access to the "),a("code",[e._v("Consumer")]),e._v(" object is provided.")])]),e._v(" "),a("tr",[a("td",[a("strong",[e._v("8")])]),e._v(" "),a("td",[e._v("Use this interface for processing all "),a("code",[e._v("ConsumerRecord")]),e._v(" instances received from the Kafka consumer "),a("code",[e._v("poll()")]),e._v(" operation when using one of the manual "),a("a",{attrs:{href:"#committing-offsets"}},[e._v("commit methods")]),e._v("."),a("br"),e._v("Access to the "),a("code",[e._v("Consumer")]),e._v(" object is provided.")])])])]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The "),a("code",[e._v("Consumer")]),e._v(" object is not thread-safe."),a("br"),e._v("You must only invoke its methods on the thread that calls the listener.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("You should not execute any "),a("code",[e._v("Consumer")]),e._v(" methods that affect the consumer’s positions and or committed offsets in your listener; the container needs to manage such information.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"message-listener-containers"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#message-listener-containers"}},[e._v("#")]),e._v(" Message Listener Containers")]),e._v(" "),a("p",[e._v("Two "),a("code",[e._v("MessageListenerContainer")]),e._v(" implementations are provided:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("KafkaMessageListenerContainer")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ConcurrentMessageListenerContainer")])])])]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("KafkaMessageListenerContainer")]),e._v(" receives all message from all topics or partitions on a single thread.\nThe "),a("code",[e._v("ConcurrentMessageListenerContainer")]),e._v(" delegates to one or more "),a("code",[e._v("KafkaMessageListenerContainer")]),e._v(" instances to provide multi-threaded consumption.")]),e._v(" "),a("p",[e._v("Starting with version 2.2.7, you can add a "),a("code",[e._v("RecordInterceptor")]),e._v(" to the listener container; it will be invoked before calling the listener allowing inspection or modification of the record.\nIf the interceptor returns null, the listener is not called.\nStarting with version 2.7, it has additional methods which are called after the listener exits (normally, or by throwing an exception).\nAlso, starting with version 2.7, there is now a "),a("code",[e._v("BatchInterceptor")]),e._v(", providing similar functionality for "),a("a",{attrs:{href:"#batch-listeners"}},[e._v("Batch Listeners")]),e._v(".\nIn addition, the "),a("code",[e._v("ConsumerAwareRecordInterceptor")]),e._v(" (and "),a("code",[e._v("BatchInterceptor")]),e._v(") provide access to the "),a("code",[e._v("Consumer")]),e._v(".\nThis might be used, for example, to access the consumer metrics in the interceptor.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("You should not execute any methods that affect the consumer’s positions and or committed offsets in these interceptors; the container needs to manage such information.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("CompositeRecordInterceptor")]),e._v(" and "),a("code",[e._v("CompositeBatchInterceptor")]),e._v(" can be used to invoke multiple interceptors.")]),e._v(" "),a("p",[e._v("By default, starting with version 2.8, when using transactions, the interceptor is invoked before the transaction has started.\nYou can set the listener container’s "),a("code",[e._v("interceptBeforeTx")]),e._v(" property to "),a("code",[e._v("false")]),e._v(" to invoke the interceptor after the transaction has started instead.")]),e._v(" "),a("p",[e._v("Starting with versions 2.3.8, 2.4.6, the "),a("code",[e._v("ConcurrentMessageListenerContainer")]),e._v(" now supports "),a("a",{attrs:{href:"https://kafka.apache.org/documentation/#static_membership",target:"_blank",rel:"noopener noreferrer"}},[e._v("Static Membership"),a("OutboundLink")],1),e._v(" when the concurrency is greater than one.\nThe "),a("code",[e._v("group.instance.id")]),e._v(" is suffixed with "),a("code",[e._v("-n")]),e._v(" with "),a("code",[e._v("n")]),e._v(" starting at "),a("code",[e._v("1")]),e._v(".\nThis, together with an increased "),a("code",[e._v("session.timeout.ms")]),e._v(", can be used to reduce rebalance events, for example, when application instances are restarted.")]),e._v(" "),a("h6",{attrs:{id:"using-kafkamessagelistenercontainer"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#using-kafkamessagelistenercontainer"}},[e._v("#")]),e._v(" Using "),a("code",[e._v("KafkaMessageListenerContainer")])]),e._v(" "),a("p",[e._v("The following constructor is available:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public KafkaMessageListenerContainer(ConsumerFactory consumerFactory,\n ContainerProperties containerProperties)\n")])])]),a("p",[e._v("It receives a "),a("code",[e._v("ConsumerFactory")]),e._v(" and information about topics and partitions, as well as other configuration, in a "),a("code",[e._v("ContainerProperties")]),e._v("object."),a("code",[e._v("ContainerProperties")]),e._v(" has the following constructors:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public ContainerProperties(TopicPartitionOffset... topicPartitions)\n\npublic ContainerProperties(String... topics)\n\npublic ContainerProperties(Pattern topicPattern)\n")])])]),a("p",[e._v("The first constructor takes an array of "),a("code",[e._v("TopicPartitionOffset")]),e._v(" arguments to explicitly instruct the container about which partitions to use (using the consumer "),a("code",[e._v("assign()")]),e._v(" method) and with an optional initial offset.\nA positive value is an absolute offset by default.\nA negative value is relative to the current last offset within a partition by default.\nA constructor for "),a("code",[e._v("TopicPartitionOffset")]),e._v(" that takes an additional "),a("code",[e._v("boolean")]),e._v(" argument is provided.\nIf this is "),a("code",[e._v("true")]),e._v(", the initial offsets (positive or negative) are relative to the current position for this consumer.\nThe offsets are applied when the container is started.\nThe second takes an array of topics, and Kafka allocates the partitions based on the "),a("code",[e._v("group.id")]),e._v(" property — distributing partitions across the group.\nThe third uses a regex "),a("code",[e._v("Pattern")]),e._v(" to select the topics.")]),e._v(" "),a("p",[e._v("To assign a "),a("code",[e._v("MessageListener")]),e._v(" to a container, you can use the "),a("code",[e._v("ContainerProps.setMessageListener")]),e._v(" method when creating the Container.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('ContainerProperties containerProps = new ContainerProperties("topic1", "topic2");\ncontainerProps.setMessageListener(new MessageListener() {\n ...\n});\nDefaultKafkaConsumerFactory cf =\n new DefaultKafkaConsumerFactory<>(consumerProps());\nKafkaMessageListenerContainer container =\n new KafkaMessageListenerContainer<>(cf, containerProps);\nreturn container;\n')])])]),a("p",[e._v("Note that when creating a "),a("code",[e._v("DefaultKafkaConsumerFactory")]),e._v(", using the constructor that just takes in the properties as above means that key and value "),a("code",[e._v("Deserializer")]),e._v(" classes are picked up from configuration.\nAlternatively, "),a("code",[e._v("Deserializer")]),e._v(" instances may be passed to the "),a("code",[e._v("DefaultKafkaConsumerFactory")]),e._v(" constructor for key and/or value, in which case all Consumers share the same instances.\nAnother option is to provide "),a("code",[e._v("Supplier")]),e._v(" s (starting with version 2.3) that will be used to obtain separate "),a("code",[e._v("Deserializer")]),e._v(" instances for each "),a("code",[e._v("Consumer")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("DefaultKafkaConsumerFactory cf =\n new DefaultKafkaConsumerFactory<>(consumerProps(), null, () -> new CustomValueDeserializer());\nKafkaMessageListenerContainer container =\n new KafkaMessageListenerContainer<>(cf, containerProps);\nreturn container;\n")])])]),a("p",[e._v("Refer to the "),a("a",{attrs:{href:"https://docs.spring.io/spring-kafka/api/org/springframework/kafka/listener/ContainerProperties.html",target:"_blank",rel:"noopener noreferrer"}},[e._v("Javadoc"),a("OutboundLink")],1),e._v(" for "),a("code",[e._v("ContainerProperties")]),e._v(" for more information about the various properties that you can set.")]),e._v(" "),a("p",[e._v("Since version 2.1.1, a new property called "),a("code",[e._v("logContainerConfig")]),e._v(" is available.\nWhen "),a("code",[e._v("true")]),e._v(" and "),a("code",[e._v("INFO")]),e._v(" logging is enabled each listener container writes a log message summarizing its configuration properties.")]),e._v(" "),a("p",[e._v("By default, logging of topic offset commits is performed at the "),a("code",[e._v("DEBUG")]),e._v(" logging level.\nStarting with version 2.1.2, a property in "),a("code",[e._v("ContainerProperties")]),e._v(" called "),a("code",[e._v("commitLogLevel")]),e._v(" lets you specify the log level for these messages.\nFor example, to change the log level to "),a("code",[e._v("INFO")]),e._v(", you can use "),a("code",[e._v("containerProperties.setCommitLogLevel(LogIfLevelEnabled.Level.INFO);")]),e._v(".")]),e._v(" "),a("p",[e._v("Starting with version 2.2, a new container property called "),a("code",[e._v("missingTopicsFatal")]),e._v(" has been added (default: "),a("code",[e._v("false")]),e._v(" since 2.3.4).\nThis prevents the container from starting if any of the configured topics are not present on the broker.\nIt does not apply if the container is configured to listen to a topic pattern (regex).\nPreviously, the container threads looped within the "),a("code",[e._v("consumer.poll()")]),e._v(" method waiting for the topic to appear while logging many messages.\nAside from the logs, there was no indication that there was a problem.")]),e._v(" "),a("p",[e._v("As of version 2.8, a new container property "),a("code",[e._v("authExceptionRetryInterval")]),e._v(" has been introduced.\nThis causes the container to retry fetching messages after getting any "),a("code",[e._v("AuthenticationException")]),e._v(" or "),a("code",[e._v("AuthorizationException")]),e._v(" from the "),a("code",[e._v("KafkaConsumer")]),e._v(".\nThis can happen when, for example, the configured user is denied access to read a certain topic or credentials are incorrect.\nDefining "),a("code",[e._v("authExceptionRetryInterval")]),e._v(" allows the container to recover when proper permissions are granted.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("By default, no interval is configured - authentication and authorization errors are considered fatal, which causes the container to stop.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Starting with version 2.8, when creating the consumer factory, if you provide deserializers as objects (in the constructor or via the setters), the factory will invoke the "),a("code",[e._v("configure()")]),e._v(" method to configure them with the configuration properties.")]),e._v(" "),a("h6",{attrs:{id:"using-concurrentmessagelistenercontainer"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#using-concurrentmessagelistenercontainer"}},[e._v("#")]),e._v(" Using "),a("code",[e._v("ConcurrentMessageListenerContainer")])]),e._v(" "),a("p",[e._v("The single constructor is similar to the "),a("code",[e._v("KafkaListenerContainer")]),e._v(" constructor.\nThe following listing shows the constructor’s signature:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public ConcurrentMessageListenerContainer(ConsumerFactory consumerFactory,\n ContainerProperties containerProperties)\n")])])]),a("p",[e._v("It also has a "),a("code",[e._v("concurrency")]),e._v(" property.\nFor example, "),a("code",[e._v("container.setConcurrency(3)")]),e._v(" creates three "),a("code",[e._v("KafkaMessageListenerContainer")]),e._v(" instances.")]),e._v(" "),a("p",[e._v("For the first constructor, Kafka distributes the partitions across the consumers using its group management capabilities.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("When listening to multiple topics, the default partition distribution may not be what you expect."),a("br"),e._v("For example, if you have three topics with five partitions each and you want to use "),a("code",[e._v("concurrency=15")]),e._v(", you see only five active consumers, each assigned one partition from each topic, with the other 10 consumers being idle."),a("br"),e._v("This is because the default Kafka "),a("code",[e._v("PartitionAssignor")]),e._v(" is the "),a("code",[e._v("RangeAssignor")]),e._v(" (see its Javadoc)."),a("br"),e._v("For this scenario, you may want to consider using the "),a("code",[e._v("RoundRobinAssignor")]),e._v(" instead, which distributes the partitions across all of the consumers."),a("br"),e._v("Then, each consumer is assigned one topic or partition."),a("br"),e._v("To change the "),a("code",[e._v("PartitionAssignor")]),e._v(", you can set the "),a("code",[e._v("partition.assignment.strategy")]),e._v(" consumer property ("),a("code",[e._v("ConsumerConfigs.PARTITION_ASSIGNMENT_STRATEGY_CONFIG")]),e._v(") in the properties provided to the "),a("code",[e._v("DefaultKafkaConsumerFactory")]),e._v("."),a("br"),a("br"),e._v("When using Spring Boot, you can assign set the strategy as follows:"),a("br"),a("br"),a("code",[e._v("
spring.kafka.consumer.properties.partition.assignment.strategy=\\
org.apache.kafka.clients.consumer.RoundRobinAssignor
")])])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("When the container properties are configured with "),a("code",[e._v("TopicPartitionOffset")]),e._v(" s, the "),a("code",[e._v("ConcurrentMessageListenerContainer")]),e._v(" distributes the "),a("code",[e._v("TopicPartitionOffset")]),e._v(" instances across the delegate "),a("code",[e._v("KafkaMessageListenerContainer")]),e._v(" instances.")]),e._v(" "),a("p",[e._v("If, say, six "),a("code",[e._v("TopicPartitionOffset")]),e._v(" instances are provided and the "),a("code",[e._v("concurrency")]),e._v(" is "),a("code",[e._v("3")]),e._v("; each container gets two partitions.\nFor five "),a("code",[e._v("TopicPartitionOffset")]),e._v(" instances, two containers get two partitions, and the third gets one.\nIf the "),a("code",[e._v("concurrency")]),e._v(" is greater than the number of "),a("code",[e._v("TopicPartitions")]),e._v(", the "),a("code",[e._v("concurrency")]),e._v(" is adjusted down such that each container gets one partition.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The "),a("code",[e._v("client.id")]),e._v(" property (if set) is appended with "),a("code",[e._v("-n")]),e._v(" where "),a("code",[e._v("n")]),e._v(" is the consumer instance that corresponds to the concurrency."),a("br"),e._v("This is required to provide unique names for MBeans when JMX is enabled.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Starting with version 1.3, the "),a("code",[e._v("MessageListenerContainer")]),e._v(" provides access to the metrics of the underlying "),a("code",[e._v("KafkaConsumer")]),e._v(".\nIn the case of "),a("code",[e._v("ConcurrentMessageListenerContainer")]),e._v(", the "),a("code",[e._v("metrics()")]),e._v(" method returns the metrics for all the target "),a("code",[e._v("KafkaMessageListenerContainer")]),e._v(" instances.\nThe metrics are grouped into the "),a("code",[e._v("Map")]),e._v(" by the "),a("code",[e._v("client-id")]),e._v(" provided for the underlying "),a("code",[e._v("KafkaConsumer")]),e._v(".")]),e._v(" "),a("p",[e._v("Starting with version 2.3, the "),a("code",[e._v("ContainerProperties")]),e._v(" provides an "),a("code",[e._v("idleBetweenPolls")]),e._v(" option to let the main loop in the listener container to sleep between "),a("code",[e._v("KafkaConsumer.poll()")]),e._v(" calls.\nAn actual sleep interval is selected as the minimum from the provided option and difference between the "),a("code",[e._v("max.poll.interval.ms")]),e._v(" consumer config and the current records batch processing time.")]),e._v(" "),a("h6",{attrs:{id:"committing-offsets"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#committing-offsets"}},[e._v("#")]),e._v(" Committing Offsets")]),e._v(" "),a("p",[e._v("Several options are provided for committing offsets.\nIf the "),a("code",[e._v("enable.auto.commit")]),e._v(" consumer property is "),a("code",[e._v("true")]),e._v(", Kafka auto-commits the offsets according to its configuration.\nIf it is "),a("code",[e._v("false")]),e._v(", the containers support several "),a("code",[e._v("AckMode")]),e._v(" settings (described in the next list).\nThe default "),a("code",[e._v("AckMode")]),e._v(" is "),a("code",[e._v("BATCH")]),e._v(".\nStarting with version 2.3, the framework sets "),a("code",[e._v("enable.auto.commit")]),e._v(" to "),a("code",[e._v("false")]),e._v(" unless explicitly set in the configuration.\nPreviously, the Kafka default ("),a("code",[e._v("true")]),e._v(") was used if the property was not set.")]),e._v(" "),a("p",[e._v("The consumer "),a("code",[e._v("poll()")]),e._v(" method returns one or more "),a("code",[e._v("ConsumerRecords")]),e._v(".\nThe "),a("code",[e._v("MessageListener")]),e._v(" is called for each record.\nThe following lists describes the action taken by the container for each "),a("code",[e._v("AckMode")]),e._v(" (when transactions are not being used):")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("RECORD")]),e._v(": Commit the offset when the listener returns after processing the record.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("BATCH")]),e._v(": Commit the offset when all the records returned by the "),a("code",[e._v("poll()")]),e._v(" have been processed.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("TIME")]),e._v(": Commit the offset when all the records returned by the "),a("code",[e._v("poll()")]),e._v(" have been processed, as long as the "),a("code",[e._v("ackTime")]),e._v(" since the last commit has been exceeded.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("COUNT")]),e._v(": Commit the offset when all the records returned by the "),a("code",[e._v("poll()")]),e._v(" have been processed, as long as "),a("code",[e._v("ackCount")]),e._v(" records have been received since the last commit.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("COUNT_TIME")]),e._v(": Similar to "),a("code",[e._v("TIME")]),e._v(" and "),a("code",[e._v("COUNT")]),e._v(", but the commit is performed if either condition is "),a("code",[e._v("true")]),e._v(".")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("MANUAL")]),e._v(": The message listener is responsible to "),a("code",[e._v("acknowledge()")]),e._v(" the "),a("code",[e._v("Acknowledgment")]),e._v(".\nAfter that, the same semantics as "),a("code",[e._v("BATCH")]),e._v(" are applied.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("MANUAL_IMMEDIATE")]),e._v(": Commit the offset immediately when the "),a("code",[e._v("Acknowledgment.acknowledge()")]),e._v(" method is called by the listener.")])])]),e._v(" "),a("p",[e._v("When using "),a("a",{attrs:{href:"#transactions"}},[e._v("transactions")]),e._v(", the offset(s) are sent to the transaction and the semantics are equivalent to "),a("code",[e._v("RECORD")]),e._v(" or "),a("code",[e._v("BATCH")]),e._v(", depending on the listener type (record or batch).")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[a("code",[e._v("MANUAL")]),e._v(", and "),a("code",[e._v("MANUAL_IMMEDIATE")]),e._v(" require the listener to be an "),a("code",[e._v("AcknowledgingMessageListener")]),e._v(" or a "),a("code",[e._v("BatchAcknowledgingMessageListener")]),e._v("."),a("br"),e._v("See "),a("a",{attrs:{href:"#message-listeners"}},[e._v("Message Listeners")]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Depending on the "),a("code",[e._v("syncCommits")]),e._v(" container property, the "),a("code",[e._v("commitSync()")]),e._v(" or "),a("code",[e._v("commitAsync()")]),e._v(" method on the consumer is used."),a("code",[e._v("syncCommits")]),e._v(" is "),a("code",[e._v("true")]),e._v(" by default; also see "),a("code",[e._v("setSyncCommitTimeout")]),e._v(".\nSee "),a("code",[e._v("setCommitCallback")]),e._v(" to get the results of asynchronous commits; the default callback is the "),a("code",[e._v("LoggingCommitCallback")]),e._v(" which logs errors (and successes at debug level).")]),e._v(" "),a("p",[e._v("Because the listener container has it’s own mechanism for committing offsets, it prefers the Kafka "),a("code",[e._v("ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG")]),e._v(" to be "),a("code",[e._v("false")]),e._v(".\nStarting with version 2.3, it unconditionally sets it to false unless specifically set in the consumer factory or the container’s consumer property overrides.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("Acknowledgment")]),e._v(" has the following method:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public interface Acknowledgment {\n\n void acknowledge();\n\n}\n")])])]),a("p",[e._v("This method gives the listener control over when offsets are committed.")]),e._v(" "),a("p",[e._v("Starting with version 2.3, the "),a("code",[e._v("Acknowledgment")]),e._v(" interface has two additional methods "),a("code",[e._v("nack(long sleep)")]),e._v(" and "),a("code",[e._v("nack(int index, long sleep)")]),e._v(".\nThe first one is used with a record listener, the second with a batch listener.\nCalling the wrong method for your listener type will throw an "),a("code",[e._v("IllegalStateException")]),e._v(".")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If you want to commit a partial batch, using "),a("code",[e._v("nack()")]),e._v(", When using transactions, set the "),a("code",[e._v("AckMode")]),e._v(" to "),a("code",[e._v("MANUAL")]),e._v("; invoking "),a("code",[e._v("nack()")]),e._v(" will send the offsets of the successfully processed records to the transaction.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[a("code",[e._v("nack()")]),e._v(" can only be called on the consumer thread that invokes your listener.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("With a record listener, when "),a("code",[e._v("nack()")]),e._v(" is called, any pending offsets are committed, the remaing records from the last poll are discarded, and seeks are performed on their partitions so that the failed record and unprocessed records are redelivered on the next "),a("code",[e._v("poll()")]),e._v(".\nThe consumer thread can be paused before redelivery, by setting the "),a("code",[e._v("sleep")]),e._v(" argument.\nThis is similar functionality to throwing an exception when the container is configured with a "),a("code",[e._v("DefaultErrorHandler")]),e._v(".")]),e._v(" "),a("p",[e._v("When using a batch listener, you can specify the index within the batch where the failure occurred.\nWhen "),a("code",[e._v("nack()")]),e._v(" is called, offsets will be committed for records before the index and seeks are performed on the partitions for the failed and discarded records so that they will be redelivered on the next "),a("code",[e._v("poll()")]),e._v(".")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#error-handlers"}},[e._v("Container Error Handlers")]),e._v(" for more information.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("When using partition assignment via group management, it is important to ensure the "),a("code",[e._v("sleep")]),e._v(" argument (plus the time spent processing records from the previous poll) is less than the consumer "),a("code",[e._v("max.poll.interval.ms")]),e._v(" property.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h6",{attrs:{id:"listener-container-auto-startup"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#listener-container-auto-startup"}},[e._v("#")]),e._v(" Listener Container Auto Startup")]),e._v(" "),a("p",[e._v("The listener containers implement "),a("code",[e._v("SmartLifecycle")]),e._v(", and "),a("code",[e._v("autoStartup")]),e._v(" is "),a("code",[e._v("true")]),e._v(" by default.\nThe containers are started in a late phase ("),a("code",[e._v("Integer.MAX-VALUE - 100")]),e._v(").\nOther components that implement "),a("code",[e._v("SmartLifecycle")]),e._v(", to handle data from listeners, should be started in an earlier phase.\nThe "),a("code",[e._v("- 100")]),e._v(" leaves room for later phases to enable components to be auto-started after the containers.")]),e._v(" "),a("h5",{attrs:{id:"manually-committing-offsets"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#manually-committing-offsets"}},[e._v("#")]),e._v(" Manually Committing Offsets")]),e._v(" "),a("p",[e._v("Normally, when using "),a("code",[e._v("AckMode.MANUAL")]),e._v(" or "),a("code",[e._v("AckMode.MANUAL_IMMEDIATE")]),e._v(", the acknowledgments must be acknowledged in order, because Kafka does not maintain state for each record, only a committed offset for each group/partition.\nStarting with version 2.8, you can now set the container property "),a("code",[e._v("asyncAcks")]),e._v(", which allows the acknowledgments for records returned by the poll to be acknowledged in any order.\nThe listener container will defer the out-of-order commits until the missing acknowledgments are received.\nThe consumer will be paused (no new records delivered) until all the offsets for the previous poll have been committed.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("While this feature allows applications to process records asynchronously, it should be understood that it increases the possibility of duplicate deliveries after a failure.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"kafkalistener-annotation"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#kafkalistener-annotation"}},[e._v("#")]),e._v(" "),a("code",[e._v("@KafkaListener")]),e._v(" Annotation")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("@KafkaListener")]),e._v(" annotation is used to designate a bean method as a listener for a listener container.\nThe bean is wrapped in a "),a("code",[e._v("MessagingMessageListenerAdapter")]),e._v(" configured with various features, such as converters to convert the data, if necessary, to match the method parameters.")]),e._v(" "),a("p",[e._v("You can configure most attributes on the annotation with SpEL by using "),a("code",[e._v("#{…​}")]),e._v(" or property placeholders ("),a("code",[e._v("${…​}")]),e._v(").\nSee the "),a("a",{attrs:{href:"https://docs.spring.io/spring-kafka/api/org/springframework/kafka/annotation/KafkaListener.html",target:"_blank",rel:"noopener noreferrer"}},[e._v("Javadoc"),a("OutboundLink")],1),e._v(" for more information.")]),e._v(" "),a("h6",{attrs:{id:"record-listeners"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#record-listeners"}},[e._v("#")]),e._v(" Record Listeners")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("@KafkaListener")]),e._v(" annotation provides a mechanism for simple POJO listeners.\nThe following example shows how to use it:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('public class Listener {\n\n @KafkaListener(id = "foo", topics = "myTopic", clientIdPrefix = "myClientId")\n public void listen(String data) {\n ...\n }\n\n}\n')])])]),a("p",[e._v("This mechanism requires an "),a("code",[e._v("@EnableKafka")]),e._v(" annotation on one of your "),a("code",[e._v("@Configuration")]),e._v(" classes and a listener container factory, which is used to configure the underlying "),a("code",[e._v("ConcurrentMessageListenerContainer")]),e._v(".\nBy default, a bean with name "),a("code",[e._v("kafkaListenerContainerFactory")]),e._v(" is expected.\nThe following example shows how to use "),a("code",[e._v("ConcurrentMessageListenerContainer")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Configuration\n@EnableKafka\npublic class KafkaConfig {\n\n @Bean\n KafkaListenerContainerFactory>\n kafkaListenerContainerFactory() {\n ConcurrentKafkaListenerContainerFactory factory =\n new ConcurrentKafkaListenerContainerFactory<>();\n factory.setConsumerFactory(consumerFactory());\n factory.setConcurrency(3);\n factory.getContainerProperties().setPollTimeout(3000);\n return factory;\n }\n\n @Bean\n public ConsumerFactory consumerFactory() {\n return new DefaultKafkaConsumerFactory<>(consumerConfigs());\n }\n\n @Bean\n public Map consumerConfigs() {\n Map props = new HashMap<>();\n props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, embeddedKafka.getBrokersAsString());\n ...\n return props;\n }\n}\n")])])]),a("p",[e._v("Notice that, to set container properties, you must use the "),a("code",[e._v("getContainerProperties()")]),e._v(" method on the factory.\nIt is used as a template for the actual properties injected into the container.")]),e._v(" "),a("p",[e._v("Starting with version 2.1.1, you can now set the "),a("code",[e._v("client.id")]),e._v(" property for consumers created by the annotation.\nThe "),a("code",[e._v("clientIdPrefix")]),e._v(" is suffixed with "),a("code",[e._v("-n")]),e._v(", where "),a("code",[e._v("n")]),e._v(" is an integer representing the container number when using concurrency.")]),e._v(" "),a("p",[e._v("Starting with version 2.2, you can now override the container factory’s "),a("code",[e._v("concurrency")]),e._v(" and "),a("code",[e._v("autoStartup")]),e._v(" properties by using properties on the annotation itself.\nThe properties can be simple values, property placeholders, or SpEL expressions.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "myListener", topics = "myTopic",\n autoStartup = "${listen.auto.start:true}", concurrency = "${listen.concurrency:3}")\npublic void listen(String data) {\n ...\n}\n')])])]),a("h6",{attrs:{id:"explicit-partition-assignment"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#explicit-partition-assignment"}},[e._v("#")]),e._v(" Explicit Partition Assignment")]),e._v(" "),a("p",[e._v("You can also configure POJO listeners with explicit topics and partitions (and, optionally, their initial offsets).\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "thing2", topicPartitions =\n { @TopicPartition(topic = "topic1", partitions = { "0", "1" }),\n @TopicPartition(topic = "topic2", partitions = "0",\n partitionOffsets = @PartitionOffset(partition = "1", initialOffset = "100"))\n })\npublic void listen(ConsumerRecord record) {\n ...\n}\n')])])]),a("p",[e._v("You can specify each partition in the "),a("code",[e._v("partitions")]),e._v(" or "),a("code",[e._v("partitionOffsets")]),e._v(" attribute but not both.")]),e._v(" "),a("p",[e._v("As with most annotation properties, you can use SpEL expressions; for an example of how to generate a large list of partitions, see "),a("a",{attrs:{href:"#tip-assign-all-parts"}},[e._v("[tip-assign-all-parts]")]),e._v(".")]),e._v(" "),a("p",[e._v("Starting with version 2.5.5, you can apply an initial offset to all assigned partitions:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "thing3", topicPartitions =\n { @TopicPartition(topic = "topic1", partitions = { "0", "1" },\n partitionOffsets = @PartitionOffset(partition = "*", initialOffset = "0"))\n })\npublic void listen(ConsumerRecord record) {\n ...\n}\n')])])]),a("p",[e._v("The "),a("code",[e._v("*")]),e._v(" wildcard represents all partitions in the "),a("code",[e._v("partitions")]),e._v(" attribute.\nThere must only be one "),a("code",[e._v("@PartitionOffset")]),e._v(" with the wildcard in each "),a("code",[e._v("@TopicPartition")]),e._v(".")]),e._v(" "),a("p",[e._v("In addition, when the listener implements "),a("code",[e._v("ConsumerSeekAware")]),e._v(", "),a("code",[e._v("onPartitionsAssigned")]),e._v(" is now called, even when using manual assignment.\nThis allows, for example, any arbitrary seek operations at that time.")]),e._v(" "),a("p",[e._v("Starting with version 2.6.4, you can specify a comma-delimited list of partitions, or partition ranges:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "pp", autoStartup = "false",\n topicPartitions = @TopicPartition(topic = "topic1",\n partitions = "0-5, 7, 10-15"))\npublic void process(String in) {\n ...\n}\n')])])]),a("p",[e._v("The range is inclusive; the example above will assign partitions "),a("code",[e._v("0, 1, 2, 3, 4, 5, 7, 10, 11, 12, 13, 14, 15")]),e._v(".")]),e._v(" "),a("p",[e._v("The same technique can be used when specifying initial offsets:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "thing3", topicPartitions =\n { @TopicPartition(topic = "topic1",\n partitionOffsets = @PartitionOffset(partition = "0-5", initialOffset = "0"))\n })\npublic void listen(ConsumerRecord record) {\n ...\n}\n')])])]),a("p",[e._v("The initial offset will be applied to all 6 partitions.")]),e._v(" "),a("h6",{attrs:{id:"manual-acknowledgment"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#manual-acknowledgment"}},[e._v("#")]),e._v(" Manual Acknowledgment")]),e._v(" "),a("p",[e._v("When using manual "),a("code",[e._v("AckMode")]),e._v(", you can also provide the listener with the "),a("code",[e._v("Acknowledgment")]),e._v(".\nThe following example also shows how to use a different container factory.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "cat", topics = "myTopic",\n containerFactory = "kafkaManualAckListenerContainerFactory")\npublic void listen(String data, Acknowledgment ack) {\n ...\n ack.acknowledge();\n}\n')])])]),a("h6",{attrs:{id:"consumer-record-metadata"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#consumer-record-metadata"}},[e._v("#")]),e._v(" Consumer Record Metadata")]),e._v(" "),a("p",[e._v("Finally, metadata about the record is available from message headers.\nYou can use the following header names to retrieve the headers of the message:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("KafkaHeaders.OFFSET")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.RECEIVED_MESSAGE_KEY")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.RECEIVED_TOPIC")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.RECEIVED_PARTITION_ID")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.RECEIVED_TIMESTAMP")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.TIMESTAMP_TYPE")])])])]),e._v(" "),a("p",[e._v("Starting with version 2.5 the "),a("code",[e._v("RECEIVED_MESSAGE_KEY")]),e._v(" is not present if the incoming record has a "),a("code",[e._v("null")]),e._v(" key; previously the header was populated with a "),a("code",[e._v("null")]),e._v(" value.\nThis change is to make the framework consistent with "),a("code",[e._v("spring-messaging")]),e._v(" conventions where "),a("code",[e._v("null")]),e._v(" valued headers are not present.")]),e._v(" "),a("p",[e._v("The following example shows how to use the headers:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "qux", topicPattern = "myTopic1")\npublic void listen(@Payload String foo,\n @Header(name = KafkaHeaders.RECEIVED_MESSAGE_KEY, required = false) Integer key,\n @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition,\n @Header(KafkaHeaders.RECEIVED_TOPIC) String topic,\n @Header(KafkaHeaders.RECEIVED_TIMESTAMP) long ts\n ) {\n ...\n}\n')])])]),a("p",[e._v("Starting with version 2.5, instead of using discrete headers, you can receive record metadata in a "),a("code",[e._v("ConsumerRecordMetadata")]),e._v(" parameter.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@KafkaListener(...)\npublic void listen(String str, ConsumerRecordMetadata meta) {\n ...\n}\n")])])]),a("p",[e._v("This contains all the data from the "),a("code",[e._v("ConsumerRecord")]),e._v(" except the key and value.")]),e._v(" "),a("h6",{attrs:{id:"batch-listeners"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#batch-listeners"}},[e._v("#")]),e._v(" Batch Listeners")]),e._v(" "),a("p",[e._v("Starting with version 1.1, you can configure "),a("code",[e._v("@KafkaListener")]),e._v(" methods to receive the entire batch of consumer records received from the consumer poll.\nTo configure the listener container factory to create batch listeners, you can set the "),a("code",[e._v("batchListener")]),e._v(" property.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic KafkaListenerContainerFactory batchFactory() {\n ConcurrentKafkaListenerContainerFactory factory =\n new ConcurrentKafkaListenerContainerFactory<>();\n factory.setConsumerFactory(consumerFactory());\n factory.setBatchListener(true); // <<<<<<<<<<<<<<<<<<<<<<<<<\n return factory;\n}\n")])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Starting with version 2.8, you can override the factory’s "),a("code",[e._v("batchListener")]),e._v(" propery using the "),a("code",[e._v("batch")]),e._v(" property on the "),a("code",[e._v("@KafkaListener")]),e._v(" annotation."),a("br"),e._v("This, together with the changes to "),a("a",{attrs:{href:"#error-handlers"}},[e._v("Container Error Handlers")]),e._v(" allows the same factory to be used for both record and batch listeners.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("The following example shows how to receive a list of payloads:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "list", topics = "myTopic", containerFactory = "batchFactory")\npublic void listen(List list) {\n ...\n}\n')])])]),a("p",[e._v("The topic, partition, offset, and so on are available in headers that parallel the payloads.\nThe following example shows how to use the headers:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "list", topics = "myTopic", containerFactory = "batchFactory")\npublic void listen(List list,\n @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) List keys,\n @Header(KafkaHeaders.RECEIVED_PARTITION_ID) List partitions,\n @Header(KafkaHeaders.RECEIVED_TOPIC) List topics,\n @Header(KafkaHeaders.OFFSET) List offsets) {\n ...\n}\n')])])]),a("p",[e._v("Alternatively, you can receive a "),a("code",[e._v("List")]),e._v(" of "),a("code",[e._v("Message")]),e._v(" objects with each offset and other details in each message, but it must be the only parameter (aside from optional "),a("code",[e._v("Acknowledgment")]),e._v(", when using manual commits, and/or "),a("code",[e._v("Consumer")]),e._v(" parameters) defined on the method.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "listMsg", topics = "myTopic", containerFactory = "batchFactory")\npublic void listen14(List> list) {\n ...\n}\n\n@KafkaListener(id = "listMsgAck", topics = "myTopic", containerFactory = "batchFactory")\npublic void listen15(List> list, Acknowledgment ack) {\n ...\n}\n\n@KafkaListener(id = "listMsgAckConsumer", topics = "myTopic", containerFactory = "batchFactory")\npublic void listen16(List> list, Acknowledgment ack, Consumer consumer) {\n ...\n}\n')])])]),a("p",[e._v("No conversion is performed on the payloads in this case.")]),e._v(" "),a("p",[e._v("If the "),a("code",[e._v("BatchMessagingMessageConverter")]),e._v(" is configured with a "),a("code",[e._v("RecordMessageConverter")]),e._v(", you can also add a generic type to the "),a("code",[e._v("Message")]),e._v(" parameter and the payloads are converted.\nSee "),a("a",{attrs:{href:"#payload-conversion-with-batch"}},[e._v("Payload Conversion with Batch Listeners")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("You can also receive a list of "),a("code",[e._v("ConsumerRecord")]),e._v(" objects, but it must be the only parameter (aside from optional "),a("code",[e._v("Acknowledgment")]),e._v(", when using manual commits and "),a("code",[e._v("Consumer")]),e._v(" parameters) defined on the method.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "listCRs", topics = "myTopic", containerFactory = "batchFactory")\npublic void listen(List> list) {\n ...\n}\n\n@KafkaListener(id = "listCRsAck", topics = "myTopic", containerFactory = "batchFactory")\npublic void listen(List> list, Acknowledgment ack) {\n ...\n}\n')])])]),a("p",[e._v("Starting with version 2.2, the listener can receive the complete "),a("code",[e._v("ConsumerRecords")]),e._v(" object returned by the "),a("code",[e._v("poll()")]),e._v(" method, letting the listener access additional methods, such as "),a("code",[e._v("partitions()")]),e._v(" (which returns the "),a("code",[e._v("TopicPartition")]),e._v(" instances in the list) and "),a("code",[e._v("records(TopicPartition)")]),e._v(" (which gets selective records).\nAgain, this must be the only parameter (aside from optional "),a("code",[e._v("Acknowledgment")]),e._v(", when using manual commits or "),a("code",[e._v("Consumer")]),e._v(" parameters) on the method.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "pollResults", topics = "myTopic", containerFactory = "batchFactory")\npublic void pollResults(ConsumerRecords records) {\n ...\n}\n')])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If the container factory has a "),a("code",[e._v("RecordFilterStrategy")]),e._v(" configured, it is ignored for "),a("code",[e._v("ConsumerRecords")]),e._v(" listeners, with a "),a("code",[e._v("WARN")]),e._v(" log message emitted."),a("br"),e._v("Records can only be filtered with a batch listener if the "),a("code",[e._v(">")]),e._v(" form of listener is used."),a("br"),e._v("By default, records are filtered one-at-a-time; starting with version 2.8, you can override "),a("code",[e._v("filterBatch")]),e._v(" to filter the entire batch in one call.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h6",{attrs:{id:"annotation-properties"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#annotation-properties"}},[e._v("#")]),e._v(" Annotation Properties")]),e._v(" "),a("p",[e._v("Starting with version 2.0, the "),a("code",[e._v("id")]),e._v(" property (if present) is used as the Kafka consumer "),a("code",[e._v("group.id")]),e._v(" property, overriding the configured property in the consumer factory, if present.\nYou can also set "),a("code",[e._v("groupId")]),e._v(" explicitly or set "),a("code",[e._v("idIsGroup")]),e._v(" to false to restore the previous behavior of using the consumer factory "),a("code",[e._v("group.id")]),e._v(".")]),e._v(" "),a("p",[e._v("You can use property placeholders or SpEL expressions within most annotation properties, as the following example shows:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(topics = "${some.property}")\n\n@KafkaListener(topics = "#{someBean.someProperty}",\n groupId = "#{someBean.someProperty}.group")\n')])])]),a("p",[e._v("Starting with version 2.1.2, the SpEL expressions support a special token: "),a("code",[e._v("__listener")]),e._v(".\nIt is a pseudo bean name that represents the current bean instance within which this annotation exists.")]),e._v(" "),a("p",[e._v("Consider the following example:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic Listener listener1() {\n return new Listener("topic1");\n}\n\n@Bean\npublic Listener listener2() {\n return new Listener("topic2");\n}\n')])])]),a("p",[e._v("Given the beans in the previous example, we can then use the following:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('public class Listener {\n\n private final String topic;\n\n public Listener(String topic) {\n this.topic = topic;\n }\n\n @KafkaListener(topics = "#{__listener.topic}",\n groupId = "#{__listener.topic}.group")\n public void listen(...) {\n ...\n }\n\n public String getTopic() {\n return this.topic;\n }\n\n}\n')])])]),a("p",[e._v("If, in the unlikely event that you have an actual bean called "),a("code",[e._v("__listener")]),e._v(", you can change the expression token byusing the "),a("code",[e._v("beanRef")]),e._v(" attribute.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(beanRef = "__x", topics = "#{__x.topic}",\n groupId = "#{__x.topic}.group")\n')])])]),a("p",[e._v("Starting with version 2.2.4, you can specify Kafka consumer properties directly on the annotation, these will override any properties with the same name configured in the consumer factory. You "),a("strong",[e._v("cannot")]),e._v(" specify the "),a("code",[e._v("group.id")]),e._v(" and "),a("code",[e._v("client.id")]),e._v(" properties this way; they will be ignored; use the "),a("code",[e._v("groupId")]),e._v(" and "),a("code",[e._v("clientIdPrefix")]),e._v(" annotation properties for those.")]),e._v(" "),a("p",[e._v("The properties are specified as individual strings with the normal Java "),a("code",[e._v("Properties")]),e._v(" file format: "),a("code",[e._v("foo:bar")]),e._v(", "),a("code",[e._v("foo=bar")]),e._v(", or "),a("code",[e._v("foo bar")]),e._v(".")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(topics = "myTopic", groupId = "group", properties = {\n "max.poll.interval.ms:60000",\n ConsumerConfig.MAX_POLL_RECORDS_CONFIG + "=100"\n})\n')])])]),a("p",[e._v("The following is an example of the corresponding listeners for the example in "),a("a",{attrs:{href:"#routing-template"}},[e._v("Using "),a("code",[e._v("RoutingKafkaTemplate")])]),e._v(".")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "one", topics = "one")\npublic void listen1(String in) {\n System.out.println("1: " + in);\n}\n\n@KafkaListener(id = "two", topics = "two",\n properties = "value.deserializer:org.apache.kafka.common.serialization.ByteArrayDeserializer")\npublic void listen2(byte[] in) {\n System.out.println("2: " + new String(in));\n}\n')])])]),a("h5",{attrs:{id:"obtaining-the-consumer-group-id"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#obtaining-the-consumer-group-id"}},[e._v("#")]),e._v(" Obtaining the Consumer "),a("code",[e._v("group.id")])]),e._v(" "),a("p",[e._v("When running the same listener code in multiple containers, it may be useful to be able to determine which container (identified by its "),a("code",[e._v("group.id")]),e._v(" consumer property) that a record came from.")]),e._v(" "),a("p",[e._v("You can call "),a("code",[e._v("KafkaUtils.getConsumerGroupId()")]),e._v(" on the listener thread to do this.\nAlternatively, you can access the group id in a method parameter.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "bar", topicPattern = "${topicTwo:annotated2}", exposeGroupId = "${always:true}")\npublic void listener(@Payload String foo,\n @Header(KafkaHeaders.GROUP_ID) String groupId) {\n...\n}\n')])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("This is available in record listeners and batch listeners that receive a "),a("code",[e._v("List")]),e._v(" of records."),a("br"),e._v("It is "),a("strong",[e._v("not")]),e._v(" available in a batch listener that receives a "),a("code",[e._v("ConsumerRecords")]),e._v(" argument."),a("br"),e._v("Use the "),a("code",[e._v("KafkaUtils")]),e._v(" mechanism in that case.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"container-thread-naming"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#container-thread-naming"}},[e._v("#")]),e._v(" Container Thread Naming")]),e._v(" "),a("p",[e._v("Listener containers currently use two task executors, one to invoke the consumer and another that is used to invoke the listener when the kafka consumer property "),a("code",[e._v("enable.auto.commit")]),e._v(" is "),a("code",[e._v("false")]),e._v(".\nYou can provide custom executors by setting the "),a("code",[e._v("consumerExecutor")]),e._v(" and "),a("code",[e._v("listenerExecutor")]),e._v(" properties of the container’s "),a("code",[e._v("ContainerProperties")]),e._v(".\nWhen using pooled executors, be sure that enough threads are available to handle the concurrency across all the containers in which they are used.\nWhen using the "),a("code",[e._v("ConcurrentMessageListenerContainer")]),e._v(", a thread from each is used for each consumer ("),a("code",[e._v("concurrency")]),e._v(").")]),e._v(" "),a("p",[e._v("If you do not provide a consumer executor, a "),a("code",[e._v("SimpleAsyncTaskExecutor")]),e._v(" is used.\nThis executor creates threads with names similar to "),a("code",[e._v("-C-1")]),e._v(" (consumer thread).\nFor the "),a("code",[e._v("ConcurrentMessageListenerContainer")]),e._v(", the "),a("code",[e._v("")]),e._v(" part of the thread name becomes "),a("code",[e._v("-m")]),e._v(", where "),a("code",[e._v("m")]),e._v(" represents the consumer instance."),a("code",[e._v("n")]),e._v(" increments each time the container is started.\nSo, with a bean name of "),a("code",[e._v("container")]),e._v(", threads in this container will be named "),a("code",[e._v("container-0-C-1")]),e._v(", "),a("code",[e._v("container-1-C-1")]),e._v(" etc., after the container is started the first time; "),a("code",[e._v("container-0-C-2")]),e._v(", "),a("code",[e._v("container-1-C-2")]),e._v(" etc., after a stop and subsequent start.")]),e._v(" "),a("h5",{attrs:{id:"kafkalistener-as-a-meta-annotation"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#kafkalistener-as-a-meta-annotation"}},[e._v("#")]),e._v(" "),a("code",[e._v("@KafkaListener")]),e._v(" as a Meta Annotation")]),e._v(" "),a("p",[e._v("Starting with version 2.2, you can now use "),a("code",[e._v("@KafkaListener")]),e._v(" as a meta annotation.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Target(ElementType.METHOD)\n@Retention(RetentionPolicy.RUNTIME)\n@KafkaListener\npublic @interface MyThreeConsumersListener {\n\n @AliasFor(annotation = KafkaListener.class, attribute = "id")\n String id();\n\n @AliasFor(annotation = KafkaListener.class, attribute = "topics")\n String[] topics();\n\n @AliasFor(annotation = KafkaListener.class, attribute = "concurrency")\n String concurrency() default "3";\n\n}\n')])])]),a("p",[e._v("You must alias at least one of "),a("code",[e._v("topics")]),e._v(", "),a("code",[e._v("topicPattern")]),e._v(", or "),a("code",[e._v("topicPartitions")]),e._v(" (and, usually, "),a("code",[e._v("id")]),e._v(" or "),a("code",[e._v("groupId")]),e._v(" unless you have specified a "),a("code",[e._v("group.id")]),e._v(" in the consumer factory configuration).\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@MyThreeConsumersListener(id = "my.group", topics = "my.topic")\npublic void listen1(String in) {\n ...\n}\n')])])]),a("h5",{attrs:{id:"kafkalistener-on-a-class"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#kafkalistener-on-a-class"}},[e._v("#")]),e._v(" "),a("code",[e._v("@KafkaListener")]),e._v(" on a Class")]),e._v(" "),a("p",[e._v("When you use "),a("code",[e._v("@KafkaListener")]),e._v(" at the class-level, you must specify "),a("code",[e._v("@KafkaHandler")]),e._v(" at the method level.\nWhen messages are delivered, the converted message payload type is used to determine which method to call.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "multi", topics = "myTopic")\nstatic class MultiListenerBean {\n\n @KafkaHandler\n public void listen(String foo) {\n ...\n }\n\n @KafkaHandler\n public void listen(Integer bar) {\n ...\n }\n\n @KafkaHandler(isDefault = true)\n public void listenDefault(Object object) {\n ...\n }\n\n}\n')])])]),a("p",[e._v("Starting with version 2.1.3, you can designate a "),a("code",[e._v("@KafkaHandler")]),e._v(" method as the default method that is invoked if there is no match on other methods.\nAt most, one method can be so designated.\nWhen using "),a("code",[e._v("@KafkaHandler")]),e._v(" methods, the payload must have already been converted to the domain object (so the match can be performed).\nUse a custom deserializer, the "),a("code",[e._v("JsonDeserializer")]),e._v(", or the "),a("code",[e._v("JsonMessageConverter")]),e._v(" with its "),a("code",[e._v("TypePrecedence")]),e._v(" set to "),a("code",[e._v("TYPE_ID")]),e._v(".\nSee "),a("a",{attrs:{href:"#serdes"}},[e._v("Serialization, Deserialization, and Message Conversion")]),e._v(" for more information.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Due to some limitations in the way Spring resolves method arguments, a default "),a("code",[e._v("@KafkaHandler")]),e._v(" cannot receive discrete headers; it must use the "),a("code",[e._v("ConsumerRecordMetadata")]),e._v(" as discussed in "),a("a",{attrs:{href:"#consumer-record-metadata"}},[e._v("Consumer Record Metadata")]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("For example:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@KafkaHandler(isDefault = true)\npublic void listenDefault(Object object, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {\n ...\n}\n")])])]),a("p",[e._v("This won’t work if the object is a "),a("code",[e._v("String")]),e._v("; the "),a("code",[e._v("topic")]),e._v(" parameter will also get a reference to "),a("code",[e._v("object")]),e._v(".")]),e._v(" "),a("p",[e._v("If you need metadata about the record in a default method, use this:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@KafkaHandler(isDefault = true)\nvoid listen(Object in, @Header(KafkaHeaders.RECORD_METADATA) ConsumerRecordMetadata meta) {\n String topic = meta.topic();\n ...\n}\n")])])]),a("h5",{attrs:{id:"kafkalistener-attribute-modification"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#kafkalistener-attribute-modification"}},[e._v("#")]),e._v(" "),a("code",[e._v("@KafkaListener")]),e._v(" Attribute Modification")]),e._v(" "),a("p",[e._v("Starting with version 2.7.2, you can now programmatically modify annotation attributes before the container is created.\nTo do so, add one or more "),a("code",[e._v("KafkaListenerAnnotationBeanPostProcessor.AnnotationEnhancer")]),e._v(" to the application context."),a("code",[e._v("AnnotationEnhancer")]),e._v(" is a "),a("code",[e._v("BiFunction, AnnotatedElement, Map")]),e._v(" and must return a map of attributes.\nThe attribute values can contain SpEL and/or property placeholders; the enhancer is called before any resolution is performed.\nIf more than one enhancer is present, and they implement "),a("code",[e._v("Ordered")]),e._v(", they will be invoked in order.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[a("code",[e._v("AnnotationEnhancer")]),e._v(" bean definitions must be declared "),a("code",[e._v("static")]),e._v(" because they are required very early in the application context’s lifecycle.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("An example follows:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic static AnnotationEnhancer groupIdEnhancer() {\n return (attrs, element) -> {\n attrs.put("groupId", attrs.get("id") + "." + (element instanceof Class\n ? ((Class) element).getSimpleName()\n : ((Method) element).getDeclaringClass().getSimpleName()\n + "." + ((Method) element).getName()));\n return attrs;\n };\n}\n')])])]),a("h5",{attrs:{id:"kafkalistener-lifecycle-management"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#kafkalistener-lifecycle-management"}},[e._v("#")]),e._v(" "),a("code",[e._v("@KafkaListener")]),e._v(" Lifecycle Management")]),e._v(" "),a("p",[e._v("The listener containers created for "),a("code",[e._v("@KafkaListener")]),e._v(" annotations are not beans in the application context.\nInstead, they are registered with an infrastructure bean of type "),a("code",[e._v("KafkaListenerEndpointRegistry")]),e._v(".\nThis bean is automatically declared by the framework and manages the containers' lifecycles; it will auto-start any containers that have "),a("code",[e._v("autoStartup")]),e._v(" set to "),a("code",[e._v("true")]),e._v(".\nAll containers created by all container factories must be in the same "),a("code",[e._v("phase")]),e._v(".\nSee "),a("a",{attrs:{href:"#container-auto-startup"}},[e._v("Listener Container Auto Startup")]),e._v(" for more information.\nYou can manage the lifecycle programmatically by using the registry.\nStarting or stopping the registry will start or stop all the registered containers.\nAlternatively, you can get a reference to an individual container by using its "),a("code",[e._v("id")]),e._v(" attribute.\nYou can set "),a("code",[e._v("autoStartup")]),e._v(" on the annotation, which overrides the default setting configured into the container factory.\nYou can get a reference to the bean from the application context, such as auto-wiring, to manage its registered containers.\nThe following examples show how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "myContainer", topics = "myTopic", autoStartup = "false")\npublic void listen(...) { ... }\n')])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Autowired\nprivate KafkaListenerEndpointRegistry registry;\n\n...\n\n this.registry.getListenerContainer("myContainer").start();\n\n...\n')])])]),a("p",[e._v("The registry only maintains the life cycle of containers it manages; containers declared as beans are not managed by the registry and can be obtained from the application context.\nA collection of managed containers can be obtained by calling the registry’s "),a("code",[e._v("getListenerContainers()")]),e._v(" method.\nVersion 2.2.5 added a convenience method "),a("code",[e._v("getAllListenerContainers()")]),e._v(", which returns a collection of all containers, including those managed by the registry and those declared as beans.\nThe collection returned will include any prototype beans that have been initialized, but it will not initialize any lazy bean declarations.")]),e._v(" "),a("h5",{attrs:{id:"kafkalistener-payload-validation"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#kafkalistener-payload-validation"}},[e._v("#")]),e._v(" "),a("code",[e._v("@KafkaListener")]),e._v(" "),a("code",[e._v("@Payload")]),e._v(" Validation")]),e._v(" "),a("p",[e._v("Starting with version 2.2, it is now easier to add a "),a("code",[e._v("Validator")]),e._v(" to validate "),a("code",[e._v("@KafkaListener")]),e._v(" "),a("code",[e._v("@Payload")]),e._v(" arguments.\nPreviously, you had to configure a custom "),a("code",[e._v("DefaultMessageHandlerMethodFactory")]),e._v(" and add it to the registrar.\nNow, you can add the validator to the registrar itself.\nThe following code shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Configuration\n@EnableKafka\npublic class Config implements KafkaListenerConfigurer {\n\n ...\n\n @Override\n public void configureKafkaListeners(KafkaListenerEndpointRegistrar registrar) {\n registrar.setValidator(new MyValidator());\n }\n\n}\n")])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("When you use Spring Boot with the validation starter, a "),a("code",[e._v("LocalValidatorFactoryBean")]),e._v(" is auto-configured, as the following example shows:")])])]),e._v(" "),a("tbody")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Configuration\n@EnableKafka\npublic class Config implements KafkaListenerConfigurer {\n\n @Autowired\n private LocalValidatorFactoryBean validator;\n ...\n\n @Override\n public void configureKafkaListeners(KafkaListenerEndpointRegistrar registrar) {\n registrar.setValidator(this.validator);\n }\n}\n")])])]),a("p",[e._v("The following examples show how to validate:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public static class ValidatedClass {\n\n @Max(10)\n private int bar;\n\n public int getBar() {\n return this.bar;\n }\n\n public void setBar(int bar) {\n this.bar = bar;\n }\n\n}\n")])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id="validated", topics = "annotated35", errorHandler = "validationErrorHandler",\n containerFactory = "kafkaJsonListenerContainerFactory")\npublic void validatedListener(@Payload @Valid ValidatedClass val) {\n ...\n}\n\n@Bean\npublic KafkaListenerErrorHandler validationErrorHandler() {\n return (m, e) -> {\n ...\n };\n}\n')])])]),a("p",[e._v("Starting with version 2.5.11, validation now works on payloads for "),a("code",[e._v("@KafkaHandler")]),e._v(" methods in a class-level listener.\nSee "),a("a",{attrs:{href:"#class-level-kafkalistener"}},[a("code",[e._v("@KafkaListener")]),e._v(" on a Class")]),e._v(".")]),e._v(" "),a("h5",{attrs:{id:"rebalancing-listeners"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#rebalancing-listeners"}},[e._v("#")]),e._v(" Rebalancing Listeners")]),e._v(" "),a("p",[a("code",[e._v("ContainerProperties")]),e._v(" has a property called "),a("code",[e._v("consumerRebalanceListener")]),e._v(", which takes an implementation of the Kafka client’s "),a("code",[e._v("ConsumerRebalanceListener")]),e._v(" interface.\nIf this property is not provided, the container configures a logging listener that logs rebalance events at the "),a("code",[e._v("INFO")]),e._v(" level.\nThe framework also adds a sub-interface "),a("code",[e._v("ConsumerAwareRebalanceListener")]),e._v(".\nThe following listing shows the "),a("code",[e._v("ConsumerAwareRebalanceListener")]),e._v(" interface definition:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public interface ConsumerAwareRebalanceListener extends ConsumerRebalanceListener {\n\n void onPartitionsRevokedBeforeCommit(Consumer consumer, Collection partitions);\n\n void onPartitionsRevokedAfterCommit(Consumer consumer, Collection partitions);\n\n void onPartitionsAssigned(Consumer consumer, Collection partitions);\n\n void onPartitionsLost(Consumer consumer, Collection partitions);\n\n}\n")])])]),a("p",[e._v("Notice that there are two callbacks when partitions are revoked.\nThe first is called immediately.\nThe second is called after any pending offsets are committed.\nThis is useful if you wish to maintain offsets in some external repository, as the following example shows:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("containerProperties.setConsumerRebalanceListener(new ConsumerAwareRebalanceListener() {\n\n @Override\n public void onPartitionsRevokedBeforeCommit(Consumer consumer, Collection partitions) {\n // acknowledge any pending Acknowledgments (if using manual acks)\n }\n\n @Override\n public void onPartitionsRevokedAfterCommit(Consumer consumer, Collection partitions) {\n // ...\n store(consumer.position(partition));\n // ...\n }\n\n @Override\n public void onPartitionsAssigned(Collection partitions) {\n // ...\n consumer.seek(partition, offsetTracker.getOffset() + 1);\n // ...\n }\n});\n")])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Starting with version 2.4, a new method "),a("code",[e._v("onPartitionsLost()")]),e._v(" has been added (similar to a method with the same name in "),a("code",[e._v("ConsumerRebalanceLister")]),e._v(")."),a("br"),e._v("The default implementation on "),a("code",[e._v("ConsumerRebalanceLister")]),e._v(" simply calls "),a("code",[e._v("onPartionsRevoked")]),e._v("."),a("br"),e._v("The default implementation on "),a("code",[e._v("ConsumerAwareRebalanceListener")]),e._v(" does nothing."),a("br"),e._v("When supplying the listener container with a custom listener (of either type), it is important that your implementation not call "),a("code",[e._v("onPartitionsRevoked")]),e._v(" from "),a("code",[e._v("onPartitionsLost")]),e._v("."),a("br"),e._v("If you implement "),a("code",[e._v("ConsumerRebalanceListener")]),e._v(" you should override the default method."),a("br"),e._v("This is because the listener container will call its own "),a("code",[e._v("onPartitionsRevoked")]),e._v(" from its implementation of "),a("code",[e._v("onPartitionsLost")]),e._v(" after calling the method on your implementation."),a("br"),e._v("If you implementation delegates to the default behavior, "),a("code",[e._v("onPartitionsRevoked")]),e._v(" will be called twice each time the "),a("code",[e._v("Consumer")]),e._v(" calls that method on the container’s listener.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"forwarding-listener-results-using-sendto"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#forwarding-listener-results-using-sendto"}},[e._v("#")]),e._v(" Forwarding Listener Results using "),a("code",[e._v("@SendTo")])]),e._v(" "),a("p",[e._v("Starting with version 2.0, if you also annotate a "),a("code",[e._v("@KafkaListener")]),e._v(" with a "),a("code",[e._v("@SendTo")]),e._v(" annotation and the method invocation returns a result, the result is forwarded to the topic specified by the "),a("code",[e._v("@SendTo")]),e._v(".")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("@SendTo")]),e._v(" value can have several forms:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v('@SendTo("someTopic")')]),e._v(" routes to the literal topic")])]),e._v(" "),a("li",[a("p",[a("code",[e._v('@SendTo("#{someExpression}")')]),e._v(" routes to the topic determined by evaluating the expression once during application context initialization.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v('@SendTo("!{someExpression}")')]),e._v(" routes to the topic determined by evaluating the expression at runtime.\nThe "),a("code",[e._v("#root")]),e._v(" object for the evaluation has three properties:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("request")]),e._v(": The inbound "),a("code",[e._v("ConsumerRecord")]),e._v(" (or "),a("code",[e._v("ConsumerRecords")]),e._v(" object for a batch listener))")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("source")]),e._v(": The "),a("code",[e._v("org.springframework.messaging.Message")]),e._v(" converted from the "),a("code",[e._v("request")]),e._v(".")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("result")]),e._v(": The method return result.")])])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("@SendTo")]),e._v(" (no properties): This is treated as "),a("code",[e._v("!{source.headers['kafka_replyTopic']}")]),e._v(" (since version 2.1.3).")])])]),e._v(" "),a("p",[e._v("Starting with versions 2.1.11 and 2.2.1, property placeholders are resolved within "),a("code",[e._v("@SendTo")]),e._v(" values.")]),e._v(" "),a("p",[e._v("The result of the expression evaluation must be a "),a("code",[e._v("String")]),e._v(" that represents the topic name.\nThe following examples show the various ways to use "),a("code",[e._v("@SendTo")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(topics = "annotated21")\n@SendTo("!{request.value()}") // runtime SpEL\npublic String replyingListener(String in) {\n ...\n}\n\n@KafkaListener(topics = "${some.property:annotated22}")\n@SendTo("#{myBean.replyTopic}") // config time SpEL\npublic Collection replyingBatchListener(List in) {\n ...\n}\n\n@KafkaListener(topics = "annotated23", errorHandler = "replyErrorHandler")\n@SendTo("annotated23reply") // static reply topic definition\npublic String replyingListenerWithErrorHandler(String in) {\n ...\n}\n...\n@KafkaListener(topics = "annotated25")\n@SendTo("annotated25reply1")\npublic class MultiListenerSendTo {\n\n @KafkaHandler\n public String foo(String in) {\n ...\n }\n\n @KafkaHandler\n @SendTo("!{\'annotated25reply2\'}")\n public String bar(@Payload(required = false) KafkaNull nul,\n @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) int key) {\n ...\n }\n\n}\n')])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("In order to support "),a("code",[e._v("@SendTo")]),e._v(", the listener container factory must be provided with a "),a("code",[e._v("KafkaTemplate")]),e._v(" (in its "),a("code",[e._v("replyTemplate")]),e._v(" property), which is used to send the reply."),a("br"),e._v("This should be a "),a("code",[e._v("KafkaTemplate")]),e._v(" and not a "),a("code",[e._v("ReplyingKafkaTemplate")]),e._v(" which is used on the client-side for request/reply processing."),a("br"),e._v("When using Spring Boot, boot will auto-configure the template into the factory; when configuring your own factory, it must be set as shown in the examples below.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Starting with version 2.2, you can add a "),a("code",[e._v("ReplyHeadersConfigurer")]),e._v(" to the listener container factory.\nThis is consulted to determine which headers you want to set in the reply message.\nThe following example shows how to add a "),a("code",[e._v("ReplyHeadersConfigurer")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() {\n ConcurrentKafkaListenerContainerFactory factory =\n new ConcurrentKafkaListenerContainerFactory<>();\n factory.setConsumerFactory(cf());\n factory.setReplyTemplate(template());\n factory.setReplyHeadersConfigurer((k, v) -> k.equals("cat"));\n return factory;\n}\n')])])]),a("p",[e._v("You can also add more headers if you wish.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() {\n ConcurrentKafkaListenerContainerFactory factory =\n new ConcurrentKafkaListenerContainerFactory<>();\n factory.setConsumerFactory(cf());\n factory.setReplyTemplate(template());\n factory.setReplyHeadersConfigurer(new ReplyHeadersConfigurer() {\n\n @Override\n public boolean shouldCopy(String headerName, Object headerValue) {\n return false;\n }\n\n @Override\n public Map additionalHeaders() {\n return Collections.singletonMap("qux", "fiz");\n }\n\n });\n return factory;\n}\n')])])]),a("p",[e._v("When you use "),a("code",[e._v("@SendTo")]),e._v(", you must configure the "),a("code",[e._v("ConcurrentKafkaListenerContainerFactory")]),e._v(" with a "),a("code",[e._v("KafkaTemplate")]),e._v(" in its "),a("code",[e._v("replyTemplate")]),e._v(" property to perform the send.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Unless you use "),a("a",{attrs:{href:"#replying-template"}},[e._v("request/reply semantics")]),e._v(" only the simple "),a("code",[e._v("send(topic, value)")]),e._v(" method is used, so you may wish to create a subclass to generate the partition or key."),a("br"),e._v("The following example shows how to do so:")])])]),e._v(" "),a("tbody")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic KafkaTemplate myReplyingTemplate() {\n return new KafkaTemplate(producerFactory()) {\n\n @Override\n public ListenableFuture> send(String topic, String data) {\n return super.send(topic, partitionForData(data), keyForData(data), data);\n }\n\n ...\n\n };\n}\n")])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If the listener method returns "),a("code",[e._v("Message")]),e._v(" or "),a("code",[e._v("Collection>")]),e._v(", the listener method is responsible for setting up the message headers for the reply."),a("br"),e._v("For example, when handling a request from a "),a("code",[e._v("ReplyingKafkaTemplate")]),e._v(", you might do the following:"),a("br"),a("br"),a("code",[e._v('
@KafkaListener(id = "messageReturned", topics = "someTopic")
public Message listen(String in, @Header(KafkaHeaders.REPLY_TOPIC) byte[] replyTo,
@Header(KafkaHeaders.CORRELATION_ID) byte[] correlation) {
return MessageBuilder.withPayload(in.toUpperCase())
.setHeader(KafkaHeaders.TOPIC, replyTo)
.setHeader(KafkaHeaders.MESSAGE_KEY, 42)
.setHeader(KafkaHeaders.CORRELATION_ID, correlation)
.setHeader("someOtherHeader", "someValue")
.build();
}
')])])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("When using request/reply semantics, the target partition can be requested by the sender.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("You can annotate a "),a("code",[e._v("@KafkaListener")]),e._v(" method with "),a("code",[e._v("@SendTo")]),e._v(" even if no result is returned."),a("br"),e._v("This is to allow the configuration of an "),a("code",[e._v("errorHandler")]),e._v(" that can forward information about a failed message delivery to some topic."),a("br"),e._v("The following example shows how to do so:"),a("br"),a("br"),a("code",[e._v('
@KafkaListener(id = "voidListenerWithReplyingErrorHandler", topics = "someTopic",
errorHandler = "voidSendToErrorHandler")
@SendTo("failures")
public void voidListenerWithReplyingErrorHandler(String in) {
throw new RuntimeException("fail");
}

@Bean
public KafkaListenerErrorHandler voidSendToErrorHandler() {
return (m, e) -> {
return ... // some information about the failure and input data
};
}
')]),a("br"),a("br"),e._v("See "),a("a",{attrs:{href:"#annotation-error-handling"}},[e._v("Handling Exceptions")]),e._v(" for more information.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If a listener method returns an "),a("code",[e._v("Iterable")]),e._v(", by default a record for each element as the value is sent."),a("br"),e._v("Starting with version 2.3.5, set the "),a("code",[e._v("splitIterables")]),e._v(" property on "),a("code",[e._v("@KafkaListener")]),e._v(" to "),a("code",[e._v("false")]),e._v(" and the entire result will be sent as the value of a single "),a("code",[e._v("ProducerRecord")]),e._v("."),a("br"),e._v("This requires a suitable serializer in the reply template’s producer configuration."),a("br"),e._v("However, if the reply is "),a("code",[e._v("Iterable>")]),e._v(" the property is ignored and each message is sent separately.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"filtering-messages"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#filtering-messages"}},[e._v("#")]),e._v(" Filtering Messages")]),e._v(" "),a("p",[e._v("In certain scenarios, such as rebalancing, a message that has already been processed may be redelivered.\nThe framework cannot know whether such a message has been processed or not.\nThat is an application-level function.\nThis is known as the "),a("a",{attrs:{href:"https://www.enterpriseintegrationpatterns.com/patterns/messaging/IdempotentReceiver.html",target:"_blank",rel:"noopener noreferrer"}},[e._v("Idempotent Receiver"),a("OutboundLink")],1),e._v(" pattern and Spring Integration provides an "),a("a",{attrs:{href:"https://docs.spring.io/spring-integration/reference/html/#idempotent-receiver",target:"_blank",rel:"noopener noreferrer"}},[e._v("implementation of it"),a("OutboundLink")],1),e._v(".")]),e._v(" "),a("p",[e._v("The Spring for Apache Kafka project also provides some assistance by means of the "),a("code",[e._v("FilteringMessageListenerAdapter")]),e._v(" class, which can wrap your "),a("code",[e._v("MessageListener")]),e._v(".\nThis class takes an implementation of "),a("code",[e._v("RecordFilterStrategy")]),e._v(" in which you implement the "),a("code",[e._v("filter")]),e._v(" method to signal that a message is a duplicate and should be discarded.\nThis has an additional property called "),a("code",[e._v("ackDiscarded")]),e._v(", which indicates whether the adapter should acknowledge the discarded record.\nIt is "),a("code",[e._v("false")]),e._v(" by default.")]),e._v(" "),a("p",[e._v("When you use "),a("code",[e._v("@KafkaListener")]),e._v(", set the "),a("code",[e._v("RecordFilterStrategy")]),e._v(" (and optionally "),a("code",[e._v("ackDiscarded")]),e._v(") on the container factory so that the listener is wrapped in the appropriate filtering adapter.")]),e._v(" "),a("p",[e._v("In addition, a "),a("code",[e._v("FilteringBatchMessageListenerAdapter")]),e._v(" is provided, for when you use a batch "),a("a",{attrs:{href:"#message-listeners"}},[e._v("message listener")]),e._v(".")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The "),a("code",[e._v("FilteringBatchMessageListenerAdapter")]),e._v(" is ignored if your "),a("code",[e._v("@KafkaListener")]),e._v(" receives a "),a("code",[e._v("ConsumerRecords")]),e._v(" instead of "),a("code",[e._v("List>")]),e._v(", because "),a("code",[e._v("ConsumerRecords")]),e._v(" is immutable.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"retrying-deliveries"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#retrying-deliveries"}},[e._v("#")]),e._v(" Retrying Deliveries")]),e._v(" "),a("p",[e._v("See the "),a("code",[e._v("DefaultErrorHandler")]),e._v(" in "),a("a",{attrs:{href:"#annotation-error-handling"}},[e._v("Handling Exceptions")]),e._v(".")]),e._v(" "),a("h5",{attrs:{id:"starting-kafkalistener-s-in-sequence"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#starting-kafkalistener-s-in-sequence"}},[e._v("#")]),e._v(" Starting "),a("code",[e._v("@KafkaListener")]),e._v(" s in Sequence")]),e._v(" "),a("p",[e._v("A common use case is to start a listener after another listener has consumed all the records in a topic.\nFor example, you may want to load the contents of one or more compacted topics into memory before processing records from other topics.\nStarting with version 2.7.3, a new component "),a("code",[e._v("ContainerGroupSequencer")]),e._v(" has been introduced.\nIt uses the "),a("code",[e._v("@KafkaListener")]),e._v(" "),a("code",[e._v("containerGroup")]),e._v(" property to group containers together and start the containers in the next group, when all the containers in the current group have gone idle.")]),e._v(" "),a("p",[e._v("It is best illustrated with an example.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "listen1", topics = "topic1", containerGroup = "g1", concurrency = "2")\npublic void listen1(String in) {\n}\n\n@KafkaListener(id = "listen2", topics = "topic2", containerGroup = "g1", concurrency = "2")\npublic void listen2(String in) {\n}\n\n@KafkaListener(id = "listen3", topics = "topic3", containerGroup = "g2", concurrency = "2")\npublic void listen3(String in) {\n}\n\n@KafkaListener(id = "listen4", topics = "topic4", containerGroup = "g2", concurrency = "2")\npublic void listen4(String in) {\n}\n\n@Bean\nContainerGroupSequencer sequencer(KafkaListenerEndpointRegistry registry) {\n return new ContainerGroupSequencer(registry, 5000, "g1", "g2");\n}\n')])])]),a("p",[e._v("Here, we have 4 listeners in two groups, "),a("code",[e._v("g1")]),e._v(" and "),a("code",[e._v("g2")]),e._v(".")]),e._v(" "),a("p",[e._v("During application context initialization, the sequencer, sets the "),a("code",[e._v("autoStartup")]),e._v(" property of all the containers in the provided groups to "),a("code",[e._v("false")]),e._v(".\nIt also sets the "),a("code",[e._v("idleEventInterval")]),e._v(" for any containers (that do not already have one set) to the supplied value (5000ms in this case).\nThen, when the sequencer is started by the application context, the containers in the first group are started.\nAs "),a("code",[e._v("ListenerContainerIdleEvent")]),e._v(" s are received, each individual child container in each container is stopped.\nWhen all child containers in a "),a("code",[e._v("ConcurrentMessageListenerContainer")]),e._v(" are stopped, the parent container is stopped.\nWhen all containers in a group have been stopped, the containers in the next group are started.\nThere is no limit to the number of groups or containers in a group.")]),e._v(" "),a("p",[e._v("By default, the containers in the final group ("),a("code",[e._v("g2")]),e._v(" above) are not stopped when they go idle.\nTo modify that behavior, set "),a("code",[e._v("stopLastGroupWhenIdle")]),e._v(" to "),a("code",[e._v("true")]),e._v(" on the sequencer.")]),e._v(" "),a("p",[e._v("As an aside; previously, containers in each group were added to a bean of type "),a("code",[e._v("Collection")]),e._v(" with the bean name being the "),a("code",[e._v("containerGroup")]),e._v(".\nThese collections are now deprecated in favor of beans of type "),a("code",[e._v("ContainerGroup")]),e._v(" with a bean name that is the group name, suffixed with "),a("code",[e._v(".group")]),e._v("; in the example above, there would be 2 beans "),a("code",[e._v("g1.group")]),e._v(" and "),a("code",[e._v("g2.group")]),e._v(".\nThe "),a("code",[e._v("Collection")]),e._v(" beans will be removed in a future release.")]),e._v(" "),a("h5",{attrs:{id:"using-kafkatemplate-to-receive"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#using-kafkatemplate-to-receive"}},[e._v("#")]),e._v(" Using "),a("code",[e._v("KafkaTemplate")]),e._v(" to Receive")]),e._v(" "),a("p",[e._v("This section covers how to use "),a("code",[e._v("KafkaTemplate")]),e._v(" to receive messages.")]),e._v(" "),a("p",[e._v("Starting with version 2.8, the template has four "),a("code",[e._v("receive()")]),e._v(" methods:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("ConsumerRecord receive(String topic, int partition, long offset);\n\nConsumerRecord receive(String topic, int partition, long offset, Duration pollTimeout);\n\nConsumerRecords receive(Collection requested);\n\nConsumerRecords receive(Collection requested, Duration pollTimeout);\n")])])]),a("p",[e._v("As you can see, you need to know the partition and offset of the record(s) you need to retrieve; a new "),a("code",[e._v("Consumer")]),e._v(" is created (and closed) for each operation.")]),e._v(" "),a("p",[e._v("With the last two methods, each record is retrieved individually and the results assembled into a "),a("code",[e._v("ConsumerRecords")]),e._v(" object.\nWhen creating the "),a("code",[e._v("TopicPartitionOffset")]),e._v(" s for the request, only positive, absolute offsets are supported.")]),e._v(" "),a("h4",{attrs:{id:"_4-1-5-listener-container-properties"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-5-listener-container-properties"}},[e._v("#")]),e._v(" 4.1.5. Listener Container Properties")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th",[e._v("Property")]),e._v(" "),a("th",[e._v("Default")]),e._v(" "),a("th",[e._v("Description")])])]),e._v(" "),a("tbody",[a("tr",[a("td"),e._v(" "),a("td",[e._v("1")]),e._v(" "),a("td",[e._v("The number of records before committing pending offsets when the "),a("code",[e._v("ackMode")]),e._v(" is "),a("code",[e._v("COUNT")]),e._v(" or "),a("code",[e._v("COUNT_TIME")]),e._v(".")])]),e._v(" "),a("tr",[a("td",[e._v("wrapping the message listener, invoked in order.")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td",[e._v(".")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td",[e._v("`]")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("5000")]),e._v(" "),a("td",[e._v("The time in milliseconds after which pending offsets are committed when the "),a("code",[e._v("ackMode")]),e._v(" is "),a("code",[e._v("TIME")]),e._v(" or "),a("code",[e._v("COUNT_TIME")]),e._v(".")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("LATEST_ONLY _NO_TX")]),e._v(" "),a("td",[e._v("Whether or not to commit the initial position on assignment; by default, the initial offset will only be committed if the "),a("code",[e._v("ConsumerConfig.AUTO_OFFSET_RESET_CONFIG")]),e._v(" is "),a("code",[e._v("latest")]),e._v(" and it won’t run in a transaction even if there is a transaction manager present."),a("br"),e._v("See the javadocs for "),a("code",[e._v("ContainerProperties.AssignmentCommitOption")]),e._v(" for more information about the available options.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("null")])]),e._v(" "),a("td",[e._v("When not null, a "),a("code",[e._v("Duration")]),e._v(" to sleep between polls when an "),a("code",[e._v("AuthenticationException")]),e._v(" or "),a("code",[e._v("AuthorizationException")]),e._v(" is thrown by the Kafka client."),a("br"),e._v("When null, such exceptions are considered fatal and the container will stop.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("A prefix for the "),a("code",[e._v("client.id")]),e._v(" consumer property."),a("br"),e._v("Overrides the consumer factory "),a("code",[e._v("client.id")]),e._v(" property; in a concurrent container, "),a("code",[e._v("-n")]),e._v(" is added as a suffix for each consumer instance.")]),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("false")]),e._v(" "),a("td",[e._v("Set to "),a("code",[e._v("true")]),e._v(" to always check for a "),a("code",[e._v("DeserializationException")]),e._v(" header when a "),a("code",[e._v("null")]),e._v(" "),a("code",[e._v("key")]),e._v(" is received."),a("br"),e._v("Useful when the consumer code cannot determine that an "),a("code",[e._v("ErrorHandlingDeserializer")]),e._v(" has been configured, such as when using a delegating deserializer.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("false")]),e._v(" "),a("td",[e._v("Set to "),a("code",[e._v("true")]),e._v(" to always check for a "),a("code",[e._v("DeserializationException")]),e._v(" header when a "),a("code",[e._v("null")]),e._v(" "),a("code",[e._v("value")]),e._v(" is received."),a("br"),e._v("Useful when the consumer code cannot determine that an "),a("code",[e._v("ErrorHandlingDeserializer")]),e._v(" has been configured, such as when using a delegating deserializer.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("null")])]),e._v(" "),a("td",[e._v("When present and "),a("code",[e._v("syncCommits")]),e._v(" is "),a("code",[e._v("false")]),e._v(" a callback invoked after the commit completes.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("DEBUG")]),e._v(" "),a("td",[e._v("The logging level for logs pertaining to committing offsets.")])]),e._v(" "),a("tr",[a("td",[e._v(".")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("30s")]),e._v(" "),a("td",[e._v("The time to wait for the consumer to start before logging an error; this might happen if, say, you use a task executor with insufficient threads.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("SimpleAsyncTaskExecutor")])]),e._v(" "),a("td",[e._v("A task executor to run the consumer threads."),a("br"),e._v("The default executor creates threads named "),a("code",[e._v("-C-n")]),e._v("; with the "),a("code",[e._v("KafkaMessageListenerContainer")]),e._v(", the name is the bean name; with the "),a("code",[e._v("ConcurrentMessageListenerContainer")]),e._v(" the name is the bean name suffixed with "),a("code",[e._v("-n")]),e._v(" where n is incremented for each child container.")])]),e._v(" "),a("tr",[a("td",[e._v(".")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td",[e._v(".")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td",[e._v("for more information.")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("null")])]),e._v(" "),a("td",[e._v("Overrides the consumer "),a("code",[e._v("group.id")]),e._v(" property; automatically set by the "),a("code",[e._v("@KafkaListener")]),e._v(" "),a("code",[e._v("id")]),e._v(" or "),a("code",[e._v("groupId")]),e._v(" property.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("5.0")]),e._v(" "),a("td",[e._v("Multiplier for "),a("code",[e._v("idleEventInterval")]),e._v(" that is applied before any records are received."),a("br"),e._v("After a record is received, the multiplier is no longer applied."),a("br"),e._v("Available since version 2.8.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("0")]),e._v(" "),a("td",[e._v("Used to slow down deliveries by sleeping the thread between polls."),a("br"),e._v("The time to process a batch of records plus this value must be less than the "),a("code",[e._v("max.poll.interval.ms")]),e._v(" consumer property.")])]),e._v(" "),a("tr",[a("td",[e._v("."),a("br"),e._v("Also see "),a("code",[e._v("idleBeforeDataMultiplier")]),e._v(".")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td",[e._v(".")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("None")]),e._v(" "),a("td",[e._v("Used to override any arbitrary consumer properties configured on the consumer factory.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("false")])]),e._v(" "),a("td",[e._v("Set to true to log at INFO level all container properties.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("null")])]),e._v(" "),a("td",[e._v("The message listener.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("true")])]),e._v(" "),a("td",[e._v("Whether or not to maintain Micrometer timers for the consumer threads.")])]),e._v(" "),a("tr",[a("td",[e._v("are not present on the broker.")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("30s")]),e._v(" "),a("td",[e._v("How often to check the state of the consumer threads for "),a("code",[e._v("NonResponsiveConsumerEvent")]),e._v(" s."),a("br"),e._v("See "),a("code",[e._v("noPollThreshold")]),e._v(" and "),a("code",[e._v("pollTimeout")]),e._v(".")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("3.0")]),e._v(" "),a("td",[e._v("Multiplied by "),a("code",[e._v("pollTimeOut")]),e._v(" to determine whether to publish a "),a("code",[e._v("NonResponsiveConsumerEvent")]),e._v("."),a("br"),e._v("See "),a("code",[e._v("monitorInterval")]),e._v(".")])]),e._v(" "),a("tr",[a("td",[e._v("`.")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td",[e._v("`.")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("ThreadPoolTaskScheduler")])]),e._v(" "),a("td",[e._v("A scheduler on which to run the consumer monitor task.")])]),e._v(" "),a("tr",[a("td",[e._v("` method until all consumers stop and before publishing the container stopped event.")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td",[e._v("for more information.")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("false")])]),e._v(" "),a("td",[e._v("When the container is stopped, stop processing after the current record instead of after processing all the records from the previous poll.")])]),e._v(" "),a("tr",[a("td",[e._v(".")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("null")])]),e._v(" "),a("td",[e._v("The timeout to use when "),a("code",[e._v("syncCommits")]),e._v(" is "),a("code",[e._v("true")]),e._v("."),a("br"),e._v("When not set, the container will attempt to determine the "),a("code",[e._v("default.api.timeout.ms")]),e._v(" consumer property and use that; otherwise it will use 60 seconds.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("true")])]),e._v(" "),a("td",[e._v("Whether to use sync or async commits for offsets; see "),a("code",[e._v("commitCallback")]),e._v(".")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("n/a")]),e._v(" "),a("td",[e._v("The configured topics, topic pattern or explicitly assigned topics/partitions."),a("br"),e._v("Mutually exclusive; at least one must be provided; enforced by "),a("code",[e._v("ContainerProperties")]),e._v(" constructors.")])]),e._v(" "),a("tr",[a("td",[e._v(".")]),e._v(" "),a("td"),e._v(" "),a("td")])])]),e._v(" "),a("table",[a("thead",[a("tr",[a("th",[e._v("Property")]),e._v(" "),a("th",[e._v("Default")]),e._v(" "),a("th",[e._v("Description")])])]),e._v(" "),a("tbody",[a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("DefaultAfterRollbackProcessor")])]),e._v(" "),a("td",[e._v("An "),a("code",[e._v("AfterRollbackProcessor")]),e._v(" to invoke after a transaction is rolled back.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("application context")]),e._v(" "),a("td",[e._v("The event publisher.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("See desc.")]),e._v(" "),a("td",[e._v("Deprecated - see "),a("code",[e._v("commonErrorHandler")]),e._v(".")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("null")])]),e._v(" "),a("td",[e._v("Set a "),a("code",[e._v("BatchInterceptor")]),e._v(" to call before invoking the batch listener; does not apply to record listeners."),a("br"),e._v("Also see "),a("code",[e._v("interceptBeforeTx")]),e._v(".")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("bean name")]),e._v(" "),a("td",[e._v("The bean name of the container; suffixed with "),a("code",[e._v("-n")]),e._v(" for child containers.")])]),e._v(" "),a("tr",[a("td",[e._v(".")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("ContainerProperties")])]),e._v(" "),a("td",[e._v("The container properties instance.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("See desc.")]),e._v(" "),a("td",[e._v("Deprecated - see "),a("code",[e._v("commonErrorHandler")]),e._v(".")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("See desc.")]),e._v(" "),a("td",[e._v("Deprecated - see "),a("code",[e._v("commonErrorHandler")]),e._v(".")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("See desc.")]),e._v(" "),a("td",[e._v("The "),a("code",[e._v("containerProperties.groupId")]),e._v(", if present, otherwise the "),a("code",[e._v("group.id")]),e._v(" property from the consumer factory.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("true")])]),e._v(" "),a("td",[e._v("Determines whether the "),a("code",[e._v("recordInterceptor")]),e._v(" is called before or after a transaction starts.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("See desc.")]),e._v(" "),a("td",[e._v("The bean name for user-configured containers or the "),a("code",[e._v("id")]),e._v(" attribute of "),a("code",[e._v("@KafkaListener")]),e._v(" s.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("True if a consumer pause has been requested.")]),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("null")])]),e._v(" "),a("td",[e._v("Set a "),a("code",[e._v("RecordInterceptor")]),e._v(" to call before invoking the record listener; does not apply to batch listeners."),a("br"),e._v("Also see "),a("code",[e._v("interceptBeforeTx")]),e._v(".")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("30s")]),e._v(" "),a("td",[e._v("When the "),a("code",[e._v("missingTopicsFatal")]),e._v(" container property is "),a("code",[e._v("true")]),e._v(", how long to wait, in seconds, for the "),a("code",[e._v("describeTopics")]),e._v(" operation to complete.")])])])]),e._v(" "),a("table",[a("thead",[a("tr",[a("th",[e._v("Property")]),e._v(" "),a("th",[e._v("Default")]),e._v(" "),a("th",[e._v("Description")])])]),e._v(" "),a("tbody",[a("tr",[a("td",[e._v(".")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td",[e._v(".")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("null")])]),e._v(" "),a("td",[e._v("Used by the concurrent container to give each child container’s consumer a unique "),a("code",[e._v("client.id")]),e._v(".")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("n/a")]),e._v(" "),a("td",[e._v("True if pause has been requested and the consumer has actually paused.")])])])]),e._v(" "),a("table",[a("thead",[a("tr",[a("th",[e._v("Property")]),e._v(" "),a("th",[e._v("Default")]),e._v(" "),a("th",[e._v("Description")])])]),e._v(" "),a("tbody",[a("tr",[a("td"),e._v(" "),a("td",[a("code",[e._v("true")])]),e._v(" "),a("td",[e._v("Set to false to suppress adding a suffix to the "),a("code",[e._v("client.id")]),e._v(" consumer property, when the "),a("code",[e._v("concurrency")]),e._v(" is only 1.")])]),e._v(" "),a("tr",[a("td",[e._v(".")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td",[e._v(", keyed by the child container’s consumer’s "),a("code",[e._v("client.id")]),e._v(" property.")]),e._v(" "),a("td"),e._v(" "),a("td")]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("1")]),e._v(" "),a("td",[e._v("The number of child "),a("code",[e._v("KafkaMessageListenerContainer")]),e._v(" s to manage.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("n/a")]),e._v(" "),a("td",[e._v("True if pause has been requested and all child containers' consumer has actually paused.")])]),e._v(" "),a("tr",[a("td"),e._v(" "),a("td",[e._v("n/a")]),e._v(" "),a("td",[e._v("A reference to all child "),a("code",[e._v("KafkaMessageListenerContainer")]),e._v(" s.")])])])]),e._v(" "),a("h4",{attrs:{id:"_4-1-6-application-events"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-6-application-events"}},[e._v("#")]),e._v(" 4.1.6. Application Events")]),e._v(" "),a("p",[e._v("The following Spring application events are published by listener containers and their consumers:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("ConsumerStartingEvent")]),e._v(" - published when a consumer thread is first started, before it starts polling.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ConsumerStartedEvent")]),e._v(" - published when a consumer is about to start polling.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ConsumerFailedToStartEvent")]),e._v(" - published if no "),a("code",[e._v("ConsumerStartingEvent")]),e._v(" is published within the "),a("code",[e._v("consumerStartTimeout")]),e._v(" container property.\nThis event might signal that the configured task executor has insufficient threads to support the containers it is used in and their concurrency.\nAn error message is also logged when this condition occurs.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ListenerContainerIdleEvent")]),e._v(": published when no messages have been received in "),a("code",[e._v("idleInterval")]),e._v(" (if configured).")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ListenerContainerNoLongerIdleEvent")]),e._v(": published when a record is consumed after previously publishing a "),a("code",[e._v("ListenerContainerIdleEvent")]),e._v(".")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ListenerContainerPartitionIdleEvent")]),e._v(": published when no messages have been received from that partition in "),a("code",[e._v("idlePartitionEventInterval")]),e._v(" (if configured).")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ListenerContainerPartitionNoLongerIdleEvent")]),e._v(": published when a record is consumed from a partition that has previously published a "),a("code",[e._v("ListenerContainerPartitionIdleEvent")]),e._v(".")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("NonResponsiveConsumerEvent")]),e._v(": published when the consumer appears to be blocked in the "),a("code",[e._v("poll")]),e._v(" method.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ConsumerPartitionPausedEvent")]),e._v(": published by each consumer when a partition is paused.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ConsumerPartitionResumedEvent")]),e._v(": published by each consumer when a partition is resumed.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ConsumerPausedEvent")]),e._v(": published by each consumer when the container is paused.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ConsumerResumedEvent")]),e._v(": published by each consumer when the container is resumed.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ConsumerStoppingEvent")]),e._v(": published by each consumer just before stopping.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ConsumerStoppedEvent")]),e._v(": published after the consumer is closed.\nSee "),a("a",{attrs:{href:"#thread-safety"}},[e._v("Thread Safety")]),e._v(".")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ContainerStoppedEvent")]),e._v(": published when all consumers have stopped.")])])]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("By default, the application context’s event multicaster invokes event listeners on the calling thread."),a("br"),e._v("If you change the multicaster to use an async executor, you must not invoke any "),a("code",[e._v("Consumer")]),e._v(" methods when the event contains a reference to the consumer.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("ListenerContainerIdleEvent")]),e._v(" has the following properties:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("source")]),e._v(": The listener container instance that published the event.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("container")]),e._v(": The listener container or the parent listener container, if the source container is a child.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("id")]),e._v(": The listener ID (or container bean name).")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("idleTime")]),e._v(": The time the container had been idle when the event was published.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("topicPartitions")]),e._v(": The topics and partitions that the container was assigned at the time the event was generated.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("consumer")]),e._v(": A reference to the Kafka "),a("code",[e._v("Consumer")]),e._v(" object.\nFor example, if the consumer’s "),a("code",[e._v("pause()")]),e._v(" method was previously called, it can "),a("code",[e._v("resume()")]),e._v(" when the event is received.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("paused")]),e._v(": Whether the container is currently paused.\nSee "),a("a",{attrs:{href:"#pause-resume"}},[e._v("Pausing and Resuming Listener Containers")]),e._v(" for more information.")])])]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("ListenerContainerNoLongerIdleEvent")]),e._v(" has the same properties, except "),a("code",[e._v("idleTime")]),e._v(" and "),a("code",[e._v("paused")]),e._v(".")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("ListenerContainerPartitionIdleEvent")]),e._v(" has the following properties:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("source")]),e._v(": The listener container instance that published the event.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("container")]),e._v(": The listener container or the parent listener container, if the source container is a child.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("id")]),e._v(": The listener ID (or container bean name).")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("idleTime")]),e._v(": The time partition consumption had been idle when the event was published.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("topicPartition")]),e._v(": The topic and partition that triggered the event.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("consumer")]),e._v(": A reference to the Kafka "),a("code",[e._v("Consumer")]),e._v(" object.\nFor example, if the consumer’s "),a("code",[e._v("pause()")]),e._v(" method was previously called, it can "),a("code",[e._v("resume()")]),e._v(" when the event is received.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("paused")]),e._v(": Whether that partition consumption is currently paused for that consumer.\nSee "),a("a",{attrs:{href:"#pause-resume"}},[e._v("Pausing and Resuming Listener Containers")]),e._v(" for more information.")])])]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("ListenerContainerPartitionNoLongerIdleEvent")]),e._v(" has the same properties, except "),a("code",[e._v("idleTime")]),e._v(" and "),a("code",[e._v("paused")]),e._v(".")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("NonResponsiveConsumerEvent")]),e._v(" has the following properties:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("source")]),e._v(": The listener container instance that published the event.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("container")]),e._v(": The listener container or the parent listener container, if the source container is a child.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("id")]),e._v(": The listener ID (or container bean name).")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("timeSinceLastPoll")]),e._v(": The time just before the container last called "),a("code",[e._v("poll()")]),e._v(".")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("topicPartitions")]),e._v(": The topics and partitions that the container was assigned at the time the event was generated.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("consumer")]),e._v(": A reference to the Kafka "),a("code",[e._v("Consumer")]),e._v(" object.\nFor example, if the consumer’s "),a("code",[e._v("pause()")]),e._v(" method was previously called, it can "),a("code",[e._v("resume()")]),e._v(" when the event is received.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("paused")]),e._v(": Whether the container is currently paused.\nSee "),a("a",{attrs:{href:"#pause-resume"}},[e._v("Pausing and Resuming Listener Containers")]),e._v(" for more information.")])])]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("ConsumerPausedEvent")]),e._v(", "),a("code",[e._v("ConsumerResumedEvent")]),e._v(", and "),a("code",[e._v("ConsumerStopping")]),e._v(" events have the following properties:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("source")]),e._v(": The listener container instance that published the event.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("container")]),e._v(": The listener container or the parent listener container, if the source container is a child.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("partitions")]),e._v(": The "),a("code",[e._v("TopicPartition")]),e._v(" instances involved.")])])]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("ConsumerPartitionPausedEvent")]),e._v(", "),a("code",[e._v("ConsumerPartitionResumedEvent")]),e._v(" events have the following properties:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("source")]),e._v(": The listener container instance that published the event.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("container")]),e._v(": The listener container or the parent listener container, if the source container is a child.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("partition")]),e._v(": The "),a("code",[e._v("TopicPartition")]),e._v(" instance involved.")])])]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("ConsumerStartingEvent")]),e._v(", "),a("code",[e._v("ConsumerStartingEvent")]),e._v(", "),a("code",[e._v("ConsumerFailedToStartEvent")]),e._v(", "),a("code",[e._v("ConsumerStoppedEvent")]),e._v(" and "),a("code",[e._v("ContainerStoppedEvent")]),e._v(" events have the following properties:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("source")]),e._v(": The listener container instance that published the event.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("container")]),e._v(": The listener container or the parent listener container, if the source container is a child.")])])]),e._v(" "),a("p",[e._v("All containers (whether a child or a parent) publish "),a("code",[e._v("ContainerStoppedEvent")]),e._v(".\nFor a parent container, the source and container properties are identical.")]),e._v(" "),a("p",[e._v("In addition, the "),a("code",[e._v("ConsumerStoppedEvent")]),e._v(" has the following additional property:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("reason")])]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("NORMAL")]),e._v(" - the consumer stopped normally (container was stopped).")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ERROR")]),e._v(" - a "),a("code",[e._v("java.lang.Error")]),e._v(" was thrown.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("FENCED")]),e._v(" - the transactional producer was fenced and the "),a("code",[e._v("stopContainerWhenFenced")]),e._v(" container property is "),a("code",[e._v("true")]),e._v(".")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("AUTH")]),e._v(" - an "),a("code",[e._v("AuthenticationException")]),e._v(" or "),a("code",[e._v("AuthorizationException")]),e._v(" was thrown and the "),a("code",[e._v("authExceptionRetryInterval")]),e._v(" is not configured.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("NO_OFFSET")]),e._v(" - there is no offset for a partition and the "),a("code",[e._v("auto.offset.reset")]),e._v(" policy is "),a("code",[e._v("none")]),e._v(".")])])])])]),e._v(" "),a("p",[e._v("You can use this event to restart the container after such a condition:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("if (event.getReason.equals(Reason.FENCED)) {\n event.getSource(MessageListenerContainer.class).start();\n}\n")])])]),a("h5",{attrs:{id:"detecting-idle-and-non-responsive-consumers"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#detecting-idle-and-non-responsive-consumers"}},[e._v("#")]),e._v(" Detecting Idle and Non-Responsive Consumers")]),e._v(" "),a("p",[e._v("While efficient, one problem with asynchronous consumers is detecting when they are idle.\nYou might want to take some action if no messages arrive for some period of time.")]),e._v(" "),a("p",[e._v("You can configure the listener container to publish a "),a("code",[e._v("ListenerContainerIdleEvent")]),e._v(" when some time passes with no message delivery.\nWhile the container is idle, an event is published every "),a("code",[e._v("idleEventInterval")]),e._v(" milliseconds.")]),e._v(" "),a("p",[e._v("To configure this feature, set the "),a("code",[e._v("idleEventInterval")]),e._v(" on the container.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic KafkaMessageListenerContainer(ConsumerFactory consumerFactory) {\n ContainerProperties containerProps = new ContainerProperties("topic1", "topic2");\n ...\n containerProps.setIdleEventInterval(60000L);\n ...\n KafkaMessageListenerContainer container = new KafKaMessageListenerContainer<>(...);\n return container;\n}\n')])])]),a("p",[e._v("The following example shows how to set the "),a("code",[e._v("idleEventInterval")]),e._v(" for a "),a("code",[e._v("@KafkaListener")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() {\n ConcurrentKafkaListenerContainerFactory factory =\n new ConcurrentKafkaListenerContainerFactory<>();\n ...\n factory.getContainerProperties().setIdleEventInterval(60000L);\n ...\n return factory;\n}\n")])])]),a("p",[e._v("In each of these cases, an event is published once per minute while the container is idle.")]),e._v(" "),a("p",[e._v("If, for some reason, the consumer "),a("code",[e._v("poll()")]),e._v(" method does not exit, no messages are received and idle events cannot be generated (this was a problem with early versions of the "),a("code",[e._v("kafka-clients")]),e._v(" when the broker wasn’t reachable).\nIn this case, the container publishes a "),a("code",[e._v("NonResponsiveConsumerEvent")]),e._v(" if a poll does not return within "),a("code",[e._v("3x")]),e._v(" the "),a("code",[e._v("pollTimeout")]),e._v(" property.\nBy default, this check is performed once every 30 seconds in each container.\nYou can modify this behavior by setting the "),a("code",[e._v("monitorInterval")]),e._v(" (default 30 seconds) and "),a("code",[e._v("noPollThreshold")]),e._v(" (default 3.0) properties in the "),a("code",[e._v("ContainerProperties")]),e._v(" when configuring the listener container.\nThe "),a("code",[e._v("noPollThreshold")]),e._v(" should be greater than "),a("code",[e._v("1.0")]),e._v(" to avoid getting spurious events due to a race condition.\nReceiving such an event lets you stop the containers, thus waking the consumer so that it can stop.")]),e._v(" "),a("p",[e._v("Starting with version 2.6.2, if a container has published a "),a("code",[e._v("ListenerContainerIdleEvent")]),e._v(", it will publish a "),a("code",[e._v("ListenerContainerNoLongerIdleEvent")]),e._v(" when a record is subsequently received.")]),e._v(" "),a("h5",{attrs:{id:"event-consumption"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#event-consumption"}},[e._v("#")]),e._v(" Event Consumption")]),e._v(" "),a("p",[e._v("You can capture these events by implementing "),a("code",[e._v("ApplicationListener")]),e._v(" — either a general listener or one narrowed to only receive this specific event.\nYou can also use "),a("code",[e._v("@EventListener")]),e._v(", introduced in Spring Framework 4.2.")]),e._v(" "),a("p",[e._v("The next example combines "),a("code",[e._v("@KafkaListener")]),e._v(" and "),a("code",[e._v("@EventListener")]),e._v(" into a single class.\nYou should understand that the application listener gets events for all containers, so you may need to check the listener ID if you want to take specific action based on which container is idle.\nYou can also use the "),a("code",[e._v("@EventListener")]),e._v(" "),a("code",[e._v("condition")]),e._v(" for this purpose.")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#events"}},[e._v("Application Events")]),e._v(" for information about event properties.")]),e._v(" "),a("p",[e._v("The event is normally published on the consumer thread, so it is safe to interact with the "),a("code",[e._v("Consumer")]),e._v(" object.")]),e._v(" "),a("p",[e._v("The following example uses both "),a("code",[e._v("@KafkaListener")]),e._v(" and "),a("code",[e._v("@EventListener")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('public class Listener {\n\n @KafkaListener(id = "qux", topics = "annotated")\n public void listen4(@Payload String foo, Acknowledgment ack) {\n ...\n }\n\n @EventListener(condition = "event.listenerId.startsWith(\'qux-\')")\n public void eventHandler(ListenerContainerIdleEvent event) {\n ...\n }\n\n}\n')])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Event listeners see events for all containers."),a("br"),e._v("Consequently, in the preceding example, we narrow the events received based on the listener ID."),a("br"),e._v("Since containers created for the "),a("code",[e._v("@KafkaListener")]),e._v(" support concurrency, the actual containers are named "),a("code",[e._v("id-n")]),e._v(" where the "),a("code",[e._v("n")]),e._v(" is a unique value for each instance to support the concurrency."),a("br"),e._v("That is why we use "),a("code",[e._v("startsWith")]),e._v(" in the condition.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If you wish to use the idle event to stop the lister container, you should not call "),a("code",[e._v("container.stop()")]),e._v(" on the thread that calls the listener."),a("br"),e._v("Doing so causes delays and unnecessary log messages."),a("br"),e._v("Instead, you should hand off the event to a different thread that can then stop the container."),a("br"),e._v("Also, you should not "),a("code",[e._v("stop()")]),e._v(" the container instance if it is a child container."),a("br"),e._v("You should stop the concurrent container instead.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h6",{attrs:{id:"current-positions-when-idle"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#current-positions-when-idle"}},[e._v("#")]),e._v(" Current Positions when Idle")]),e._v(" "),a("p",[e._v("Note that you can obtain the current positions when idle is detected by implementing "),a("code",[e._v("ConsumerSeekAware")]),e._v(" in your listener.\nSee "),a("code",[e._v("onIdleContainer()")]),e._v(" in "),a("a",{attrs:{href:"#seek"}},[e._v("Seeking to a Specific Offset")]),e._v(".")]),e._v(" "),a("h4",{attrs:{id:"_4-1-7-topic-partition-initial-offset"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-7-topic-partition-initial-offset"}},[e._v("#")]),e._v(" 4.1.7. Topic/Partition Initial Offset")]),e._v(" "),a("p",[e._v("There are several ways to set the initial offset for a partition.")]),e._v(" "),a("p",[e._v("When manually assigning partitions, you can set the initial offset (if desired) in the configured "),a("code",[e._v("TopicPartitionOffset")]),e._v(" arguments (see "),a("a",{attrs:{href:"#message-listener-container"}},[e._v("Message Listener Containers")]),e._v(").\nYou can also seek to a specific offset at any time.")]),e._v(" "),a("p",[e._v("When you use group management where the broker assigns partitions:")]),e._v(" "),a("ul",[a("li",[a("p",[e._v("For a new "),a("code",[e._v("group.id")]),e._v(", the initial offset is determined by the "),a("code",[e._v("auto.offset.reset")]),e._v(" consumer property ("),a("code",[e._v("earliest")]),e._v(" or "),a("code",[e._v("latest")]),e._v(").")])]),e._v(" "),a("li",[a("p",[e._v("For an existing group ID, the initial offset is the current offset for that group ID.\nYou can, however, seek to a specific offset during initialization (or at any time thereafter).")])])]),e._v(" "),a("h4",{attrs:{id:"_4-1-8-seeking-to-a-specific-offset"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-8-seeking-to-a-specific-offset"}},[e._v("#")]),e._v(" 4.1.8. Seeking to a Specific Offset")]),e._v(" "),a("p",[e._v("In order to seek, your listener must implement "),a("code",[e._v("ConsumerSeekAware")]),e._v(", which has the following methods:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("void registerSeekCallback(ConsumerSeekCallback callback);\n\nvoid onPartitionsAssigned(Map assignments, ConsumerSeekCallback callback);\n\nvoid onPartitionsRevoked(Collection partitions)\n\nvoid onIdleContainer(Map assignments, ConsumerSeekCallback callback);\n")])])]),a("p",[e._v("The "),a("code",[e._v("registerSeekCallback")]),e._v(" is called when the container is started and whenever partitions are assigned.\nYou should use this callback when seeking at some arbitrary time after initialization.\nYou should save a reference to the callback.\nIf you use the same listener in multiple containers (or in a "),a("code",[e._v("ConcurrentMessageListenerContainer")]),e._v("), you should store the callback in a "),a("code",[e._v("ThreadLocal")]),e._v(" or some other structure keyed by the listener "),a("code",[e._v("Thread")]),e._v(".")]),e._v(" "),a("p",[e._v("When using group management, "),a("code",[e._v("onPartitionsAssigned")]),e._v(" is called when partitions are assigned.\nYou can use this method, for example, for setting initial offsets for the partitions, by calling the callback.\nYou can also use this method to associate this thread’s callback with the assigned partitions (see the example below).\nYou must use the callback argument, not the one passed into "),a("code",[e._v("registerSeekCallback")]),e._v(".\nStarting with version 2.5.5, this method is called, even when using "),a("a",{attrs:{href:"#manual-assignment"}},[e._v("manual partition assignment")]),e._v(".")]),e._v(" "),a("p",[a("code",[e._v("onPartitionsRevoked")]),e._v(" is called when the container is stopped or Kafka revokes assignments.\nYou should discard this thread’s callback and remove any associations to the revoked partitions.")]),e._v(" "),a("p",[e._v("The callback has the following methods:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("void seek(String topic, int partition, long offset);\n\nvoid seekToBeginning(String topic, int partition);\n\nvoid seekToBeginning(Collection= partitions);\n\nvoid seekToEnd(String topic, int partition);\n\nvoid seekToEnd(Collection= partitions);\n\nvoid seekRelative(String topic, int partition, long offset, boolean toCurrent);\n\nvoid seekToTimestamp(String topic, int partition, long timestamp);\n\nvoid seekToTimestamp(Collection topicPartitions, long timestamp);\n")])])]),a("p",[a("code",[e._v("seekRelative")]),e._v(" was added in version 2.3, to perform relative seeks.")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("offset")]),e._v(" negative and "),a("code",[e._v("toCurrent")]),e._v(" "),a("code",[e._v("false")]),e._v(" - seek relative to the end of the partition.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("offset")]),e._v(" positive and "),a("code",[e._v("toCurrent")]),e._v(" "),a("code",[e._v("false")]),e._v(" - seek relative to the beginning of the partition.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("offset")]),e._v(" negative and "),a("code",[e._v("toCurrent")]),e._v(" "),a("code",[e._v("true")]),e._v(" - seek relative to the current position (rewind).")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("offset")]),e._v(" positive and "),a("code",[e._v("toCurrent")]),e._v(" "),a("code",[e._v("true")]),e._v(" - seek relative to the current position (fast forward).")])])]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("seekToTimestamp")]),e._v(" methods were also added in version 2.3.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("When seeking to the same timestamp for multiple partitions in the "),a("code",[e._v("onIdleContainer")]),e._v(" or "),a("code",[e._v("onPartitionsAssigned")]),e._v(" methods, the second method is preferred because it is more efficient to find the offsets for the timestamps in a single call to the consumer’s "),a("code",[e._v("offsetsForTimes")]),e._v(" method."),a("br"),e._v("When called from other locations, the container will gather all timestamp seek requests and make one call to "),a("code",[e._v("offsetsForTimes")]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("You can also perform seek operations from "),a("code",[e._v("onIdleContainer()")]),e._v(" when an idle container is detected.\nSee "),a("a",{attrs:{href:"#idle-containers"}},[e._v("Detecting Idle and Non-Responsive Consumers")]),e._v(" for how to enable idle container detection.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The "),a("code",[e._v("seekToBeginning")]),e._v(" method that accepts a collection is useful, for example, when processing a compacted topic and you wish to seek to the beginning every time the application is started:")])])]),e._v(" "),a("tbody")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public class MyListener implements ConsumerSeekAware {\n\n...\n\n @Override\n public void onPartitionsAssigned(Map assignments, ConsumerSeekCallback callback) {\n callback.seekToBeginning(assignments.keySet());\n }\n\n}\n")])])]),a("p",[e._v("To arbitrarily seek at runtime, use the callback reference from the "),a("code",[e._v("registerSeekCallback")]),e._v(" for the appropriate thread.")]),e._v(" "),a("p",[e._v("Here is a trivial Spring Boot application that demonstrates how to use the callback; it sends 10 records to the topic; hitting "),a("code",[e._v("")]),e._v(" in the console causes all partitions to seek to the beginning.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@SpringBootApplication\npublic class SeekExampleApplication {\n\n public static void main(String[] args) {\n SpringApplication.run(SeekExampleApplication.class, args);\n }\n\n @Bean\n public ApplicationRunner runner(Listener listener, KafkaTemplate template) {\n return args -> {\n IntStream.range(0, 10).forEach(i -> template.send(\n new ProducerRecord<>("seekExample", i % 3, "foo", "bar")));\n while (true) {\n System.in.read();\n listener.seekToStart();\n }\n };\n }\n\n @Bean\n public NewTopic topic() {\n return new NewTopic("seekExample", 3, (short) 1);\n }\n\n}\n\n@Component\nclass Listener implements ConsumerSeekAware {\n\n private static final Logger logger = LoggerFactory.getLogger(Listener.class);\n\n private final ThreadLocal callbackForThread = new ThreadLocal<>();\n\n private final Map callbacks = new ConcurrentHashMap<>();\n\n @Override\n public void registerSeekCallback(ConsumerSeekCallback callback) {\n this.callbackForThread.set(callback);\n }\n\n @Override\n public void onPartitionsAssigned(Map assignments, ConsumerSeekCallback callback) {\n assignments.keySet().forEach(tp -> this.callbacks.put(tp, this.callbackForThread.get()));\n }\n\n @Override\n public void onPartitionsRevoked(Collection partitions) {\n partitions.forEach(tp -> this.callbacks.remove(tp));\n this.callbackForThread.remove();\n }\n\n @Override\n public void onIdleContainer(Map assignments, ConsumerSeekCallback callback) {\n }\n\n @KafkaListener(id = "seekExample", topics = "seekExample", concurrency = "3")\n public void listen(ConsumerRecord in) {\n logger.info(in.toString());\n }\n\n public void seekToStart() {\n this.callbacks.forEach((tp, callback) -> callback.seekToBeginning(tp.topic(), tp.partition()));\n }\n\n}\n')])])]),a("p",[e._v("To make things simpler, version 2.3 added the "),a("code",[e._v("AbstractConsumerSeekAware")]),e._v(" class, which keeps track of which callback is to be used for a topic/partition.\nThe following example shows how to seek to the last record processed, in each partition, each time the container goes idle.\nIt also has methods that allow arbitrary external calls to rewind partitions by one record.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('public class SeekToLastOnIdleListener extends AbstractConsumerSeekAware {\n\n @KafkaListener(id = "seekOnIdle", topics = "seekOnIdle")\n public void listen(String in) {\n ...\n }\n\n @Override\n public void onIdleContainer(Map assignments,\n ConsumerSeekCallback callback) {\n\n assignments.keySet().forEach(tp -> callback.seekRelative(tp.topic(), tp.partition(), -1, true));\n }\n\n /**\n * Rewind all partitions one record.\n */\n public void rewindAllOneRecord() {\n getSeekCallbacks()\n .forEach((tp, callback) ->\n callback.seekRelative(tp.topic(), tp.partition(), -1, true));\n }\n\n /**\n * Rewind one partition one record.\n */\n public void rewindOnePartitionOneRecord(String topic, int partition) {\n getSeekCallbackFor(new org.apache.kafka.common.TopicPartition(topic, partition))\n .seekRelative(topic, partition, -1, true);\n }\n\n}\n')])])]),a("p",[e._v("Version 2.6 added convenience methods to the abstract class:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("seekToBeginning()")]),e._v(" - seeks all assigned partitions to the beginning")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("seekToEnd()")]),e._v(" - seeks all assigned partitions to the end")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("seekToTimestamp(long time)")]),e._v(" - seeks all assigned partitions to the offset represented by that timestamp.")])])]),e._v(" "),a("p",[e._v("Example:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public class MyListener extends AbstractConsumerSeekAware {\n\n @KafkaListener(...)\n void listn(...) {\n ...\n }\n}\n\npublic class SomeOtherBean {\n\n MyListener listener;\n\n ...\n\n void someMethod() {\n this.listener.seekToTimestamp(System.currentTimeMillis - 60_000);\n }\n\n}\n")])])]),a("h4",{attrs:{id:"_4-1-9-container-factory"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-9-container-factory"}},[e._v("#")]),e._v(" 4.1.9. Container factory")]),e._v(" "),a("p",[e._v("As discussed in "),a("a",{attrs:{href:"#kafka-listener-annotation"}},[a("code",[e._v("@KafkaListener")]),e._v(" Annotation")]),e._v(", a "),a("code",[e._v("ConcurrentKafkaListenerContainerFactory")]),e._v(" is used to create containers for annotated methods.")]),e._v(" "),a("p",[e._v("Starting with version 2.2, you can use the same factory to create any "),a("code",[e._v("ConcurrentMessageListenerContainer")]),e._v(".\nThis might be useful if you want to create several containers with similar properties or you wish to use some externally configured factory, such as the one provided by Spring Boot auto-configuration.\nOnce the container is created, you can further modify its properties, many of which are set by using "),a("code",[e._v("container.getContainerProperties()")]),e._v(".\nThe following example configures a "),a("code",[e._v("ConcurrentMessageListenerContainer")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic ConcurrentMessageListenerContainer(\n ConcurrentKafkaListenerContainerFactory factory) {\n\n ConcurrentMessageListenerContainer container =\n factory.createContainer("topic1", "topic2");\n container.setMessageListener(m -> { ... } );\n return container;\n}\n')])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Containers created this way are not added to the endpoint registry."),a("br"),e._v("They should be created as "),a("code",[e._v("@Bean")]),e._v(" definitions so that they are registered with the application context.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Starting with version 2.3.4, you can add a "),a("code",[e._v("ContainerCustomizer")]),e._v(" to the factory to further configure each container after it has been created and configured.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic KafkaListenerContainerFactory kafkaListenerContainerFactory() {\n ConcurrentKafkaListenerContainerFactory factory =\n new ConcurrentKafkaListenerContainerFactory<>();\n ...\n factory.setContainerCustomizer(container -> { /* customize the container */ });\n return factory;\n}\n")])])]),a("h4",{attrs:{id:"_4-1-10-thread-safety"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-10-thread-safety"}},[e._v("#")]),e._v(" 4.1.10. Thread Safety")]),e._v(" "),a("p",[e._v("When using a concurrent message listener container, a single listener instance is invoked on all consumer threads.\nListeners, therefore, need to be thread-safe, and it is preferable to use stateless listeners.\nIf it is not possible to make your listener thread-safe or adding synchronization would significantly reduce the benefit of adding concurrency, you can use one of a few techniques:")]),e._v(" "),a("ul",[a("li",[a("p",[e._v("Use "),a("code",[e._v("n")]),e._v(" containers with "),a("code",[e._v("concurrency=1")]),e._v(" with a prototype scoped "),a("code",[e._v("MessageListener")]),e._v(" bean so that each container gets its own instance (this is not possible when using "),a("code",[e._v("@KafkaListener")]),e._v(").")])]),e._v(" "),a("li",[a("p",[e._v("Keep the state in "),a("code",[e._v("ThreadLocal")]),e._v(" instances.")])]),e._v(" "),a("li",[a("p",[e._v("Have the singleton listener delegate to a bean that is declared in "),a("code",[e._v("SimpleThreadScope")]),e._v(" (or a similar scope).")])])]),e._v(" "),a("p",[e._v("To facilitate cleaning up thread state (for the second and third items in the preceding list), starting with version 2.2, the listener container publishes a "),a("code",[e._v("ConsumerStoppedEvent")]),e._v(" when each thread exits.\nYou can consume these events with an "),a("code",[e._v("ApplicationListener")]),e._v(" or "),a("code",[e._v("@EventListener")]),e._v(" method to remove "),a("code",[e._v("ThreadLocal")]),e._v(" instances or "),a("code",[e._v("remove()")]),e._v(" thread-scoped beans from the scope.\nNote that "),a("code",[e._v("SimpleThreadScope")]),e._v(" does not destroy beans that have a destruction interface (such as "),a("code",[e._v("DisposableBean")]),e._v("), so you should "),a("code",[e._v("destroy()")]),e._v(" the instance yourself.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("By default, the application context’s event multicaster invokes event listeners on the calling thread."),a("br"),e._v("If you change the multicaster to use an async executor, thread cleanup is not effective.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h4",{attrs:{id:"_4-1-11-monitoring"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-11-monitoring"}},[e._v("#")]),e._v(" 4.1.11. Monitoring")]),e._v(" "),a("h5",{attrs:{id:"monitoring-listener-performance"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#monitoring-listener-performance"}},[e._v("#")]),e._v(" Monitoring Listener Performance")]),e._v(" "),a("p",[e._v("Starting with version 2.3, the listener container will automatically create and update Micrometer "),a("code",[e._v("Timer")]),e._v(" s for the listener, if "),a("code",[e._v("Micrometer")]),e._v(" is detected on the class path, and a single "),a("code",[e._v("MeterRegistry")]),e._v(" is present in the application context.\nThe timers can be disabled by setting the "),a("code",[e._v("ContainerProperty")]),e._v(" "),a("code",[e._v("micrometerEnabled")]),e._v(" to "),a("code",[e._v("false")]),e._v(".")]),e._v(" "),a("p",[e._v("Two timers are maintained - one for successful calls to the listener and one for failures.")]),e._v(" "),a("p",[e._v("The timers are named "),a("code",[e._v("spring.kafka.listener")]),e._v(" and have the following tags:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("name")]),e._v(" : (container bean name)")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("result")]),e._v(" : "),a("code",[e._v("success")]),e._v(" or "),a("code",[e._v("failure")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("exception")]),e._v(" : "),a("code",[e._v("none")]),e._v(" or "),a("code",[e._v("ListenerExecutionFailedException")])])])]),e._v(" "),a("p",[e._v("You can add additional tags using the "),a("code",[e._v("ContainerProperties")]),e._v(" "),a("code",[e._v("micrometerTags")]),e._v(" property.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("With the concurrent container, timers are created for each thread and the "),a("code",[e._v("name")]),e._v(" tag is suffixed with "),a("code",[e._v("-n")]),e._v(" where n is "),a("code",[e._v("0")]),e._v(" to "),a("code",[e._v("concurrency-1")]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"monitoring-kafkatemplate-performance"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#monitoring-kafkatemplate-performance"}},[e._v("#")]),e._v(" Monitoring KafkaTemplate Performance")]),e._v(" "),a("p",[e._v("Starting with version 2.5, the template will automatically create and update Micrometer "),a("code",[e._v("Timer")]),e._v(" s for send operations, if "),a("code",[e._v("Micrometer")]),e._v(" is detected on the class path, and a single "),a("code",[e._v("MeterRegistry")]),e._v(" is present in the application context.\nThe timers can be disabled by setting the template’s "),a("code",[e._v("micrometerEnabled")]),e._v(" property to "),a("code",[e._v("false")]),e._v(".")]),e._v(" "),a("p",[e._v("Two timers are maintained - one for successful calls to the listener and one for failures.")]),e._v(" "),a("p",[e._v("The timers are named "),a("code",[e._v("spring.kafka.template")]),e._v(" and have the following tags:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("name")]),e._v(" : (template bean name)")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("result")]),e._v(" : "),a("code",[e._v("success")]),e._v(" or "),a("code",[e._v("failure")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("exception")]),e._v(" : "),a("code",[e._v("none")]),e._v(" or the exception class name for failures")])])]),e._v(" "),a("p",[e._v("You can add additional tags using the template’s "),a("code",[e._v("micrometerTags")]),e._v(" property.")]),e._v(" "),a("h5",{attrs:{id:"micrometer-native-metrics"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#micrometer-native-metrics"}},[e._v("#")]),e._v(" Micrometer Native Metrics")]),e._v(" "),a("p",[e._v("Starting with version 2.5, the framework provides "),a("a",{attrs:{href:"#factory-listeners"}},[e._v("Factory Listeners")]),e._v(" to manage a Micrometer "),a("code",[e._v("KafkaClientMetrics")]),e._v(" instance whenever producers and consumers are created and closed.")]),e._v(" "),a("p",[e._v("To enable this feature, simply add the listeners to your producer and consumer factories:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic ConsumerFactory myConsumerFactory() {\n Map configs = consumerConfigs();\n ...\n DefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory<>(configs);\n ...\n cf.addListener(new MicrometerConsumerListener(meterRegistry(),\n Collections.singletonList(new ImmutableTag("customTag", "customTagValue"))));\n ...\n return cf;\n}\n\n@Bean\npublic ProducerFactory myProducerFactory() {\n Map configs = producerConfigs();\n configs.put(ProducerConfig.CLIENT_ID_CONFIG, "myClientId");\n ...\n DefaultKafkaProducerFactory pf = new DefaultKafkaProducerFactory<>(configs);\n ...\n pf.addListener(new MicrometerProducerListener(meterRegistry(),\n Collections.singletonList(new ImmutableTag("customTag", "customTagValue"))));\n ...\n return pf;\n}\n')])])]),a("p",[e._v("The consumer/producer "),a("code",[e._v("id")]),e._v(" passed to the listener is added to the meter’s tags with tag name "),a("code",[e._v("spring.id")]),e._v(".")]),e._v(" "),a("p",[e._v("An example of obtaining one of the Kafka metrics")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('double count = this.meterRegistry.get("kafka.producer.node.incoming.byte.total")\n .tag("customTag", "customTagValue")\n .tag("spring.id", "myProducerFactory.myClientId-1")\n .functionCounter()\n .count()\n')])])]),a("p",[e._v("A similar listener is provided for the "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(" - see "),a("a",{attrs:{href:"#streams-micrometer"}},[e._v("KafkaStreams Micrometer Support")]),e._v(".")]),e._v(" "),a("h4",{attrs:{id:"_4-1-12-transactions"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-12-transactions"}},[e._v("#")]),e._v(" 4.1.12. Transactions")]),e._v(" "),a("p",[e._v("This section describes how Spring for Apache Kafka supports transactions.")]),e._v(" "),a("h5",{attrs:{id:"overview-2"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#overview-2"}},[e._v("#")]),e._v(" Overview")]),e._v(" "),a("p",[e._v("The 0.11.0.0 client library added support for transactions.\nSpring for Apache Kafka adds support in the following ways:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("KafkaTransactionManager")]),e._v(": Used with normal Spring transaction support ("),a("code",[e._v("@Transactional")]),e._v(", "),a("code",[e._v("TransactionTemplate")]),e._v(" etc).")])]),e._v(" "),a("li",[a("p",[e._v("Transactional "),a("code",[e._v("KafkaMessageListenerContainer")])])]),e._v(" "),a("li",[a("p",[e._v("Local transactions with "),a("code",[e._v("KafkaTemplate")])])]),e._v(" "),a("li",[a("p",[e._v("Transaction synchronization with other transaction managers")])])]),e._v(" "),a("p",[e._v("Transactions are enabled by providing the "),a("code",[e._v("DefaultKafkaProducerFactory")]),e._v(" with a "),a("code",[e._v("transactionIdPrefix")]),e._v(".\nIn that case, instead of managing a single shared "),a("code",[e._v("Producer")]),e._v(", the factory maintains a cache of transactional producers.\nWhen the user calls "),a("code",[e._v("close()")]),e._v(" on a producer, it is returned to the cache for reuse instead of actually being closed.\nThe "),a("code",[e._v("transactional.id")]),e._v(" property of each producer is "),a("code",[e._v("transactionIdPrefix")]),e._v(" + "),a("code",[e._v("n")]),e._v(", where "),a("code",[e._v("n")]),e._v(" starts with "),a("code",[e._v("0")]),e._v(" and is incremented for each new producer, unless the transaction is started by a listener container with a record-based listener.\nIn that case, the "),a("code",[e._v("transactional.id")]),e._v(" is "),a("code",[e._v("...")]),e._v(".\nThis is to properly support fencing zombies, "),a("a",{attrs:{href:"https://www.confluent.io/blog/transactions-apache-kafka/",target:"_blank",rel:"noopener noreferrer"}},[e._v("as described here"),a("OutboundLink")],1),e._v(".\nThis new behavior was added in versions 1.3.7, 2.0.6, 2.1.10, and 2.2.0.\nIf you wish to revert to the previous behavior, you can set the "),a("code",[e._v("producerPerConsumerPartition")]),e._v(" property on the "),a("code",[e._v("DefaultKafkaProducerFactory")]),e._v(" to "),a("code",[e._v("false")]),e._v(".")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("While transactions are supported with batch listeners, by default, zombie fencing is not supported because a batch may contain records from multiple topics or partitions."),a("br"),e._v("However, starting with version 2.3.2, zombie fencing is supported if you set the container property "),a("code",[e._v("subBatchPerPartition")]),e._v(" to true."),a("br"),e._v("In that case, the batch listener is invoked once per partition received from the last poll, as if each poll only returned records for a single partition."),a("br"),e._v("This is "),a("code",[e._v("true")]),e._v(" by default since version 2.5 when transactions are enabled with "),a("code",[e._v("EOSMode.ALPHA")]),e._v("; set it to "),a("code",[e._v("false")]),e._v(" if you are using transactions but are not concerned about zombie fencing."),a("br"),e._v("Also see "),a("a",{attrs:{href:"#exactly-once"}},[e._v("Exactly Once Semantics")]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Also see "),a("a",{attrs:{href:"#transaction-id-prefix"}},[a("code",[e._v("transactionIdPrefix")])]),e._v(".")]),e._v(" "),a("p",[e._v("With Spring Boot, it is only necessary to set the "),a("code",[e._v("spring.kafka.producer.transaction-id-prefix")]),e._v(" property - Boot will automatically configure a "),a("code",[e._v("KafkaTransactionManager")]),e._v(" bean and wire it into the listener container.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Starting with version 2.5.8, you can now configure the "),a("code",[e._v("maxAge")]),e._v(" property on the producer factory."),a("br"),e._v("This is useful when using transactional producers that might lay idle for the broker’s "),a("code",[e._v("transactional.id.expiration.ms")]),e._v("."),a("br"),e._v("With current "),a("code",[e._v("kafka-clients")]),e._v(", this can cause a "),a("code",[e._v("ProducerFencedException")]),e._v(" without a rebalance."),a("br"),e._v("By setting the "),a("code",[e._v("maxAge")]),e._v(" to less than "),a("code",[e._v("transactional.id.expiration.ms")]),e._v(", the factory will refresh the producer if it is past it’s max age.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"using-kafkatransactionmanager"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#using-kafkatransactionmanager"}},[e._v("#")]),e._v(" Using "),a("code",[e._v("KafkaTransactionManager")])]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("KafkaTransactionManager")]),e._v(" is an implementation of Spring Framework’s "),a("code",[e._v("PlatformTransactionManager")]),e._v(".\nIt is provided with a reference to the producer factory in its constructor.\nIf you provide a custom producer factory, it must support transactions.\nSee "),a("code",[e._v("ProducerFactory.transactionCapable()")]),e._v(".")]),e._v(" "),a("p",[e._v("You can use the "),a("code",[e._v("KafkaTransactionManager")]),e._v(" with normal Spring transaction support ("),a("code",[e._v("@Transactional")]),e._v(", "),a("code",[e._v("TransactionTemplate")]),e._v(", and others).\nIf a transaction is active, any "),a("code",[e._v("KafkaTemplate")]),e._v(" operations performed within the scope of the transaction use the transaction’s "),a("code",[e._v("Producer")]),e._v(".\nThe manager commits or rolls back the transaction, depending on success or failure.\nYou must configure the "),a("code",[e._v("KafkaTemplate")]),e._v(" to use the same "),a("code",[e._v("ProducerFactory")]),e._v(" as the transaction manager.")]),e._v(" "),a("h5",{attrs:{id:"transaction-synchronization"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#transaction-synchronization"}},[e._v("#")]),e._v(" Transaction Synchronization")]),e._v(" "),a("p",[e._v("This section refers to producer-only transactions (transactions not started by a listener container); see "),a("a",{attrs:{href:"#container-transaction-manager"}},[e._v("Using Consumer-Initiated Transactions")]),e._v(" for information about chaining transactions when the container starts the transaction.")]),e._v(" "),a("p",[e._v("If you want to send records to kafka and perform some database updates, you can use normal Spring transaction management with, say, a "),a("code",[e._v("DataSourceTransactionManager")]),e._v(".")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Transactional\npublic void process(List things) {\n things.forEach(thing -> this.kafkaTemplate.send("topic", thing));\n updateDb(things);\n}\n')])])]),a("p",[e._v("The interceptor for the "),a("code",[e._v("@Transactional")]),e._v(" annotation starts the transaction and the "),a("code",[e._v("KafkaTemplate")]),e._v(" will synchronize a transaction with that transaction manager; each send will participate in that transaction.\nWhen the method exits, the database transaction will commit followed by the Kafka transaction.\nIf you wish the commits to be performed in the reverse order (Kafka first), use nested "),a("code",[e._v("@Transactional")]),e._v(" methods, with the outer method configured to use the "),a("code",[e._v("DataSourceTransactionManager")]),e._v(", and the inner method configured to use the "),a("code",[e._v("KafkaTransactionManager")]),e._v(".")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#ex-jdbc-sync"}},[e._v("[ex-jdbc-sync]")]),e._v(" for examples of an application that synchronizes JDBC and Kafka transactions in Kafka-first or DB-first configurations.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Starting with versions 2.5.17, 2.6.12, 2.7.9 and 2.8.0, if the commit fails on the synchronized transaction (after the primary transaction has committed), the exception will be thrown to the caller."),a("br"),e._v("Previously, this was silently ignored (logged at debug)."),a("br"),e._v("Applications should take remedial action, if necessary, to compensate for the committed primary transaction.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"using-consumer-initiated-transactions"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#using-consumer-initiated-transactions"}},[e._v("#")]),e._v(" Using Consumer-Initiated Transactions")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("ChainedKafkaTransactionManager")]),e._v(" is now deprecated, since version 2.7; see the javadocs for its super class "),a("code",[e._v("ChainedTransactionManager")]),e._v(" for more information.\nInstead, use a "),a("code",[e._v("KafkaTransactionManager")]),e._v(" in the container to start the Kafka transaction and annotate the listener method with "),a("code",[e._v("@Transactional")]),e._v(" to start the other transaction.")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#ex-jdbc-sync"}},[e._v("[ex-jdbc-sync]")]),e._v(" for an example application that chains JDBC and Kafka transactions.")]),e._v(" "),a("h5",{attrs:{id:"kafkatemplate-local-transactions"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#kafkatemplate-local-transactions"}},[e._v("#")]),e._v(" "),a("code",[e._v("KafkaTemplate")]),e._v(" Local Transactions")]),e._v(" "),a("p",[e._v("You can use the "),a("code",[e._v("KafkaTemplate")]),e._v(" to execute a series of operations within a local transaction.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('boolean result = template.executeInTransaction(t -> {\n t.sendDefault("thing1", "thing2");\n t.sendDefault("cat", "hat");\n return true;\n});\n')])])]),a("p",[e._v("The argument in the callback is the template itself ("),a("code",[e._v("this")]),e._v(").\nIf the callback exits normally, the transaction is committed.\nIf an exception is thrown, the transaction is rolled back.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If there is a "),a("code",[e._v("KafkaTransactionManager")]),e._v(" (or synchronized) transaction in process, it is not used."),a("br"),e._v('Instead, a new "nested" transaction is used.')])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"transactionidprefix"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#transactionidprefix"}},[e._v("#")]),e._v(" "),a("code",[e._v("transactionIdPrefix")])]),e._v(" "),a("p",[e._v("As mentioned in "),a("a",{attrs:{href:"#transactions"}},[e._v("the overview")]),e._v(", the producer factory is configured with this property to build the producer "),a("code",[e._v("transactional.id")]),e._v(" property.\nThere is a dichotomy when specifying this property in that, when running multiple instances of the application with "),a("code",[e._v("EOSMode.ALPHA")]),e._v(", it must be the same on all instances to satisfy fencing zombies (also mentioned in the overview) when producing records on a listener container thread.\nHowever, when producing records using transactions that are "),a("strong",[e._v("not")]),e._v(" started by a listener container, the prefix has to be different on each instance.\nVersion 2.3, makes this simpler to configure, especially in a Spring Boot application.\nIn previous versions, you had to create two producer factories and "),a("code",[e._v("KafkaTemplate")]),e._v(" s - one for producing records on a listener container thread and one for stand-alone transactions started by "),a("code",[e._v("kafkaTemplate.executeInTransaction()")]),e._v(" or by a transaction interceptor on a "),a("code",[e._v("@Transactional")]),e._v(" method.")]),e._v(" "),a("p",[e._v("Now, you can override the factory’s "),a("code",[e._v("transactionalIdPrefix")]),e._v(" on the "),a("code",[e._v("KafkaTemplate")]),e._v(" and the "),a("code",[e._v("KafkaTransactionManager")]),e._v(".")]),e._v(" "),a("p",[e._v("When using a transaction manager and template for a listener container, you would normally leave this to default to the producer factory’s property.\nThis value should be the same for all application instances when using "),a("code",[e._v("EOSMode.ALPHA")]),e._v(".\nWith "),a("code",[e._v("EOSMode.BETA")]),e._v(" it is no longer necessary to use the same "),a("code",[e._v("transactional.id")]),e._v(", even for consumer-initiated transactions; in fact, it must be unique on each instance the same as producer-initiated transactions.\nFor transactions started by the template (or the transaction manager for "),a("code",[e._v("@Transaction")]),e._v(") you should set the property on the template and transaction manager respectively.\nThis property must have a different value on each application instance.")]),e._v(" "),a("p",[e._v("This problem (different rules for "),a("code",[e._v("transactional.id")]),e._v(") has been eliminated when "),a("code",[e._v("EOSMode.BETA")]),e._v(" is being used (with broker versions >= 2.5); see "),a("a",{attrs:{href:"#exactly-once"}},[e._v("Exactly Once Semantics")]),e._v(".")]),e._v(" "),a("h5",{attrs:{id:"kafkatemplate-transactional-and-non-transactional-publishing"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#kafkatemplate-transactional-and-non-transactional-publishing"}},[e._v("#")]),e._v(" "),a("code",[e._v("KafkaTemplate")]),e._v(" Transactional and non-Transactional Publishing")]),e._v(" "),a("p",[e._v("Normally, when a "),a("code",[e._v("KafkaTemplate")]),e._v(" is transactional (configured with a transaction-capable producer factory), transactions are required.\nThe transaction can be started by a "),a("code",[e._v("TransactionTemplate")]),e._v(", a "),a("code",[e._v("@Transactional")]),e._v(" method, calling "),a("code",[e._v("executeInTransaction")]),e._v(", or by a listener container, when configured with a "),a("code",[e._v("KafkaTransactionManager")]),e._v(".\nAny attempt to use the template outside the scope of a transaction results in the template throwing an "),a("code",[e._v("IllegalStateException")]),e._v(".\nStarting with version 2.4.3, you can set the template’s "),a("code",[e._v("allowNonTransactional")]),e._v(" property to "),a("code",[e._v("true")]),e._v(".\nIn that case, the template will allow the operation to run without a transaction, by calling the "),a("code",[e._v("ProducerFactory")]),e._v(" 's "),a("code",[e._v("createNonTransactionalProducer()")]),e._v(" method; the producer will be cached, or thread-bound, as normal for reuse.\nSee "),a("a",{attrs:{href:"#producer-factory"}},[e._v("Using "),a("code",[e._v("DefaultKafkaProducerFactory")])]),e._v(".")]),e._v(" "),a("h5",{attrs:{id:"transactions-with-batch-listeners"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#transactions-with-batch-listeners"}},[e._v("#")]),e._v(" Transactions with Batch Listeners")]),e._v(" "),a("p",[e._v("When a listener fails while transactions are being used, the "),a("code",[e._v("AfterRollbackProcessor")]),e._v(" is invoked to take some action after the rollback occurs.\nWhen using the default "),a("code",[e._v("AfterRollbackProcessor")]),e._v(" with a record listener, seeks are performed so that the failed record will be redelivered.\nWith a batch listener, however, the whole batch will be redelivered because the framework doesn’t know which record in the batch failed.\nSee "),a("a",{attrs:{href:"#after-rollback"}},[e._v("After-rollback Processor")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("When using a batch listener, version 2.4.2 introduced an alternative mechanism to deal with failures while processing a batch; the "),a("code",[e._v("BatchToRecordAdapter")]),e._v(".\nWhen a container factory with "),a("code",[e._v("batchListener")]),e._v(" set to true is configured with a "),a("code",[e._v("BatchToRecordAdapter")]),e._v(", the listener is invoked with one record at a time.\nThis enables error handling within the batch, while still making it possible to stop processing the entire batch, depending on the exception type.\nA default "),a("code",[e._v("BatchToRecordAdapter")]),e._v(" is provided, that can be configured with a standard "),a("code",[e._v("ConsumerRecordRecoverer")]),e._v(" such as the "),a("code",[e._v("DeadLetterPublishingRecoverer")]),e._v(".\nThe following test case configuration snippet illustrates how to use this feature:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('public static class TestListener {\n\n final List values = new ArrayList<>();\n\n @KafkaListener(id = "batchRecordAdapter", topics = "test")\n public void listen(String data) {\n values.add(data);\n if ("bar".equals(data)) {\n throw new RuntimeException("reject partial");\n }\n }\n\n}\n\n@Configuration\n@EnableKafka\npublic static class Config {\n\n ConsumerRecord failed;\n\n @Bean\n public TestListener test() {\n return new TestListener();\n }\n\n @Bean\n public ConsumerFactory consumerFactory() {\n return mock(ConsumerFactory.class);\n }\n\n @Bean\n public ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() {\n ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory();\n factory.setConsumerFactory(consumerFactory());\n factory.setBatchListener(true);\n factory.setBatchToRecordAdapter(new DefaultBatchToRecordAdapter<>((record, ex) -> {\n this.failed = record;\n }));\n return factory;\n }\n\n}\n')])])]),a("h4",{attrs:{id:"_4-1-13-exactly-once-semantics"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-13-exactly-once-semantics"}},[e._v("#")]),e._v(" 4.1.13. Exactly Once Semantics")]),e._v(" "),a("p",[e._v("You can provide a listener container with a "),a("code",[e._v("KafkaAwareTransactionManager")]),e._v(" instance.\nWhen so configured, the container starts a transaction before invoking the listener.\nAny "),a("code",[e._v("KafkaTemplate")]),e._v(" operations performed by the listener participate in the transaction.\nIf the listener successfully processes the record (or multiple records, when using a "),a("code",[e._v("BatchMessageListener")]),e._v("), the container sends the offset(s) to the transaction by using "),a("code",[e._v("producer.sendOffsetsToTransaction()")]),e._v("), before the transaction manager commits the transaction.\nIf the listener throws an exception, the transaction is rolled back and the consumer is repositioned so that the rolled-back record(s) can be retrieved on the next poll.\nSee "),a("a",{attrs:{href:"#after-rollback"}},[e._v("After-rollback Processor")]),e._v(" for more information and for handling records that repeatedly fail.")]),e._v(" "),a("p",[e._v("Using transactions enables Exactly Once Semantics (EOS).")]),e._v(" "),a("p",[e._v("This means that, for a "),a("code",[e._v("read→process-write")]),e._v(" sequence, it is guaranteed that the "),a("strong",[e._v("sequence")]),e._v(" is completed exactly once.\n(The read and process are have at least once semantics).")]),e._v(" "),a("p",[e._v("Spring for Apache Kafka version 2.5 and later supports two EOS modes:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("ALPHA")]),e._v(" - alias for "),a("code",[e._v("V1")]),e._v(" (deprecated)")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("BETA")]),e._v(" - alias for "),a("code",[e._v("V2")]),e._v(" (deprecated)")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("V1")]),e._v(" - aka "),a("code",[e._v("transactional.id")]),e._v(" fencing (since version 0.11.0.0)")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("V2")]),e._v(" - aka fetch-offset-request fencing (since version 2.5)")])])]),e._v(" "),a("p",[e._v("With mode "),a("code",[e._v("V1")]),e._v(', the producer is "fenced" if another instance with the same '),a("code",[e._v("transactional.id")]),e._v(" is started.\nSpring manages this by using a "),a("code",[e._v("Producer")]),e._v(" for each "),a("code",[e._v("group.id/topic/partition")]),e._v("; when a rebalance occurs a new instance will use the same "),a("code",[e._v("transactional.id")]),e._v(" and the old producer is fenced.")]),e._v(" "),a("p",[e._v("With mode "),a("code",[e._v("V2")]),e._v(", it is not necessary to have a producer for each "),a("code",[e._v("group.id/topic/partition")]),e._v(" because consumer metadata is sent along with the offsets to the transaction and the broker can determine if the producer is fenced using that information instead.")]),e._v(" "),a("p",[e._v("Starting with version 2.6, the default "),a("code",[e._v("EOSMode")]),e._v(" is "),a("code",[e._v("V2")]),e._v(".")]),e._v(" "),a("p",[e._v("To configure the container to use mode "),a("code",[e._v("ALPHA")]),e._v(", set the container property "),a("code",[e._v("EOSMode")]),e._v(" to "),a("code",[e._v("ALPHA")]),e._v(", to revert to the previous behavior.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("With "),a("code",[e._v("V2")]),e._v(" (default), your brokers must be version 2.5 or later; "),a("code",[e._v("kafka-clients")]),e._v(" version 3.0, the producer will no longer fall back to "),a("code",[e._v("V1")]),e._v("; if the broker does not support "),a("code",[e._v("V2")]),e._v(", an exception is thrown."),a("br"),e._v("If your brokers are earlier than 2.5, you must set the "),a("code",[e._v("EOSMode")]),e._v(" to "),a("code",[e._v("V1")]),e._v(", leave the "),a("code",[e._v("DefaultKafkaProducerFactory")]),e._v(" "),a("code",[e._v("producerPerConsumerPartition")]),e._v(" set to "),a("code",[e._v("true")]),e._v(" and, if you are using a batch listener, you should set "),a("code",[e._v("subBatchPerPartition")]),e._v(" to "),a("code",[e._v("true")]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("When your brokers are upgraded to 2.5 or later, you should switch the mode to "),a("code",[e._v("V2")]),e._v(", but the number of producers will remain as before.\nYou can then do a rolling upgrade of your application with "),a("code",[e._v("producerPerConsumerPartition")]),e._v(" set to "),a("code",[e._v("false")]),e._v(" to reduce the number of producers; you should also no longer set the "),a("code",[e._v("subBatchPerPartition")]),e._v(" container property.")]),e._v(" "),a("p",[e._v("If your brokers are already 2.5 or newer, you should set the "),a("code",[e._v("DefaultKafkaProducerFactory")]),e._v(" "),a("code",[e._v("producerPerConsumerPartition")]),e._v(" property to "),a("code",[e._v("false")]),e._v(", to reduce the number of producers needed.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("When using "),a("code",[e._v("EOSMode.V2")]),e._v(" with "),a("code",[e._v("producerPerConsumerPartition=false")]),e._v(" the "),a("code",[e._v("transactional.id")]),e._v(" must be unique across all application instances.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("When using "),a("code",[e._v("V2")]),e._v(" mode, it is no longer necessary to set the "),a("code",[e._v("subBatchPerPartition")]),e._v(" to "),a("code",[e._v("true")]),e._v("; it will default to "),a("code",[e._v("false")]),e._v(" when the "),a("code",[e._v("EOSMode")]),e._v(" is "),a("code",[e._v("V2")]),e._v(".")]),e._v(" "),a("p",[e._v("Refer to "),a("a",{attrs:{href:"https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics",target:"_blank",rel:"noopener noreferrer"}},[e._v("KIP-447"),a("OutboundLink")],1),e._v(" for more information.")]),e._v(" "),a("p",[a("code",[e._v("V1")]),e._v(" and "),a("code",[e._v("V2")]),e._v(" were previously "),a("code",[e._v("ALPHA")]),e._v(" and "),a("code",[e._v("BETA")]),e._v("; they have been changed to align the framework with "),a("a",{attrs:{href:"https://cwiki.apache.org/confluence/display/KAFKA/KIP-732%3A+Deprecate+eos-alpha+and+replace+eos-beta+with+eos-v2",target:"_blank",rel:"noopener noreferrer"}},[e._v("KIP-732"),a("OutboundLink")],1),e._v(".")]),e._v(" "),a("h4",{attrs:{id:"_4-1-14-wiring-spring-beans-into-producer-consumer-interceptors"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-14-wiring-spring-beans-into-producer-consumer-interceptors"}},[e._v("#")]),e._v(" 4.1.14. Wiring Spring Beans into Producer/Consumer Interceptors")]),e._v(" "),a("p",[e._v("Apache Kafka provides a mechanism to add interceptors to producers and consumers.\nThese objects are managed by Kafka, not Spring, and so normal Spring dependency injection won’t work for wiring in dependent Spring Beans.\nHowever, you can manually wire in those dependencies using the interceptor "),a("code",[e._v("config()")]),e._v(" method.\nThe following Spring Boot application shows how to do this by overriding boot’s default factories to add some dependent bean into the configuration properties.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@SpringBootApplication\npublic class Application {\n\n public static void main(String[] args) {\n SpringApplication.run(Application.class, args);\n }\n\n @Bean\n public ConsumerFactory kafkaConsumerFactory(SomeBean someBean) {\n Map consumerProperties = new HashMap<>();\n // consumerProperties.put(..., ...)\n // ...\n consumerProperties.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MyConsumerInterceptor.class.getName());\n consumerProperties.put("some.bean", someBean);\n return new DefaultKafkaConsumerFactory<>(consumerProperties);\n }\n\n @Bean\n public ProducerFactory kafkaProducerFactory(SomeBean someBean) {\n Map producerProperties = new HashMap<>();\n // producerProperties.put(..., ...)\n // ...\n Map producerProperties = properties.buildProducerProperties();\n producerProperties.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, MyProducerInterceptor.class.getName());\n producerProperties.put("some.bean", someBean);\n DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(producerProperties);\n return factory;\n }\n\n @Bean\n public SomeBean someBean() {\n return new SomeBean();\n }\n\n @KafkaListener(id = "kgk897", topics = "kgh897")\n public void listen(String in) {\n System.out.println("Received " + in);\n }\n\n @Bean\n public ApplicationRunner runner(KafkaTemplate template) {\n return args -> template.send("kgh897", "test");\n }\n\n @Bean\n public NewTopic kRequests() {\n return TopicBuilder.name("kgh897")\n .partitions(1)\n .replicas(1)\n .build();\n }\n\n}\n')])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('public class SomeBean {\n\n public void someMethod(String what) {\n System.out.println(what + " in my foo bean");\n }\n\n}\n')])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('public class MyProducerInterceptor implements ProducerInterceptor {\n\n private SomeBean bean;\n\n @Override\n public void configure(Map configs) {\n this.bean = (SomeBean) configs.get("some.bean");\n }\n\n @Override\n public ProducerRecord onSend(ProducerRecord record) {\n this.bean.someMethod("producer interceptor");\n return record;\n }\n\n @Override\n public void onAcknowledgement(RecordMetadata metadata, Exception exception) {\n }\n\n @Override\n public void close() {\n }\n\n}\n')])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('public class MyConsumerInterceptor implements ConsumerInterceptor {\n\n private SomeBean bean;\n\n @Override\n public void configure(Map configs) {\n this.bean = (SomeBean) configs.get("some.bean");\n }\n\n @Override\n public ConsumerRecords onConsume(ConsumerRecords records) {\n this.bean.someMethod("consumer interceptor");\n return records;\n }\n\n @Override\n public void onCommit(Map offsets) {\n }\n\n @Override\n public void close() {\n }\n\n}\n')])])]),a("p",[e._v("Result:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("producer interceptor in my foo bean\nconsumer interceptor in my foo bean\nReceived test\n")])])]),a("h4",{attrs:{id:"_4-1-15-pausing-and-resuming-listener-containers"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-15-pausing-and-resuming-listener-containers"}},[e._v("#")]),e._v(" 4.1.15. Pausing and Resuming Listener Containers")]),e._v(" "),a("p",[e._v("Version 2.1.3 added "),a("code",[e._v("pause()")]),e._v(" and "),a("code",[e._v("resume()")]),e._v(" methods to listener containers.\nPreviously, you could pause a consumer within a "),a("code",[e._v("ConsumerAwareMessageListener")]),e._v(" and resume it by listening for a "),a("code",[e._v("ListenerContainerIdleEvent")]),e._v(", which provides access to the "),a("code",[e._v("Consumer")]),e._v(" object.\nWhile you could pause a consumer in an idle container by using an event listener, in some cases, this was not thread-safe, since there is no guarantee that the event listener is invoked on the consumer thread.\nTo safely pause and resume consumers, you should use the "),a("code",[e._v("pause")]),e._v(" and "),a("code",[e._v("resume")]),e._v(" methods on the listener containers.\nA "),a("code",[e._v("pause()")]),e._v(" takes effect just before the next "),a("code",[e._v("poll()")]),e._v("; a "),a("code",[e._v("resume()")]),e._v(" takes effect just after the current "),a("code",[e._v("poll()")]),e._v(" returns.\nWhen a container is paused, it continues to "),a("code",[e._v("poll()")]),e._v(" the consumer, avoiding a rebalance if group management is being used, but it does not retrieve any records.\nSee the Kafka documentation for more information.")]),e._v(" "),a("p",[e._v("Starting with version 2.1.5, you can call "),a("code",[e._v("isPauseRequested()")]),e._v(" to see if "),a("code",[e._v("pause()")]),e._v(" has been called.\nHowever, the consumers might not have actually paused yet."),a("code",[e._v("isConsumerPaused()")]),e._v(" returns true if all "),a("code",[e._v("Consumer")]),e._v(" instances have actually paused.")]),e._v(" "),a("p",[e._v("In addition (also since 2.1.5), "),a("code",[e._v("ConsumerPausedEvent")]),e._v(" and "),a("code",[e._v("ConsumerResumedEvent")]),e._v(" instances are published with the container as the "),a("code",[e._v("source")]),e._v(" property and the "),a("code",[e._v("TopicPartition")]),e._v(" instances involved in the "),a("code",[e._v("partitions")]),e._v(" property.")]),e._v(" "),a("p",[e._v("The following simple Spring Boot application demonstrates by using the container registry to get a reference to a "),a("code",[e._v("@KafkaListener")]),e._v(" method’s container and pausing or resuming its consumers as well as receiving the corresponding events:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@SpringBootApplication\npublic class Application implements ApplicationListener {\n\n public static void main(String[] args) {\n SpringApplication.run(Application.class, args).close();\n }\n\n @Override\n public void onApplicationEvent(KafkaEvent event) {\n System.out.println(event);\n }\n\n @Bean\n public ApplicationRunner runner(KafkaListenerEndpointRegistry registry,\n KafkaTemplate template) {\n return args -> {\n template.send("pause.resume.topic", "thing1");\n Thread.sleep(10_000);\n System.out.println("pausing");\n registry.getListenerContainer("pause.resume").pause();\n Thread.sleep(10_000);\n template.send("pause.resume.topic", "thing2");\n Thread.sleep(10_000);\n System.out.println("resuming");\n registry.getListenerContainer("pause.resume").resume();\n Thread.sleep(10_000);\n };\n }\n\n @KafkaListener(id = "pause.resume", topics = "pause.resume.topic")\n public void listen(String in) {\n System.out.println(in);\n }\n\n @Bean\n public NewTopic topic() {\n return TopicBuilder.name("pause.resume.topic")\n .partitions(2)\n .replicas(1)\n .build();\n }\n\n}\n')])])]),a("p",[e._v("The following listing shows the results of the preceding example:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("partitions assigned: [pause.resume.topic-1, pause.resume.topic-0]\nthing1\npausing\nConsumerPausedEvent [partitions=[pause.resume.topic-1, pause.resume.topic-0]]\nresuming\nConsumerResumedEvent [partitions=[pause.resume.topic-1, pause.resume.topic-0]]\nthing2\n")])])]),a("h4",{attrs:{id:"_4-1-16-pausing-and-resuming-partitions-on-listener-containers"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-16-pausing-and-resuming-partitions-on-listener-containers"}},[e._v("#")]),e._v(" 4.1.16. Pausing and Resuming Partitions on Listener Containers")]),e._v(" "),a("p",[e._v("Since version 2.7 you can pause and resume the consumption of specific partitions assigned to that consumer by using the "),a("code",[e._v("pausePartition(TopicPartition topicPartition)")]),e._v(" and "),a("code",[e._v("resumePartition(TopicPartition topicPartition)")]),e._v(" methods in the listener containers.\nThe pausing and resuming takes place respectively before and after the "),a("code",[e._v("poll()")]),e._v(" similar to the "),a("code",[e._v("pause()")]),e._v(" and "),a("code",[e._v("resume()")]),e._v(" methods.\nThe "),a("code",[e._v("isPartitionPauseRequested()")]),e._v(" method returns true if pause for that partition has been requested.\nThe "),a("code",[e._v("isPartitionPaused()")]),e._v(" method returns true if that partition has effectively been paused.")]),e._v(" "),a("p",[e._v("Also since version 2.7 "),a("code",[e._v("ConsumerPartitionPausedEvent")]),e._v(" and "),a("code",[e._v("ConsumerPartitionResumedEvent")]),e._v(" instances are published with the container as the "),a("code",[e._v("source")]),e._v(" property and the "),a("code",[e._v("TopicPartition")]),e._v(" instance.")]),e._v(" "),a("h4",{attrs:{id:"_4-1-17-serialization-deserialization-and-message-conversion"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-17-serialization-deserialization-and-message-conversion"}},[e._v("#")]),e._v(" 4.1.17. Serialization, Deserialization, and Message Conversion")]),e._v(" "),a("h5",{attrs:{id:"overview-3"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#overview-3"}},[e._v("#")]),e._v(" Overview")]),e._v(" "),a("p",[e._v("Apache Kafka provides a high-level API for serializing and deserializing record values as well as their keys.\nIt is present with the "),a("code",[e._v("org.apache.kafka.common.serialization.Serializer")]),e._v(" and"),a("code",[e._v("org.apache.kafka.common.serialization.Deserializer")]),e._v(" abstractions with some built-in implementations.\nMeanwhile, we can specify serializer and deserializer classes by using "),a("code",[e._v("Producer")]),e._v(" or "),a("code",[e._v("Consumer")]),e._v(" configuration properties.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);\nprops.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);\n...\nprops.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);\nprops.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);\n")])])]),a("p",[e._v("For more complex or particular cases, the "),a("code",[e._v("KafkaConsumer")]),e._v(" (and, therefore, "),a("code",[e._v("KafkaProducer")]),e._v(") provides overloaded\nconstructors to accept "),a("code",[e._v("Serializer")]),e._v(" and "),a("code",[e._v("Deserializer")]),e._v(" instances for "),a("code",[e._v("keys")]),e._v(" and "),a("code",[e._v("values")]),e._v(", respectively.")]),e._v(" "),a("p",[e._v("When you use this API, the "),a("code",[e._v("DefaultKafkaProducerFactory")]),e._v(" and "),a("code",[e._v("DefaultKafkaConsumerFactory")]),e._v(" also provide properties (through constructors or setter methods) to inject custom "),a("code",[e._v("Serializer")]),e._v(" and "),a("code",[e._v("Deserializer")]),e._v(" instances into the target "),a("code",[e._v("Producer")]),e._v(" or "),a("code",[e._v("Consumer")]),e._v(".\nAlso, you can pass in "),a("code",[e._v("Supplier")]),e._v(" or "),a("code",[e._v("Supplier")]),e._v(" instances through constructors - these "),a("code",[e._v("Supplier")]),e._v(" s are called on creation of each "),a("code",[e._v("Producer")]),e._v(" or "),a("code",[e._v("Consumer")]),e._v(".")]),e._v(" "),a("h5",{attrs:{id:"string-serialization"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#string-serialization"}},[e._v("#")]),e._v(" String serialization")]),e._v(" "),a("p",[e._v("Since version 2.5, Spring for Apache Kafka provides "),a("code",[e._v("ToStringSerializer")]),e._v(" and "),a("code",[e._v("ParseStringDeserializer")]),e._v(" classes that use String representation of entities.\nThey rely on methods "),a("code",[e._v("toString")]),e._v(" and some "),a("code",[e._v("Function")]),e._v(" or "),a("code",[e._v("BiFunction")]),e._v(" to parse the String and populate properties of an instance.\nUsually, this would invoke some static method on the class, such as "),a("code",[e._v("parse")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("ToStringSerializer thingSerializer = new ToStringSerializer<>();\n//...\nParseStringDeserializer deserializer = new ParseStringDeserializer<>(Thing::parse);\n")])])]),a("p",[e._v("By default, the "),a("code",[e._v("ToStringSerializer")]),e._v(" is configured to convey type information about the serialized entity in the record "),a("code",[e._v("Headers")]),e._v(".\nYou can disable this by setting the "),a("code",[e._v("addTypeInfo")]),e._v(" property to false.\nThis information can be used by "),a("code",[e._v("ParseStringDeserializer")]),e._v(" on the receiving side.")]),e._v(" "),a("ul",[a("li",[a("code",[e._v("ToStringSerializer.ADD_TYPE_INFO_HEADERS")]),e._v(" (default "),a("code",[e._v("true")]),e._v("): You can set it to "),a("code",[e._v("false")]),e._v(" to disable this feature on the "),a("code",[e._v("ToStringSerializer")]),e._v(" (sets the "),a("code",[e._v("addTypeInfo")]),e._v(" property).")])]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('ParseStringDeserializer deserializer = new ParseStringDeserializer<>((str, headers) -> {\n byte[] header = headers.lastHeader(ToStringSerializer.VALUE_TYPE).value();\n String entityType = new String(header);\n\n if (entityType.contains("Thing")) {\n return Thing.parse(str);\n }\n else {\n // ...parsing logic\n }\n});\n')])])]),a("p",[e._v("You can configure the "),a("code",[e._v("Charset")]),e._v(" used to convert "),a("code",[e._v("String")]),e._v(" to/from "),a("code",[e._v("byte[]")]),e._v(" with the default being "),a("code",[e._v("UTF-8")]),e._v(".")]),e._v(" "),a("p",[e._v("You can configure the deserializer with the name of the parser method using "),a("code",[e._v("ConsumerConfig")]),e._v(" properties:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("ParseStringDeserializer.KEY_PARSER")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ParseStringDeserializer.VALUE_PARSER")])])])]),e._v(" "),a("p",[e._v("The properties must contain the fully qualified name of the class followed by the method name, separated by a period "),a("code",[e._v(".")]),e._v(".\nThe method must be static and have a signature of either "),a("code",[e._v("(String, Headers)")]),e._v(" or "),a("code",[e._v("(String)")]),e._v(".")]),e._v(" "),a("p",[e._v("A "),a("code",[e._v("ToFromStringSerde")]),e._v(" is also provided, for use with Kafka Streams.")]),e._v(" "),a("h5",{attrs:{id:"json"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#json"}},[e._v("#")]),e._v(" JSON")]),e._v(" "),a("p",[e._v("Spring for Apache Kafka also provides "),a("code",[e._v("JsonSerializer")]),e._v(" and "),a("code",[e._v("JsonDeserializer")]),e._v(" implementations that are based on the\nJackson JSON object mapper.\nThe "),a("code",[e._v("JsonSerializer")]),e._v(" allows writing any Java object as a JSON "),a("code",[e._v("byte[]")]),e._v(".\nThe "),a("code",[e._v("JsonDeserializer")]),e._v(" requires an additional "),a("code",[e._v("Class targetType")]),e._v(" argument to allow the deserialization of a consumed "),a("code",[e._v("byte[]")]),e._v(" to the proper target object.\nThe following example shows how to create a "),a("code",[e._v("JsonDeserializer")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("JsonDeserializer thingDeserializer = new JsonDeserializer<>(Thing.class);\n")])])]),a("p",[e._v("You can customize both "),a("code",[e._v("JsonSerializer")]),e._v(" and "),a("code",[e._v("JsonDeserializer")]),e._v(" with an "),a("code",[e._v("ObjectMapper")]),e._v(".\nYou can also extend them to implement some particular configuration logic in the "),a("code",[e._v("configure(Map configs, boolean isKey)")]),e._v(" method.")]),e._v(" "),a("p",[e._v("Starting with version 2.3, all the JSON-aware components are configured by default with a "),a("code",[e._v("JacksonUtils.enhancedObjectMapper()")]),e._v(" instance, which comes with the "),a("code",[e._v("MapperFeature.DEFAULT_VIEW_INCLUSION")]),e._v(" and "),a("code",[e._v("DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES")]),e._v(" features disabled.\nAlso such an instance is supplied with well-known modules for custom data types, such a Java time and Kotlin support.\nSee "),a("code",[e._v("JacksonUtils.enhancedObjectMapper()")]),e._v(" JavaDocs for more information.\nThis method also registers a "),a("code",[e._v("org.springframework.kafka.support.JacksonMimeTypeModule")]),e._v(" for "),a("code",[e._v("org.springframework.util.MimeType")]),e._v(" objects serialization into the plain string for inter-platform compatibility over the network.\nA "),a("code",[e._v("JacksonMimeTypeModule")]),e._v(" can be registered as a bean in the application context and it will be auto-configured into "),a("a",{attrs:{href:"https://docs.spring.io/spring-boot/docs/current/reference/html/howto-spring-mvc.html#howto-customize-the-jackson-objectmapper",target:"_blank",rel:"noopener noreferrer"}},[e._v("Spring Boot "),a("code",[e._v("ObjectMapper")]),e._v(" instance"),a("OutboundLink")],1),e._v(".")]),e._v(" "),a("p",[e._v("Also starting with version 2.3, the "),a("code",[e._v("JsonDeserializer")]),e._v(" provides "),a("code",[e._v("TypeReference")]),e._v("-based constructors for better handling of target generic container types.")]),e._v(" "),a("p",[e._v("Starting with version 2.1, you can convey type information in record "),a("code",[e._v("Headers")]),e._v(", allowing the handling of multiple types.\nIn addition, you can configure the serializer and deserializer by using the following Kafka properties.\nThey have no effect if you have provided "),a("code",[e._v("Serializer")]),e._v(" and "),a("code",[e._v("Deserializer")]),e._v(" instances for "),a("code",[e._v("KafkaConsumer")]),e._v(" and "),a("code",[e._v("KafkaProducer")]),e._v(", respectively.")]),e._v(" "),a("h6",{attrs:{id:"configuration-properties"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#configuration-properties"}},[e._v("#")]),e._v(" Configuration Properties")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("JsonSerializer.ADD_TYPE_INFO_HEADERS")]),e._v(" (default "),a("code",[e._v("true")]),e._v("): You can set it to "),a("code",[e._v("false")]),e._v(" to disable this feature on the "),a("code",[e._v("JsonSerializer")]),e._v(" (sets the "),a("code",[e._v("addTypeInfo")]),e._v(" property).")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("JsonSerializer.TYPE_MAPPINGS")]),e._v(" (default "),a("code",[e._v("empty")]),e._v("): See "),a("a",{attrs:{href:"#serdes-mapping-types"}},[e._v("Mapping Types")]),e._v(".")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("JsonDeserializer.USE_TYPE_INFO_HEADERS")]),e._v(" (default "),a("code",[e._v("true")]),e._v("): You can set it to "),a("code",[e._v("false")]),e._v(" to ignore headers set by the serializer.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("JsonDeserializer.REMOVE_TYPE_INFO_HEADERS")]),e._v(" (default "),a("code",[e._v("true")]),e._v("): You can set it to "),a("code",[e._v("false")]),e._v(" to retain headers set by the serializer.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("JsonDeserializer.KEY_DEFAULT_TYPE")]),e._v(": Fallback type for deserialization of keys if no header information is present.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("JsonDeserializer.VALUE_DEFAULT_TYPE")]),e._v(": Fallback type for deserialization of values if no header information is present.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("JsonDeserializer.TRUSTED_PACKAGES")]),e._v(" (default "),a("code",[e._v("java.util")]),e._v(", "),a("code",[e._v("java.lang")]),e._v("): Comma-delimited list of package patterns allowed for deserialization."),a("code",[e._v("*")]),e._v(" means deserialize all.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("JsonDeserializer.TYPE_MAPPINGS")]),e._v(" (default "),a("code",[e._v("empty")]),e._v("): See "),a("a",{attrs:{href:"#serdes-mapping-types"}},[e._v("Mapping Types")]),e._v(".")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("JsonDeserializer.KEY_TYPE_METHOD")]),e._v(" (default "),a("code",[e._v("empty")]),e._v("): See "),a("a",{attrs:{href:"#serdes-type-methods"}},[e._v("Using Methods to Determine Types")]),e._v(".")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("JsonDeserializer.VALUE_TYPE_METHOD")]),e._v(" (default "),a("code",[e._v("empty")]),e._v("): See "),a("a",{attrs:{href:"#serdes-type-methods"}},[e._v("Using Methods to Determine Types")]),e._v(".")])])]),e._v(" "),a("p",[e._v("Starting with version 2.2, the type information headers (if added by the serializer) are removed by the deserializer.\nYou can revert to the previous behavior by setting the "),a("code",[e._v("removeTypeHeaders")]),e._v(" property to "),a("code",[e._v("false")]),e._v(", either directly on the deserializer or with the configuration property described earlier.")]),e._v(" "),a("p",[e._v("See also "),a("a",{attrs:{href:"#tip-json"}},[e._v("[tip-json]")]),e._v(".")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Starting with version 2.8, if you construct the serializer or deserializer programmatically as shown in "),a("a",{attrs:{href:"#prog-json"}},[e._v("Programmatic Construction")]),e._v(", the above properties will be applied by the factories, as long as you have not set any properties explicitly (using "),a("code",[e._v("set*()")]),e._v(" methods or using the fluent API)."),a("br"),e._v("Previously, when creating programmatically, the configuration properties were never applied; this is still the case if you explicitly set properties on the object directly.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h6",{attrs:{id:"mapping-types"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#mapping-types"}},[e._v("#")]),e._v(" Mapping Types")]),e._v(" "),a("p",[e._v("Starting with version 2.2, when using JSON, you can now provide type mappings by using the properties in the preceding list.\nPreviously, you had to customize the type mapper within the serializer and deserializer.\nMappings consist of a comma-delimited list of "),a("code",[e._v("token:className")]),e._v(" pairs.\nOn outbound, the payload’s class name is mapped to the corresponding token.\nOn inbound, the token in the type header is mapped to the corresponding class name.")]),e._v(" "),a("p",[e._v("The following example creates a set of mappings:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('senderProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);\nsenderProps.put(JsonSerializer.TYPE_MAPPINGS, "cat:com.mycat.Cat, hat:com.myhat.hat");\n...\nconsumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class);\nconsumerProps.put(JsonDeSerializer.TYPE_MAPPINGS, "cat:com.yourcat.Cat, hat:com.yourhat.hat");\n')])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The corresponding objects must be compatible.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("If you use "),a("a",{attrs:{href:"https://docs.spring.io/spring-boot/docs/current/reference/html/boot-features-messaging.html#boot-features-kafka",target:"_blank",rel:"noopener noreferrer"}},[e._v("Spring Boot"),a("OutboundLink")],1),e._v(", you can provide these properties in the "),a("code",[e._v("application.properties")]),e._v(" (or yaml) file.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("spring.kafka.producer.value-serializer=org.springframework.kafka.support.serializer.JsonSerializer\nspring.kafka.producer.properties.spring.json.type.mapping=cat:com.mycat.Cat,hat:com.myhat.Hat\n")])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("You can perform only simple configuration with properties."),a("br"),e._v("For more advanced configuration (such as using a custom "),a("code",[e._v("ObjectMapper")]),e._v(" in the serializer and deserializer), you should use the producer and consumer factory constructors that accept a pre-built serializer and deserializer."),a("br"),e._v("The following Spring Boot example overrides the default factories:"),a("br"),a("br"),a("code",[e._v("
@Bean
public ConsumerFactory kafkaConsumerFactory(JsonDeserializer customValueDeserializer) {
Map properties = new HashMap<>();
// properties.put(..., ...)
// ...
return new DefaultKafkaConsumerFactory<>(properties,
new StringDeserializer(), customValueDeserializer);
}

@Bean
public ProducerFactory kafkaProducerFactory(JsonSerializer customValueSerializer) {

return new DefaultKafkaProducerFactory<>(properties.buildProducerProperties(),
new StringSerializer(), customValueSerializer);
}
")]),a("br"),a("br"),e._v("Setters are also provided, as an alternative to using these constructors.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Starting with version 2.2, you can explicitly configure the deserializer to use the supplied target type and ignore type information in headers by using one of the overloaded constructors that have a boolean "),a("code",[e._v("useHeadersIfPresent")]),e._v(" (which is "),a("code",[e._v("true")]),e._v(" by default).\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("DefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory<>(props,\n new IntegerDeserializer(), new JsonDeserializer<>(Cat1.class, false));\n")])])]),a("h6",{attrs:{id:"using-methods-to-determine-types"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#using-methods-to-determine-types"}},[e._v("#")]),e._v(" Using Methods to Determine Types")]),e._v(" "),a("p",[e._v("Starting with version 2.5, you can now configure the deserializer, via properties, to invoke a method to determine the target type.\nIf present, this will override any of the other techniques discussed above.\nThis can be useful if the data is published by an application that does not use the Spring serializer and you need to deserialize to different types depending on the data, or other headers.\nSet these properties to the method name - a fully qualified class name followed by the method name, separated by a period "),a("code",[e._v(".")]),e._v(".\nThe method must be declared as "),a("code",[e._v("public static")]),e._v(", have one of three signatures "),a("code",[e._v("(String topic, byte[] data, Headers headers)")]),e._v(", "),a("code",[e._v("(byte[] data, Headers headers)")]),e._v(" or "),a("code",[e._v("(byte[] data)")]),e._v(" and return a Jackson "),a("code",[e._v("JavaType")]),e._v(".")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("JsonDeserializer.KEY_TYPE_METHOD")]),e._v(" : "),a("code",[e._v("spring.json.key.type.method")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("JsonDeserializer.VALUE_TYPE_METHOD")]),e._v(" : "),a("code",[e._v("spring.json.value.type.method")])])])]),e._v(" "),a("p",[e._v("You can use arbitrary headers or inspect the data to determine the type.")]),e._v(" "),a("p",[e._v("Example")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('JavaType thing1Type = TypeFactory.defaultInstance().constructType(Thing1.class);\n\nJavaType thing2Type = TypeFactory.defaultInstance().constructType(Thing2.class);\n\npublic static JavaType thingOneOrThingTwo(byte[] data, Headers headers) {\n // {"thisIsAFieldInThing1":"value", ...\n if (data[21] == \'1\') {\n return thing1Type;\n }\n else {\n return thing2Type;\n }\n}\n')])])]),a("p",[e._v("For more sophisticated data inspection consider using "),a("code",[e._v("JsonPath")]),e._v(" or similar but, the simpler the test to determine the type, the more efficient the process will be.")]),e._v(" "),a("p",[e._v("The following is an example of creating the deserializer programmatically (when providing the consumer factory with the deserializer in the constructor):")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('JsonDeserializer deser = new JsonDeserializer<>()\n .trustedPackages("*")\n .typeResolver(SomeClass::thing1Thing2JavaTypeForTopic);\n\n...\n\npublic static JavaType thing1Thing2JavaTypeForTopic(String topic, byte[] data, Headers headers) {\n ...\n}\n')])])]),a("h6",{attrs:{id:"programmatic-construction"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#programmatic-construction"}},[e._v("#")]),e._v(" Programmatic Construction")]),e._v(" "),a("p",[e._v("When constructing the serializer/deserializer programmatically for use in the producer/consumer factory, since version 2.3, you can use the fluent API, which simplifies configuration.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic ProducerFactory pf() {\n Map props = new HashMap<>();\n // props.put(..., ...)\n // ...\n DefaultKafkaProducerFactory pf = new DefaultKafkaProducerFactory<>(props,\n new JsonSerializer()\n .forKeys()\n .noTypeInfo(),\n new JsonSerializer()\n .noTypeInfo());\n return pf;\n}\n\n@Bean\npublic ConsumerFactory cf() {\n Map props = new HashMap<>();\n // props.put(..., ...)\n // ...\n DefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory<>(props,\n new JsonDeserializer<>(MyKeyType.class)\n .forKeys()\n .ignoreTypeHeaders(),\n new JsonDeserializer<>(MyValueType.class)\n .ignoreTypeHeaders());\n return cf;\n}\n")])])]),a("p",[e._v("To provide type mapping programmatically, similar to "),a("a",{attrs:{href:"#serdes-type-methods"}},[e._v("Using Methods to Determine Types")]),e._v(", use the "),a("code",[e._v("typeFunction")]),e._v(" property.")]),e._v(" "),a("p",[e._v("Example")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('JsonDeserializer deser = new JsonDeserializer<>()\n .trustedPackages("*")\n .typeFunction(MyUtils::thingOneOrThingTwo);\n')])])]),a("p",[e._v("Alternatively, as long as you don’t use the fluent API to configure properties, or set them using "),a("code",[e._v("set*()")]),e._v(" methods, the factories will configure the serializer/deserializer using the configuration properties; see "),a("a",{attrs:{href:"#serdes-json-config"}},[e._v("Configuration Properties")]),e._v(".")]),e._v(" "),a("h5",{attrs:{id:"delegating-serializer-and-deserializer"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#delegating-serializer-and-deserializer"}},[e._v("#")]),e._v(" Delegating Serializer and Deserializer")]),e._v(" "),a("h6",{attrs:{id:"using-headers"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#using-headers"}},[e._v("#")]),e._v(" Using Headers")]),e._v(" "),a("p",[e._v("Version 2.3 introduced the "),a("code",[e._v("DelegatingSerializer")]),e._v(" and "),a("code",[e._v("DelegatingDeserializer")]),e._v(", which allow producing and consuming records with different key and/or value types.\nProducers must set a header "),a("code",[e._v("DelegatingSerializer.VALUE_SERIALIZATION_SELECTOR")]),e._v(" to a selector value that is used to select which serializer to use for the value and "),a("code",[e._v("DelegatingSerializer.KEY_SERIALIZATION_SELECTOR")]),e._v(" for the key; if a match is not found, an "),a("code",[e._v("IllegalStateException")]),e._v(" is thrown.")]),e._v(" "),a("p",[e._v("For incoming records, the deserializer uses the same headers to select the deserializer to use; if a match is not found or the header is not present, the raw "),a("code",[e._v("byte[]")]),e._v(" is returned.")]),e._v(" "),a("p",[e._v("You can configure the map of selector to "),a("code",[e._v("Serializer")]),e._v(" / "),a("code",[e._v("Deserializer")]),e._v(" via a constructor, or you can configure it via Kafka producer/consumer properties with the keys "),a("code",[e._v("DelegatingSerializer.VALUE_SERIALIZATION_SELECTOR_CONFIG")]),e._v(" and "),a("code",[e._v("DelegatingSerializer.KEY_SERIALIZATION_SELECTOR_CONFIG")]),e._v(".\nFor the serializer, the producer property can be a "),a("code",[e._v("Map")]),e._v(" where the key is the selector and the value is a "),a("code",[e._v("Serializer")]),e._v(" instance, a serializer "),a("code",[e._v("Class")]),e._v(" or the class name.\nThe property can also be a String of comma-delimited map entries, as shown below.")]),e._v(" "),a("p",[e._v("For the deserializer, the consumer property can be a "),a("code",[e._v("Map")]),e._v(" where the key is the selector and the value is a "),a("code",[e._v("Deserializer")]),e._v(" instance, a deserializer "),a("code",[e._v("Class")]),e._v(" or the class name.\nThe property can also be a String of comma-delimited map entries, as shown below.")]),e._v(" "),a("p",[e._v("To configure using properties, use the following syntax:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('producerProps.put(DelegatingSerializer.VALUE_SERIALIZATION_SELECTOR_CONFIG,\n "thing1:com.example.MyThing1Serializer, thing2:com.example.MyThing2Serializer")\n\nconsumerProps.put(DelegatingDeserializer.VALUE_SERIALIZATION_SELECTOR_CONFIG,\n "thing1:com.example.MyThing1Deserializer, thing2:com.example.MyThing2Deserializer")\n')])])]),a("p",[e._v("Producers would then set the "),a("code",[e._v("DelegatingSerializer.VALUE_SERIALIZATION_SELECTOR")]),e._v(" header to "),a("code",[e._v("thing1")]),e._v(" or "),a("code",[e._v("thing2")]),e._v(".")]),e._v(" "),a("p",[e._v("This technique supports sending different types to the same topic (or different topics).")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Starting with version 2.5.1, it is not necessary to set the selector header, if the type (key or value) is one of the standard types supported by "),a("code",[e._v("Serdes")]),e._v(" ("),a("code",[e._v("Long")]),e._v(", "),a("code",[e._v("Integer")]),e._v(", etc)."),a("br"),e._v("Instead, the serializer will set the header to the class name of the type."),a("br"),e._v("It is not necessary to configure serializers or deserializers for these types, they will be created (once) dynamically.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("For another technique to send different types to different topics, see "),a("a",{attrs:{href:"#routing-template"}},[e._v("Using "),a("code",[e._v("RoutingKafkaTemplate")])]),e._v(".")]),e._v(" "),a("h6",{attrs:{id:"by-type"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#by-type"}},[e._v("#")]),e._v(" By Type")]),e._v(" "),a("p",[e._v("Version 2.8 introduced the "),a("code",[e._v("DelegatingByTypeSerializer")]),e._v(".")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic ProducerFactory producerFactory(Map config) {\n return new DefaultKafkaProducerFactory<>(config,\n null, new DelegatingByTypeSerializer(Map.of(\n byte[].class, new ByteArraySerializer(),\n Bytes.class, new BytesSerializer(),\n String.class, new StringSerializer())));\n}\n")])])]),a("p",[e._v("Starting with version 2.8.3, you can configure the serializer to check if the map key is assignable from the target object, useful when a delegate serializer can serialize sub classes.\nIn this case, if there are amiguous matches, an ordered "),a("code",[e._v("Map")]),e._v(", such as a "),a("code",[e._v("LinkedHashMap")]),e._v(" should be provided.")]),e._v(" "),a("h6",{attrs:{id:"by-topic"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#by-topic"}},[e._v("#")]),e._v(" By Topic")]),e._v(" "),a("p",[e._v("Starting with version 2.8, the "),a("code",[e._v("DelegatingByTopicSerializer")]),e._v(" and "),a("code",[e._v("DelegatingByTopicDeserializer")]),e._v(" allow selection of a serializer/deserializer based on the topic name.\nRegex "),a("code",[e._v("Pattern")]),e._v(" s are used to lookup the instance to use.\nThe map can be configured using a constructor, or via properties (a comma delimited list of "),a("code",[e._v("pattern:serializer")]),e._v(").")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('producerConfigs.put(DelegatingByTopicSerializer.VALUE_SERIALIZATION_TOPIC_CONFIG,\n "topic[0-4]:" + ByteArraySerializer.class.getName()\n + ", topic[5-9]:" + StringSerializer.class.getName());\n...\nConsumerConfigs.put(DelegatingByTopicDeserializer.VALUE_SERIALIZATION_TOPIC_CONFIG,\n "topic[0-4]:" + ByteArrayDeserializer.class.getName()\n + ", topic[5-9]:" + StringDeserializer.class.getName());\n')])])]),a("p",[e._v("Use "),a("code",[e._v("KEY_SERIALIZATION_TOPIC_CONFIG")]),e._v(" when using this for keys.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic ProducerFactory producerFactory(Map config) {\n return new DefaultKafkaProducerFactory<>(config,\n null,\n new DelegatingByTopicSerializer(Map.of(\n Pattern.compile("topic[0-4]"), new ByteArraySerializer(),\n Pattern.compile("topic[5-9]"), new StringSerializer())),\n new JsonSerializer()); // default\n}\n')])])]),a("p",[e._v("You can specify a default serializer/deserializer to use when there is no pattern match using "),a("code",[e._v("DelegatingByTopicSerialization.KEY_SERIALIZATION_TOPIC_DEFAULT")]),e._v(" and "),a("code",[e._v("DelegatingByTopicSerialization.VALUE_SERIALIZATION_TOPIC_DEFAULT")]),e._v(".")]),e._v(" "),a("p",[e._v("An additional property "),a("code",[e._v("DelegatingByTopicSerialization.CASE_SENSITIVE")]),e._v(" (default "),a("code",[e._v("true")]),e._v("), when set to "),a("code",[e._v("false")]),e._v(" makes the topic lookup case insensitive.")]),e._v(" "),a("h5",{attrs:{id:"retrying-deserializer"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#retrying-deserializer"}},[e._v("#")]),e._v(" Retrying Deserializer")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("RetryingDeserializer")]),e._v(" uses a delegate "),a("code",[e._v("Deserializer")]),e._v(" and "),a("code",[e._v("RetryTemplate")]),e._v(" to retry deserialization when the delegate might have transient errors, such a network issues, during deserialization.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("ConsumerFactory cf = new DefaultKafkaConsumerFactory(myConsumerConfigs,\n new RetryingDeserializer(myUnreliableKeyDeserializer, retryTemplate),\n new RetryingDeserializer(myUnreliableValueDeserializer, retryTemplate));\n")])])]),a("p",[e._v("Refer to the "),a("a",{attrs:{href:"https://github.com/spring-projects/spring-retry",target:"_blank",rel:"noopener noreferrer"}},[e._v("spring-retry"),a("OutboundLink")],1),e._v(" project for configuration of the "),a("code",[e._v("RetryTemplate")]),e._v(" with a retry policy, back off policy, etc.")]),e._v(" "),a("h5",{attrs:{id:"spring-messaging-message-conversion"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#spring-messaging-message-conversion"}},[e._v("#")]),e._v(" Spring Messaging Message Conversion")]),e._v(" "),a("p",[e._v("Although the "),a("code",[e._v("Serializer")]),e._v(" and "),a("code",[e._v("Deserializer")]),e._v(" API is quite simple and flexible from the low-level Kafka "),a("code",[e._v("Consumer")]),e._v(" and "),a("code",[e._v("Producer")]),e._v(" perspective, you might need more flexibility at the Spring Messaging level, when using either "),a("code",[e._v("@KafkaListener")]),e._v(" or "),a("a",{attrs:{href:"https://docs.spring.io/spring-integration/docs/current/reference/html/kafka.html#kafka",target:"_blank",rel:"noopener noreferrer"}},[e._v("Spring Integration’s Apache Kafka Support"),a("OutboundLink")],1),e._v(".\nTo let you easily convert to and from "),a("code",[e._v("org.springframework.messaging.Message")]),e._v(", Spring for Apache Kafka provides a "),a("code",[e._v("MessageConverter")]),e._v(" abstraction with the "),a("code",[e._v("MessagingMessageConverter")]),e._v(" implementation and its "),a("code",[e._v("JsonMessageConverter")]),e._v(" (and subclasses) customization.\nYou can inject the "),a("code",[e._v("MessageConverter")]),e._v(" into a "),a("code",[e._v("KafkaTemplate")]),e._v(" instance directly and by using "),a("code",[e._v("AbstractKafkaListenerContainerFactory")]),e._v(" bean definition for the "),a("code",[e._v("@KafkaListener.containerFactory()")]),e._v(" property.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic KafkaListenerContainerFactory kafkaJsonListenerContainerFactory() {\n ConcurrentKafkaListenerContainerFactory factory =\n new ConcurrentKafkaListenerContainerFactory<>();\n factory.setConsumerFactory(consumerFactory());\n factory.setMessageConverter(new JsonMessageConverter());\n return factory;\n}\n...\n@KafkaListener(topics = "jsonData",\n containerFactory = "kafkaJsonListenerContainerFactory")\npublic void jsonListener(Cat cat) {\n...\n}\n')])])]),a("p",[e._v("When using Spring Boot, simply define the converter as a "),a("code",[e._v("@Bean")]),e._v(" and Spring Boot auto configuration will wire it into the auto-configured template and container factory.")]),e._v(" "),a("p",[e._v("When you use a "),a("code",[e._v("@KafkaListener")]),e._v(", the parameter type is provided to the message converter to assist with the conversion.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("This type inference can be achieved only when the "),a("code",[e._v("@KafkaListener")]),e._v(" annotation is declared at the method level."),a("br"),e._v("With a class-level "),a("code",[e._v("@KafkaListener")]),e._v(", the payload type is used to select which "),a("code",[e._v("@KafkaHandler")]),e._v(" method to invoke, so it must already have been converted before the method can be chosen.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("On the consumer side, you can configure a "),a("code",[e._v("JsonMessageConverter")]),e._v("; it can handle "),a("code",[e._v("ConsumerRecord")]),e._v(" values of type "),a("code",[e._v("byte[]")]),e._v(", "),a("code",[e._v("Bytes")]),e._v(" and "),a("code",[e._v("String")]),e._v(" so should be used in conjunction with a "),a("code",[e._v("ByteArrayDeserializer")]),e._v(", "),a("code",[e._v("BytesDeserializer")]),e._v(" or "),a("code",[e._v("StringDeserializer")]),e._v("."),a("br"),e._v("("),a("code",[e._v("byte[]")]),e._v(" and "),a("code",[e._v("Bytes")]),e._v(" are more efficient because they avoid an unnecessary "),a("code",[e._v("byte[]")]),e._v(" to "),a("code",[e._v("String")]),e._v(" conversion)."),a("br"),e._v("You can also configure the specific subclass of "),a("code",[e._v("JsonMessageConverter")]),e._v(" corresponding to the deserializer, if you so wish."),a("br"),a("br"),e._v("On the producer side, when you use Spring Integration or the "),a("code",[e._v("KafkaTemplate.send(Message message)")]),e._v(" method (see "),a("a",{attrs:{href:"#kafka-template"}},[e._v("Using "),a("code",[e._v("KafkaTemplate")])]),e._v("), you must configure a message converter that is compatible with the configured Kafka "),a("code",[e._v("Serializer")]),e._v("."),a("br"),a("br"),e._v("* "),a("code",[e._v("StringJsonMessageConverter")]),e._v(" with "),a("code",[e._v("StringSerializer")]),a("br"),a("br"),e._v("* "),a("code",[e._v("BytesJsonMessageConverter")]),e._v(" with "),a("code",[e._v("BytesSerializer")]),a("br"),a("br"),e._v("* "),a("code",[e._v("ByteArrayJsonMessageConverter")]),e._v(" with "),a("code",[e._v("ByteArraySerializer")]),a("br"),a("br"),e._v("Again, using "),a("code",[e._v("byte[]")]),e._v(" or "),a("code",[e._v("Bytes")]),e._v(" is more efficient because they avoid a "),a("code",[e._v("String")]),e._v(" to "),a("code",[e._v("byte[]")]),e._v(" conversion."),a("br"),a("br"),e._v("For convenience, starting with version 2.3, the framework also provides a "),a("code",[e._v("StringOrBytesSerializer")]),e._v(" which can serialize all three value types so it can be used with any of the message converters.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Starting with version 2.7.1, message payload conversion can be delegated to a "),a("code",[e._v("spring-messaging")]),e._v(" "),a("code",[e._v("SmartMessageConverter")]),e._v("; this enables conversion, for example, to be based on the "),a("code",[e._v("MessageHeaders.CONTENT_TYPE")]),e._v(" header.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The "),a("code",[e._v("KafkaMessageConverter.fromMessage()")]),e._v(" method is called for outbound conversion to a "),a("code",[e._v("ProducerRecord")]),e._v(" with the message payload in the "),a("code",[e._v("ProducerRecord.value()")]),e._v(" property."),a("br"),e._v("The "),a("code",[e._v("KafkaMessageConverter.toMessage()")]),e._v(" method is called for inbound conversion from "),a("code",[e._v("ConsumerRecord")]),e._v(" with the payload being the "),a("code",[e._v("ConsumerRecord.value()")]),e._v(" property."),a("br"),e._v("The "),a("code",[e._v("SmartMessageConverter.toMessage()")]),e._v(" method is called to create a new outbound "),a("code",[e._v("Message")]),e._v(" from the "),a("code",[e._v("Message")]),e._v(" passed to"),a("code",[e._v("fromMessage()")]),e._v(" (usually by "),a("code",[e._v("KafkaTemplate.send(Message msg)")]),e._v(")."),a("br"),e._v("Similarly, in the "),a("code",[e._v("KafkaMessageConverter.toMessage()")]),e._v(" method, after the converter has created a new "),a("code",[e._v("Message")]),e._v(" from the "),a("code",[e._v("ConsumerRecord")]),e._v(", the "),a("code",[e._v("SmartMessageConverter.fromMessage()")]),e._v(" method is called and then the final inbound message is created with the newly converted payload."),a("br"),e._v("In either case, if the "),a("code",[e._v("SmartMessageConverter")]),e._v(" returns "),a("code",[e._v("null")]),e._v(", the original message is used.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("When the default converter is used in the "),a("code",[e._v("KafkaTemplate")]),e._v(" and listener container factory, you configure the "),a("code",[e._v("SmartMessageConverter")]),e._v(" by calling "),a("code",[e._v("setMessagingConverter()")]),e._v(" on the template and via the "),a("code",[e._v("contentMessageConverter")]),e._v(" property on "),a("code",[e._v("@KafkaListener")]),e._v(" methods.")]),e._v(" "),a("p",[e._v("Examples:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("template.setMessagingConverter(mySmartConverter);\n")])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "withSmartConverter", topics = "someTopic",\n contentTypeConverter = "mySmartConverter")\npublic void smart(Thing thing) {\n ...\n}\n')])])]),a("h6",{attrs:{id:"using-spring-data-projection-interfaces"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#using-spring-data-projection-interfaces"}},[e._v("#")]),e._v(" Using Spring Data Projection Interfaces")]),e._v(" "),a("p",[e._v("Starting with version 2.1.1, you can convert JSON to a Spring Data Projection interface instead of a concrete type.\nThis allows very selective, and low-coupled bindings to data, including the lookup of values from multiple places inside the JSON document.\nFor example the following interface can be defined as message payload type:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('interface SomeSample {\n\n @JsonPath({ "$.username", "$.user.name" })\n String getUsername();\n\n}\n')])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id="projection.listener", topics = "projection")\npublic void projection(SomeSample in) {\n String username = in.getUsername();\n ...\n}\n')])])]),a("p",[e._v("Accessor methods will be used to lookup the property name as field in the received JSON document by default.\nThe "),a("code",[e._v("@JsonPath")]),e._v(" expression allows customization of the value lookup, and even to define multiple JSON Path expressions, to lookup values from multiple places until an expression returns an actual value.")]),e._v(" "),a("p",[e._v("To enable this feature, use a "),a("code",[e._v("ProjectingMessageConverter")]),e._v(" configured with an appropriate delegate converter (used for outbound conversion and converting non-projection interfaces).\nYou must also add "),a("code",[e._v("spring-data:spring-data-commons")]),e._v(" and "),a("code",[e._v("com.jayway.jsonpath:json-path")]),e._v(" to the class path.")]),e._v(" "),a("p",[e._v("When used as the parameter to a "),a("code",[e._v("@KafkaListener")]),e._v(" method, the interface type is automatically passed to the converter as normal.")]),e._v(" "),a("h5",{attrs:{id:"using-errorhandlingdeserializer"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#using-errorhandlingdeserializer"}},[e._v("#")]),e._v(" Using "),a("code",[e._v("ErrorHandlingDeserializer")])]),e._v(" "),a("p",[e._v("When a deserializer fails to deserialize a message, Spring has no way to handle the problem, because it occurs before the "),a("code",[e._v("poll()")]),e._v(" returns.\nTo solve this problem, the "),a("code",[e._v("ErrorHandlingDeserializer")]),e._v(" has been introduced.\nThis deserializer delegates to a real deserializer (key or value).\nIf the delegate fails to deserialize the record content, the "),a("code",[e._v("ErrorHandlingDeserializer")]),e._v(" returns a "),a("code",[e._v("null")]),e._v(" value and a "),a("code",[e._v("DeserializationException")]),e._v(" in a header that contains the cause and the raw bytes.\nWhen you use a record-level "),a("code",[e._v("MessageListener")]),e._v(", if the "),a("code",[e._v("ConsumerRecord")]),e._v(" contains a "),a("code",[e._v("DeserializationException")]),e._v(" header for either the key or value, the container’s "),a("code",[e._v("ErrorHandler")]),e._v(" is called with the failed "),a("code",[e._v("ConsumerRecord")]),e._v(".\nThe record is not passed to the listener.")]),e._v(" "),a("p",[e._v("Alternatively, you can configure the "),a("code",[e._v("ErrorHandlingDeserializer")]),e._v(" to create a custom value by providing a "),a("code",[e._v("failedDeserializationFunction")]),e._v(", which is a "),a("code",[e._v("Function")]),e._v(".\nThis function is invoked to create an instance of "),a("code",[e._v("T")]),e._v(", which is passed to the listener in the usual fashion.\nAn object of type "),a("code",[e._v("FailedDeserializationInfo")]),e._v(", which contains all the contextual information is provided to the function.\nYou can find the "),a("code",[e._v("DeserializationException")]),e._v(" (as a serialized Java object) in headers.\nSee the "),a("a",{attrs:{href:"https://docs.spring.io/spring-kafka/api/org/springframework/kafka/support/serializer/ErrorHandlingDeserializer.html",target:"_blank",rel:"noopener noreferrer"}},[e._v("Javadoc"),a("OutboundLink")],1),e._v(" for the "),a("code",[e._v("ErrorHandlingDeserializer")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("You can use the "),a("code",[e._v("DefaultKafkaConsumerFactory")]),e._v(" constructor that takes key and value "),a("code",[e._v("Deserializer")]),e._v(" objects and wire in appropriate "),a("code",[e._v("ErrorHandlingDeserializer")]),e._v(" instances that you have configured with the proper delegates.\nAlternatively, you can use consumer configuration properties (which are used by the "),a("code",[e._v("ErrorHandlingDeserializer")]),e._v(") to instantiate the delegates.\nThe property names are "),a("code",[e._v("ErrorHandlingDeserializer.KEY_DESERIALIZER_CLASS")]),e._v(" and "),a("code",[e._v("ErrorHandlingDeserializer.VALUE_DESERIALIZER_CLASS")]),e._v(".\nThe property value can be a class or class name.\nThe following example shows how to set these properties:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('... // other props\nprops.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ErrorHandlingDeserializer.class);\nprops.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ErrorHandlingDeserializer.class);\nprops.put(ErrorHandlingDeserializer.KEY_DESERIALIZER_CLASS, JsonDeserializer.class);\nprops.put(JsonDeserializer.KEY_DEFAULT_TYPE, "com.example.MyKey")\nprops.put(ErrorHandlingDeserializer.VALUE_DESERIALIZER_CLASS, JsonDeserializer.class.getName());\nprops.put(JsonDeserializer.VALUE_DEFAULT_TYPE, "com.example.MyValue")\nprops.put(JsonDeserializer.TRUSTED_PACKAGES, "com.example")\nreturn new DefaultKafkaConsumerFactory<>(props);\n')])])]),a("p",[e._v("The following example uses a "),a("code",[e._v("failedDeserializationFunction")]),e._v(".")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public class BadFoo extends Foo {\n\n private final FailedDeserializationInfo failedDeserializationInfo;\n\n public BadFoo(FailedDeserializationInfo failedDeserializationInfo) {\n this.failedDeserializationInfo = failedDeserializationInfo;\n }\n\n public FailedDeserializationInfo getFailedDeserializationInfo() {\n return this.failedDeserializationInfo;\n }\n\n}\n\npublic class FailedFooProvider implements Function {\n\n @Override\n public Foo apply(FailedDeserializationInfo info) {\n return new BadFoo(info);\n }\n\n}\n")])])]),a("p",[e._v("The preceding example uses the following configuration:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("...\nconsumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ErrorHandlingDeserializer.class);\nconsumerProps.put(ErrorHandlingDeserializer.VALUE_DESERIALIZER_CLASS, JsonDeserializer.class);\nconsumerProps.put(ErrorHandlingDeserializer.VALUE_FUNCTION, FailedFooProvider.class);\n...\n")])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If the consumer is configured with an "),a("code",[e._v("ErrorHandlingDeserializer")]),e._v(" it is important to configure the "),a("code",[e._v("KafkaTemplate")]),e._v(" and its producer with a serializer that can handle normal objects as well as raw "),a("code",[e._v("byte[]")]),e._v(" values, which result from deserialization exceptions."),a("br"),e._v("The generic value type of the template should be "),a("code",[e._v("Object")]),e._v("."),a("br"),e._v("One technique is to use the "),a("code",[e._v("DelegatingByTypeSerializer")]),e._v("; an example follows:")])])]),e._v(" "),a("tbody")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic ProducerFactory producerFactory() {\n return new DefaultKafkaProducerFactory<>(producerConfiguration(), new StringSerializer(),\n new DelegatingByTypeSerializer(Map.of(byte[].class, new ByteArraySerializer(),\n MyNormalObject.class, new JsonSerializer())));\n}\n\n@Bean\npublic KafkaTemplate kafkaTemplate() {\n return new KafkaTemplate<>(producerFactory());\n}\n")])])]),a("p",[e._v("When using an "),a("code",[e._v("ErrorHandlingDeserializer")]),e._v(" with a batch listener, you must check for the deserialization exceptions in message headers.\nWhen used with a "),a("code",[e._v("DefaultBatchErrorHandler")]),e._v(", you can use that header to determine which record the exception failed on and communicate to the error handler via a "),a("code",[e._v("BatchListenerFailedException")]),e._v(".")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "test", topics = "test")\nvoid listen(List in, @Header(KafkaHeaders.BATCH_CONVERTED_HEADERS) List> headers) {\n for (int i = 0; i < in.size(); i++) {\n Thing thing = in.get(i);\n if (thing == null\n && headers.get(i).get(SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER) != null) {\n DeserializationException deserEx = ListenerUtils.byteArrayToDeserializationException(this.logger,\n (byte[]) headers.get(i).get(SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER));\n if (deserEx != null) {\n logger.error(deserEx, "Record at index " + i + " could not be deserialized");\n }\n throw new BatchListenerFailedException("Deserialization", deserEx, i);\n }\n process(thing);\n }\n}\n')])])]),a("p",[a("code",[e._v("ListenerUtils.byteArrayToDeserializationException()")]),e._v(" can be used to convert the header to a "),a("code",[e._v("DeserializationException")]),e._v(".")]),e._v(" "),a("p",[e._v("When consuming "),a("code",[e._v("List")]),e._v(", "),a("code",[e._v("ListenerUtils.getExceptionFromHeader()")]),e._v(" is used instead:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "kgh2036", topics = "kgh2036")\nvoid listen(List> in) {\n for (int i = 0; i < in.size(); i++) {\n ConsumerRecord rec = in.get(i);\n if (rec.value() == null) {\n DeserializationException deserEx = ListenerUtils.getExceptionFromHeader(rec,\n SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER, this.logger);\n if (deserEx != null) {\n logger.error(deserEx, "Record at offset " + rec.offset() + " could not be deserialized");\n throw new BatchListenerFailedException("Deserialization", deserEx, i);\n }\n }\n process(rec.value());\n }\n}\n')])])]),a("h5",{attrs:{id:"payload-conversion-with-batch-listeners"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#payload-conversion-with-batch-listeners"}},[e._v("#")]),e._v(" Payload Conversion with Batch Listeners")]),e._v(" "),a("p",[e._v("You can also use a "),a("code",[e._v("JsonMessageConverter")]),e._v(" within a "),a("code",[e._v("BatchMessagingMessageConverter")]),e._v(" to convert batch messages when you use a batch listener container factory.\nSee "),a("a",{attrs:{href:"#serdes"}},[e._v("Serialization, Deserialization, and Message Conversion")]),e._v(" and "),a("a",{attrs:{href:"#messaging-message-conversion"}},[e._v("Spring Messaging Message Conversion")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("By default, the type for the conversion is inferred from the listener argument.\nIf you configure the "),a("code",[e._v("JsonMessageConverter")]),e._v(" with a "),a("code",[e._v("DefaultJackson2TypeMapper")]),e._v(" that has its "),a("code",[e._v("TypePrecedence")]),e._v(" set to "),a("code",[e._v("TYPE_ID")]),e._v(" (instead of the default "),a("code",[e._v("INFERRED")]),e._v("), the converter uses the type information in headers (if present) instead.\nThis allows, for example, listener methods to be declared with interfaces instead of concrete classes.\nAlso, the type converter supports mapping, so the deserialization can be to a different type than the source (as long as the data is compatible).\nThis is also useful when you use "),a("a",{attrs:{href:"#class-level-kafkalistener"}},[e._v("class-level "),a("code",[e._v("@KafkaListener")]),e._v(" instances")]),e._v(" where the payload must have already been converted to determine which method to invoke.\nThe following example creates beans that use this method:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic KafkaListenerContainerFactory kafkaListenerContainerFactory() {\n ConcurrentKafkaListenerContainerFactory factory =\n new ConcurrentKafkaListenerContainerFactory<>();\n factory.setConsumerFactory(consumerFactory());\n factory.setBatchListener(true);\n factory.setMessageConverter(new BatchMessagingMessageConverter(converter()));\n return factory;\n}\n\n@Bean\npublic JsonMessageConverter converter() {\n return new JsonMessageConverter();\n}\n")])])]),a("p",[e._v("Note that, for this to work, the method signature for the conversion target must be a container object with a single generic parameter type, such as the following:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(topics = "blc1")\npublic void listen(List foos, @Header(KafkaHeaders.OFFSET) List offsets) {\n ...\n}\n')])])]),a("p",[e._v("Note that you can still access the batch headers.")]),e._v(" "),a("p",[e._v("If the batch converter has a record converter that supports it, you can also receive a list of messages where the payloads are converted according to the generic type.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(topics = "blc3", groupId = "blc3")\npublic void listen1(List> fooMessages) {\n ...\n}\n')])])]),a("h5",{attrs:{id:"conversionservice-customization"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#conversionservice-customization"}},[e._v("#")]),e._v(" "),a("code",[e._v("ConversionService")]),e._v(" Customization")]),e._v(" "),a("p",[e._v("Starting with version 2.1.1, the "),a("code",[e._v("org.springframework.core.convert.ConversionService")]),e._v(" used by the default "),a("code",[e._v("o.s.messaging.handler.annotation.support.MessageHandlerMethodFactory")]),e._v(" to resolve parameters for the invocation of a listener method is supplied with all beans that implement any of the following interfaces:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("org.springframework.core.convert.converter.Converter")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("org.springframework.core.convert.converter.GenericConverter")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("org.springframework.format.Formatter")])])])]),e._v(" "),a("p",[e._v("This lets you further customize listener deserialization without changing the default configuration for "),a("code",[e._v("ConsumerFactory")]),e._v(" and "),a("code",[e._v("KafkaListenerContainerFactory")]),e._v(".")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Setting a custom "),a("code",[e._v("MessageHandlerMethodFactory")]),e._v(" on the "),a("code",[e._v("KafkaListenerEndpointRegistrar")]),e._v(" through a "),a("code",[e._v("KafkaListenerConfigurer")]),e._v(" bean disables this feature.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"adding-custom-handlermethodargumentresolver-to-kafkalistener"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#adding-custom-handlermethodargumentresolver-to-kafkalistener"}},[e._v("#")]),e._v(" Adding custom "),a("code",[e._v("HandlerMethodArgumentResolver")]),e._v(" to "),a("code",[e._v("@KafkaListener")])]),e._v(" "),a("p",[e._v("Starting with version 2.4.2 you are able to add your own "),a("code",[e._v("HandlerMethodArgumentResolver")]),e._v(" and resolve custom method parameters.\nAll you need is to implement "),a("code",[e._v("KafkaListenerConfigurer")]),e._v(" and use method "),a("code",[e._v("setCustomMethodArgumentResolvers()")]),e._v(" from class "),a("code",[e._v("KafkaListenerEndpointRegistrar")]),e._v(".")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Configuration\nclass CustomKafkaConfig implements KafkaListenerConfigurer {\n\n @Override\n public void configureKafkaListeners(KafkaListenerEndpointRegistrar registrar) {\n registrar.setCustomMethodArgumentResolvers(\n new HandlerMethodArgumentResolver() {\n\n @Override\n public boolean supportsParameter(MethodParameter parameter) {\n return CustomMethodArgument.class.isAssignableFrom(parameter.getParameterType());\n }\n\n @Override\n public Object resolveArgument(MethodParameter parameter, Message message) {\n return new CustomMethodArgument(\n message.getHeaders().get(KafkaHeaders.RECEIVED_TOPIC, String.class)\n );\n }\n }\n );\n }\n\n}\n")])])]),a("p",[e._v("You can also completely replace the framework’s argument resolution by adding a custom "),a("code",[e._v("MessageHandlerMethodFactory")]),e._v(" to the "),a("code",[e._v("KafkaListenerEndpointRegistrar")]),e._v(" bean.\nIf you do this, and your application needs to handle tombstone records, with a "),a("code",[e._v("null")]),e._v(" "),a("code",[e._v("value()")]),e._v(" (e.g. from a compacted topic), you should add a "),a("code",[e._v("KafkaNullAwarePayloadArgumentResolver")]),e._v(" to the factory; it must be the last resolver because it supports all types and can match arguments without a "),a("code",[e._v("@Payload")]),e._v(" annotation.\nIf you are using a "),a("code",[e._v("DefaultMessageHandlerMethodFactory")]),e._v(", set this resolver as the last custom resolver; the factory will ensure that this resolver will be used before the standard "),a("code",[e._v("PayloadMethodArgumentResolver")]),e._v(", which has no knowledge of "),a("code",[e._v("KafkaNull")]),e._v(" payloads.")]),e._v(" "),a("p",[e._v("See also "),a("a",{attrs:{href:"#tombstones"}},[e._v("Null Payloads and Log Compaction of 'Tombstone' Records")]),e._v(".")]),e._v(" "),a("h4",{attrs:{id:"_4-1-18-message-headers"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-18-message-headers"}},[e._v("#")]),e._v(" 4.1.18. Message Headers")]),e._v(" "),a("p",[e._v("The 0.11.0.0 client introduced support for headers in messages.\nAs of version 2.0, Spring for Apache Kafka now supports mapping these headers to and from "),a("code",[e._v("spring-messaging")]),e._v(" "),a("code",[e._v("MessageHeaders")]),e._v(".")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Previous versions mapped "),a("code",[e._v("ConsumerRecord")]),e._v(" and "),a("code",[e._v("ProducerRecord")]),e._v(" to spring-messaging "),a("code",[e._v("Message")]),e._v(", where the value property is mapped to and from the "),a("code",[e._v("payload")]),e._v(" and other properties ("),a("code",[e._v("topic")]),e._v(", "),a("code",[e._v("partition")]),e._v(", and so on) were mapped to headers."),a("br"),e._v("This is still the case, but additional (arbitrary) headers can now be mapped.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Apache Kafka headers have a simple API, shown in the following interface definition:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public interface Header {\n\n String key();\n\n byte[] value();\n\n}\n")])])]),a("p",[e._v("The "),a("code",[e._v("KafkaHeaderMapper")]),e._v(" strategy is provided to map header entries between Kafka "),a("code",[e._v("Headers")]),e._v(" and "),a("code",[e._v("MessageHeaders")]),e._v(".\nIts interface definition is as follows:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public interface KafkaHeaderMapper {\n\n void fromHeaders(MessageHeaders headers, Headers target);\n\n void toHeaders(Headers source, Map target);\n\n}\n")])])]),a("p",[e._v("The "),a("code",[e._v("DefaultKafkaHeaderMapper")]),e._v(" maps the key to the "),a("code",[e._v("MessageHeaders")]),e._v(" header name and, in order to support rich header types for outbound messages, JSON conversion is performed.\nA “special” header (with a key of "),a("code",[e._v("spring_json_header_types")]),e._v(") contains a JSON map of "),a("code",[e._v(":")]),e._v(".\nThis header is used on the inbound side to provide appropriate conversion of each header value to the original type.")]),e._v(" "),a("p",[e._v("On the inbound side, all Kafka "),a("code",[e._v("Header")]),e._v(" instances are mapped to "),a("code",[e._v("MessageHeaders")]),e._v(".\nOn the outbound side, by default, all "),a("code",[e._v("MessageHeaders")]),e._v(" are mapped, except "),a("code",[e._v("id")]),e._v(", "),a("code",[e._v("timestamp")]),e._v(", and the headers that map to "),a("code",[e._v("ConsumerRecord")]),e._v(" properties.")]),e._v(" "),a("p",[e._v("You can specify which headers are to be mapped for outbound messages, by providing patterns to the mapper.\nThe following listing shows a number of example mappings:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public DefaultKafkaHeaderMapper() { (1)\n ...\n}\n\npublic DefaultKafkaHeaderMapper(ObjectMapper objectMapper) { (2)\n ...\n}\n\npublic DefaultKafkaHeaderMapper(String... patterns) { (3)\n ...\n}\n\npublic DefaultKafkaHeaderMapper(ObjectMapper objectMapper, String... patterns) { (4)\n ...\n}\n")])])]),a("table",[a("thead",[a("tr",[a("th",[a("strong",[e._v("1")])]),e._v(" "),a("th",[e._v("Uses a default Jackson "),a("code",[e._v("ObjectMapper")]),e._v(" and maps most headers, as discussed before the example.")])])]),e._v(" "),a("tbody",[a("tr",[a("td",[a("strong",[e._v("2")])]),e._v(" "),a("td",[e._v("Uses the provided Jackson "),a("code",[e._v("ObjectMapper")]),e._v(" and maps most headers, as discussed before the example.")])]),e._v(" "),a("tr",[a("td",[a("strong",[e._v("3")])]),e._v(" "),a("td",[e._v("Uses a default Jackson "),a("code",[e._v("ObjectMapper")]),e._v(" and maps headers according to the provided patterns.")])]),e._v(" "),a("tr",[a("td",[a("strong",[e._v("4")])]),e._v(" "),a("td",[e._v("Uses the provided Jackson "),a("code",[e._v("ObjectMapper")]),e._v(" and maps headers according to the provided patterns.")])])])]),e._v(" "),a("p",[e._v("Patterns are rather simple and can contain a leading wildcard ("),a("code",[e._v("**), a trailing wildcard, or both (for example,")]),e._v("**"),a("code",[e._v(".cat.*")]),e._v(").\nYou can negate patterns with a leading "),a("code",[e._v("!")]),e._v(".\nThe first pattern that matches a header name (whether positive or negative) wins.")]),e._v(" "),a("p",[e._v("When you provide your own patterns, we recommend including "),a("code",[e._v("!id")]),e._v(" and "),a("code",[e._v("!timestamp")]),e._v(", since these headers are read-only on the inbound side.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("By default, the mapper deserializes only classes in "),a("code",[e._v("java.lang")]),e._v(" and "),a("code",[e._v("java.util")]),e._v("."),a("br"),e._v("You can trust other (or all) packages by adding trusted packages with the "),a("code",[e._v("addTrustedPackages")]),e._v(" method."),a("br"),e._v("If you receive messages from untrusted sources, you may wish to add only those packages you trust."),a("br"),e._v("To trust all packages, you can use "),a("code",[e._v('mapper.addTrustedPackages("*")')]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Mapping "),a("code",[e._v("String")]),e._v(" header values in a raw form is useful when communicating with systems that are not aware of the mapper’s JSON format.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Starting with version 2.2.5, you can specify that certain string-valued headers should not be mapped using JSON, but to/from a raw "),a("code",[e._v("byte[]")]),e._v(".\nThe "),a("code",[e._v("AbstractKafkaHeaderMapper")]),e._v(" has new properties; "),a("code",[e._v("mapAllStringsOut")]),e._v(" when set to true, all string-valued headers will be converted to "),a("code",[e._v("byte[]")]),e._v(" using the "),a("code",[e._v("charset")]),e._v(" property (default "),a("code",[e._v("UTF-8")]),e._v(").\nIn addition, there is a property "),a("code",[e._v("rawMappedHeaders")]),e._v(", which is a map of "),a("code",[e._v("header name : boolean")]),e._v("; if the map contains a header name, and the header contains a "),a("code",[e._v("String")]),e._v(" value, it will be mapped as a raw "),a("code",[e._v("byte[]")]),e._v(" using the charset.\nThis map is also used to map raw incoming "),a("code",[e._v("byte[]")]),e._v(" headers to "),a("code",[e._v("String")]),e._v(" using the charset if, and only if, the boolean in the map value is "),a("code",[e._v("true")]),e._v(".\nIf the boolean is "),a("code",[e._v("false")]),e._v(", or the header name is not in the map with a "),a("code",[e._v("true")]),e._v(" value, the incoming header is simply mapped as the raw unmapped header.")]),e._v(" "),a("p",[e._v("The following test case illustrates this mechanism.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Test\npublic void testSpecificStringConvert() {\n DefaultKafkaHeaderMapper mapper = new DefaultKafkaHeaderMapper();\n Map rawMappedHeaders = new HashMap<>();\n rawMappedHeaders.put("thisOnesAString", true);\n rawMappedHeaders.put("thisOnesBytes", false);\n mapper.setRawMappedHeaders(rawMappedHeaders);\n Map headersMap = new HashMap<>();\n headersMap.put("thisOnesAString", "thing1");\n headersMap.put("thisOnesBytes", "thing2");\n headersMap.put("alwaysRaw", "thing3".getBytes());\n MessageHeaders headers = new MessageHeaders(headersMap);\n Headers target = new RecordHeaders();\n mapper.fromHeaders(headers, target);\n assertThat(target).containsExactlyInAnyOrder(\n new RecordHeader("thisOnesAString", "thing1".getBytes()),\n new RecordHeader("thisOnesBytes", "thing2".getBytes()),\n new RecordHeader("alwaysRaw", "thing3".getBytes()));\n headersMap.clear();\n mapper.toHeaders(target, headersMap);\n assertThat(headersMap).contains(\n entry("thisOnesAString", "thing1"),\n entry("thisOnesBytes", "thing2".getBytes()),\n entry("alwaysRaw", "thing3".getBytes()));\n}\n')])])]),a("p",[e._v("By default, the "),a("code",[e._v("DefaultKafkaHeaderMapper")]),e._v(" is used in the "),a("code",[e._v("MessagingMessageConverter")]),e._v(" and "),a("code",[e._v("BatchMessagingMessageConverter")]),e._v(", as long as Jackson is on the class path.")]),e._v(" "),a("p",[e._v("With the batch converter, the converted headers are available in the "),a("code",[e._v("KafkaHeaders.BATCH_CONVERTED_HEADERS")]),e._v(" as a "),a("code",[e._v("List>")]),e._v(" where the map in a position of the list corresponds to the data position in the payload.")]),e._v(" "),a("p",[e._v("If there is no converter (either because Jackson is not present or it is explicitly set to "),a("code",[e._v("null")]),e._v("), the headers from the consumer record are provided unconverted in the "),a("code",[e._v("KafkaHeaders.NATIVE_HEADERS")]),e._v(" header.\nThis header is a "),a("code",[e._v("Headers")]),e._v(" object (or a "),a("code",[e._v("List")]),e._v(" in the case of the batch converter), where the position in the list corresponds to the data position in the payload).")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Certain types are not suitable for JSON serialization, and a simple "),a("code",[e._v("toString()")]),e._v(" serialization might be preferred for these types."),a("br"),e._v("The "),a("code",[e._v("DefaultKafkaHeaderMapper")]),e._v(" has a method called "),a("code",[e._v("addToStringClasses()")]),e._v(" that lets you supply the names of classes that should be treated this way for outbound mapping."),a("br"),e._v("During inbound mapping, they are mapped as "),a("code",[e._v("String")]),e._v("."),a("br"),e._v("By default, only "),a("code",[e._v("org.springframework.util.MimeType")]),e._v(" and "),a("code",[e._v("org.springframework.http.MediaType")]),e._v(" are mapped this way.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Starting with version 2.3, handling of String-valued headers is simplified."),a("br"),e._v("Such headers are no longer JSON encoded, by default (i.e. they do not have enclosing "),a("code",[e._v('"…​"')]),e._v(" added)."),a("br"),e._v("The type is still added to the JSON_TYPES header so the receiving system can convert back to a String (from "),a("code",[e._v("byte[]")]),e._v(")."),a("br"),e._v("The mapper can handle (decode) headers produced by older versions (it checks for a leading "),a("code",[e._v('"')]),e._v("); in this way an application using 2.3 can consume records from older versions.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("To be compatible with earlier versions, set "),a("code",[e._v("encodeStrings")]),e._v(" to "),a("code",[e._v("true")]),e._v(", if records produced by a version using 2.3 might be consumed by applications using earlier versions."),a("br"),e._v("When all applications are using 2.3 or higher, you can leave the property at its default value of "),a("code",[e._v("false")]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\nMessagingMessageConverter converter() {\n MessagingMessageConverter converter = new MessagingMessageConverter();\n DefaultKafkaHeaderMapper mapper = new DefaultKafkaHeaderMapper();\n mapper.setEncodeStrings(true);\n converter.setHeaderMapper(mapper);\n return converter;\n}\n")])])]),a("p",[e._v("If using Spring Boot, it will auto configure this converter bean into the auto-configured "),a("code",[e._v("KafkaTemplate")]),e._v("; otherwise you should add this converter to the template.")]),e._v(" "),a("h4",{attrs:{id:"_4-1-19-null-payloads-and-log-compaction-of-tombstone-records"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-19-null-payloads-and-log-compaction-of-tombstone-records"}},[e._v("#")]),e._v(" 4.1.19. Null Payloads and Log Compaction of 'Tombstone' Records")]),e._v(" "),a("p",[e._v("When you use "),a("a",{attrs:{href:"https://kafka.apache.org/documentation/#compaction",target:"_blank",rel:"noopener noreferrer"}},[e._v("Log Compaction"),a("OutboundLink")],1),e._v(", you can send and receive messages with "),a("code",[e._v("null")]),e._v(" payloads to identify the deletion of a key.")]),e._v(" "),a("p",[e._v("You can also receive "),a("code",[e._v("null")]),e._v(" values for other reasons, such as a "),a("code",[e._v("Deserializer")]),e._v(" that might return "),a("code",[e._v("null")]),e._v(" when it cannot deserialize a value.")]),e._v(" "),a("p",[e._v("To send a "),a("code",[e._v("null")]),e._v(" payload by using the "),a("code",[e._v("KafkaTemplate")]),e._v(", you can pass null into the value argument of the "),a("code",[e._v("send()")]),e._v(" methods.\nOne exception to this is the "),a("code",[e._v("send(Message message)")]),e._v(" variant.\nSince "),a("code",[e._v("spring-messaging")]),e._v(" "),a("code",[e._v("Message")]),e._v(" cannot have a "),a("code",[e._v("null")]),e._v(" payload, you can use a special payload type called "),a("code",[e._v("KafkaNull")]),e._v(", and the framework sends "),a("code",[e._v("null")]),e._v(".\nFor convenience, the static "),a("code",[e._v("KafkaNull.INSTANCE")]),e._v(" is provided.")]),e._v(" "),a("p",[e._v("When you use a message listener container, the received "),a("code",[e._v("ConsumerRecord")]),e._v(" has a "),a("code",[e._v("null")]),e._v(" "),a("code",[e._v("value()")]),e._v(".")]),e._v(" "),a("p",[e._v("To configure the "),a("code",[e._v("@KafkaListener")]),e._v(" to handle "),a("code",[e._v("null")]),e._v(" payloads, you must use the "),a("code",[e._v("@Payload")]),e._v(" annotation with "),a("code",[e._v("required = false")]),e._v(".\nIf it is a tombstone message for a compacted log, you usually also need the key so that your application can determine which key was “deleted”.\nThe following example shows such a configuration:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "deletableListener", topics = "myTopic")\npublic void listen(@Payload(required = false) String value, @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) String key) {\n // value == null represents key deletion\n}\n')])])]),a("p",[e._v("When you use a class-level "),a("code",[e._v("@KafkaListener")]),e._v(" with multiple "),a("code",[e._v("@KafkaHandler")]),e._v(" methods, some additional configuration is needed.\nSpecifically, you need a "),a("code",[e._v("@KafkaHandler")]),e._v(" method with a "),a("code",[e._v("KafkaNull")]),e._v(" payload.\nThe following example shows how to configure one:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "multi", topics = "myTopic")\nstatic class MultiListenerBean {\n\n @KafkaHandler\n public void listen(String cat) {\n ...\n }\n\n @KafkaHandler\n public void listen(Integer hat) {\n ...\n }\n\n @KafkaHandler\n public void delete(@Payload(required = false) KafkaNull nul, @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) int key) {\n ...\n }\n\n}\n')])])]),a("p",[e._v("Note that the argument is "),a("code",[e._v("null")]),e._v(", not "),a("code",[e._v("KafkaNull")]),e._v(".")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("See "),a("a",{attrs:{href:"#tip-assign-all-parts"}},[e._v("[tip-assign-all-parts]")]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("This feature requires the use of a "),a("code",[e._v("KafkaNullAwarePayloadArgumentResolver")]),e._v(" which the framework will configure when using the default "),a("code",[e._v("MessageHandlerMethodFactory")]),e._v("."),a("br"),e._v("When using a custom "),a("code",[e._v("MessageHandlerMethodFactory")]),e._v(", see "),a("a",{attrs:{href:"#custom-arg-resolve"}},[e._v("Adding custom "),a("code",[e._v("HandlerMethodArgumentResolver")]),e._v(" to "),a("code",[e._v("@KafkaListener")])]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h4",{attrs:{id:"_4-1-20-handling-exceptions"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-20-handling-exceptions"}},[e._v("#")]),e._v(" 4.1.20. Handling Exceptions")]),e._v(" "),a("p",[e._v("This section describes how to handle various exceptions that may arise when you use Spring for Apache Kafka.")]),e._v(" "),a("h5",{attrs:{id:"listener-error-handlers"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#listener-error-handlers"}},[e._v("#")]),e._v(" Listener Error Handlers")]),e._v(" "),a("p",[e._v("Starting with version 2.0, the "),a("code",[e._v("@KafkaListener")]),e._v(" annotation has a new attribute: "),a("code",[e._v("errorHandler")]),e._v(".")]),e._v(" "),a("p",[e._v("You can use the "),a("code",[e._v("errorHandler")]),e._v(" to provide the bean name of a "),a("code",[e._v("KafkaListenerErrorHandler")]),e._v(" implementation.\nThis functional interface has one method, as the following listing shows:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@FunctionalInterface\npublic interface KafkaListenerErrorHandler {\n\n Object handleError(Message message, ListenerExecutionFailedException exception) throws Exception;\n\n}\n")])])]),a("p",[e._v("You have access to the spring-messaging "),a("code",[e._v("Message")]),e._v(" object produced by the message converter and the exception that was thrown by the listener, which is wrapped in a "),a("code",[e._v("ListenerExecutionFailedException")]),e._v(".\nThe error handler can throw the original or a new exception, which is thrown to the container.\nAnything returned by the error handler is ignored.")]),e._v(" "),a("p",[e._v("Starting with version 2.7, you can set the "),a("code",[e._v("rawRecordHeader")]),e._v(" property on the "),a("code",[e._v("MessagingMessageConverter")]),e._v(" and "),a("code",[e._v("BatchMessagingMessageConverter")]),e._v(" which causes the raw "),a("code",[e._v("ConsumerRecord")]),e._v(" to be added to the converted "),a("code",[e._v("Message")]),e._v(" in the "),a("code",[e._v("KafkaHeaders.RAW_DATA")]),e._v(" header.\nThis is useful, for example, if you wish to use a "),a("code",[e._v("DeadLetterPublishingRecoverer")]),e._v(" in a listener error handler.\nIt might be used in a request/reply scenario where you wish to send a failure result to the sender, after some number of retries, after capturing the failed record in a dead letter topic.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\nKafkaListenerErrorHandler eh(DeadLetterPublishingRecoverer recoverer) {\n return (msg, ex) -> {\n if (msg.getHeaders().get(KafkaHeaders.DELIVERY_ATTEMPT, Integer.class) > 9) {\n recoverer.accept(msg.getHeaders().get(KafkaHeaders.RAW_DATA, ConsumerRecord.class), ex);\n return "FAILED";\n }\n throw ex;\n };\n}\n')])])]),a("p",[e._v("It has a sub-interface ("),a("code",[e._v("ConsumerAwareListenerErrorHandler")]),e._v(") that has access to the consumer object, through the following method:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("Object handleError(Message message, ListenerExecutionFailedException exception, Consumer consumer);\n")])])]),a("p",[e._v("If your error handler implements this interface, you can, for example, adjust the offsets accordingly.\nFor example, to reset the offset to replay the failed message, you could do something like the following:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic ConsumerAwareListenerErrorHandler listen3ErrorHandler() {\n return (m, e, c) -> {\n this.listen3Exception = e;\n MessageHeaders headers = m.getHeaders();\n c.seek(new org.apache.kafka.common.TopicPartition(\n headers.get(KafkaHeaders.RECEIVED_TOPIC, String.class),\n headers.get(KafkaHeaders.RECEIVED_PARTITION_ID, Integer.class)),\n headers.get(KafkaHeaders.OFFSET, Long.class));\n return null;\n };\n}\n")])])]),a("p",[e._v("Similarly, you could do something like the following for a batch listener:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic ConsumerAwareListenerErrorHandler listen10ErrorHandler() {\n return (m, e, c) -> {\n this.listen10Exception = e;\n MessageHeaders headers = m.getHeaders();\n List topics = headers.get(KafkaHeaders.RECEIVED_TOPIC, List.class);\n List partitions = headers.get(KafkaHeaders.RECEIVED_PARTITION_ID, List.class);\n List offsets = headers.get(KafkaHeaders.OFFSET, List.class);\n Map offsetsToReset = new HashMap<>();\n for (int i = 0; i < topics.size(); i++) {\n int index = i;\n offsetsToReset.compute(new TopicPartition(topics.get(i), partitions.get(i)),\n (k, v) -> v == null ? offsets.get(index) : Math.min(v, offsets.get(index)));\n }\n offsetsToReset.forEach((k, v) -> c.seek(k, v));\n return null;\n };\n}\n")])])]),a("p",[e._v("This resets each topic/partition in the batch to the lowest offset in the batch.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The preceding two examples are simplistic implementations, and you would probably want more checking in the error handler.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"container-error-handlers"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#container-error-handlers"}},[e._v("#")]),e._v(" Container Error Handlers")]),e._v(" "),a("p",[e._v("Starting with version 2.8, the legacy "),a("code",[e._v("ErrorHandler")]),e._v(" and "),a("code",[e._v("BatchErrorHandler")]),e._v(" interfaces have been superceded by a new "),a("code",[e._v("CommonErrorHandler")]),e._v(".\nThese error handlers can handle errors for both record and batch listeners, allowing a single listener container factory to create containers for both types of listener."),a("code",[e._v("CommonErrorHandler")]),e._v(" implementations to replace most legacy framework error handler implementations are provided and the legacy error handlers deprecated.\nThe legacy interfaces are still supported by listener containers and listener container factories; they will be deprecated in a future release.")]),e._v(" "),a("p",[e._v("When transactions are being used, no error handlers are configured, by default, so that the exception will roll back the transaction.\nError handling for transactional containers are handled by the "),a("a",{attrs:{href:"#after-rollback"}},[a("code",[e._v("AfterRollbackProcessor")])]),e._v(".\nIf you provide a custom error handler when using transactions, it must throw an exception if you want the transaction rolled back.")]),e._v(" "),a("p",[e._v("This interface has a default method "),a("code",[e._v("isAckAfterHandle()")]),e._v(" which is called by the container to determine whether the offset(s) should be committed if the error handler returns without throwing an exception; it returns true by default.")]),e._v(" "),a("p",[e._v('Typically, the error handlers provided by the framework will throw an exception when the error is not "handled" (e.g. after performing a seek operation).\nBy default, such exceptions are logged by the container at '),a("code",[e._v("ERROR")]),e._v(" level.\nAll of the framework error handlers extend "),a("code",[e._v("KafkaExceptionLogLevelAware")]),e._v(" which allows you to control the level at which these exceptions are logged.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("/**\n * Set the level at which the exception thrown by this handler is logged.\n * @param logLevel the level (default ERROR).\n */\npublic void setLogLevel(KafkaException.Level logLevel) {\n ...\n}\n")])])]),a("p",[e._v("You can specify a global error handler to be used for all listeners in the container factory.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic KafkaListenerContainerFactory>\n kafkaListenerContainerFactory() {\n ConcurrentKafkaListenerContainerFactory factory =\n new ConcurrentKafkaListenerContainerFactory<>();\n ...\n factory.setCommonErrorHandler(myErrorHandler);\n ...\n return factory;\n}\n")])])]),a("p",[e._v("By default, if an annotated listener method throws an exception, it is thrown to the container, and the message is handled according to the container configuration.")]),e._v(" "),a("p",[e._v("The container commits any pending offset commits before calling the error handler.")]),e._v(" "),a("p",[e._v("If you are using Spring Boot, you simply need to add the error handler as a "),a("code",[e._v("@Bean")]),e._v(" and Boot will add it to the auto-configured factory.")]),e._v(" "),a("h5",{attrs:{id:"defaulterrorhandler"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#defaulterrorhandler"}},[e._v("#")]),e._v(" DefaultErrorHandler")]),e._v(" "),a("p",[e._v("This new error handler replaces the "),a("code",[e._v("SeekToCurrentErrorHandler")]),e._v(" and "),a("code",[e._v("RecoveringBatchErrorHandler")]),e._v(", which have been the default error handlers for several releases now.\nOne difference is that the fallback behavior for batch listeners (when an exception other than a "),a("code",[e._v("BatchListenerFailedException")]),e._v(" is thrown) is the equivalent of the "),a("a",{attrs:{href:"#retrying-batch-eh"}},[e._v("Retrying Complete Batches")]),e._v(".")]),e._v(" "),a("p",[e._v("The error handler can recover (skip) a record that keeps failing.\nBy default, after ten failures, the failed record is logged (at the "),a("code",[e._v("ERROR")]),e._v(" level).\nYou can configure the handler with a custom recoverer ("),a("code",[e._v("BiConsumer")]),e._v(") and a "),a("code",[e._v("BackOff")]),e._v(" that controls the delivery attempts and delays between each.\nUsing a "),a("code",[e._v("FixedBackOff")]),e._v(" with "),a("code",[e._v("FixedBackOff.UNLIMITED_ATTEMPTS")]),e._v(" causes (effectively) infinite retries.\nThe following example configures recovery after three tries:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("DefaultErrorHandler errorHandler =\n new DefaultErrorHandler((record, exception) -> {\n // recover after 3 failures, with no back off - e.g. send to a dead-letter topic\n }, new FixedBackOff(0L, 2L));\n")])])]),a("p",[e._v("To configure the listener container with a customized instance of this handler, add it to the container factory.")]),e._v(" "),a("p",[e._v("For example, with the "),a("code",[e._v("@KafkaListener")]),e._v(" container factory, you can add "),a("code",[e._v("DefaultErrorHandler")]),e._v(" as follows:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() {\n ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory();\n factory.setConsumerFactory(consumerFactory());\n factory.getContainerProperties().setAckOnError(false);\n factory.getContainerProperties().setAckMode(AckMode.RECORD);\n factory.setCommonErrorHandler(new DefaultErrorHandler(new FixedBackOff(1000L, 2L)));\n return factory;\n}\n")])])]),a("p",[e._v("For a record listener, this will retry a delivery up to 2 times (3 delivery attempts) with a back off of 1 second, instead of the default configuration ("),a("code",[e._v("FixedBackOff(0L, 9)")]),e._v(").\nFailures are simply logged after retries are exhausted.")]),e._v(" "),a("p",[e._v("As an example; if the "),a("code",[e._v("poll")]),e._v(" returns six records (two from each partition 0, 1, 2) and the listener throws an exception on the fourth record, the container acknowledges the first three messages by committing their offsets.\nThe "),a("code",[e._v("DefaultErrorHandler")]),e._v(" seeks to offset 1 for partition 1 and offset 0 for partition 2.\nThe next "),a("code",[e._v("poll()")]),e._v(" returns the three unprocessed records.")]),e._v(" "),a("p",[e._v("If the "),a("code",[e._v("AckMode")]),e._v(" was "),a("code",[e._v("BATCH")]),e._v(", the container commits the offsets for the first two partitions before calling the error handler.")]),e._v(" "),a("p",[e._v("For a batch listener, the listener must throw a "),a("code",[e._v("BatchListenerFailedException")]),e._v(" indicating which records in the batch failed.")]),e._v(" "),a("p",[e._v("The sequence of events is:")]),e._v(" "),a("ul",[a("li",[a("p",[e._v("Commit the offsets of the records before the index.")])]),e._v(" "),a("li",[a("p",[e._v("If retries are not exhausted, perform seeks so that all the remaining records (including the failed record) will be redelivered.")])]),e._v(" "),a("li",[a("p",[e._v("If retries are exhausted, attempt recovery of the failed record (default log only) and perform seeks so that the remaining records (excluding the failed record) will be redelivered.\nThe recovered record’s offset is committed")])]),e._v(" "),a("li",[a("p",[e._v("If retries are exhausted and recovery fails, seeks are performed as if retries are not exhausted.")])])]),e._v(" "),a("p",[e._v("The default recoverer logs the failed record after retries are exhausted.\nYou can use a custom recoverer, or one provided by the framework such as the "),a("a",{attrs:{href:"#dead-letters"}},[a("code",[e._v("DeadLetterPublishingRecoverer")])]),e._v(".")]),e._v(" "),a("p",[e._v("When using a POJO batch listener (e.g. "),a("code",[e._v("List")]),e._v("), and you don’t have the full consumer record to add to the exception, you can just add the index of the record that failed:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "recovering", topics = "someTopic")\npublic void listen(List things) {\n for (int i = 0; i < records.size(); i++) {\n try {\n process(things.get(i));\n }\n catch (Exception e) {\n throw new BatchListenerFailedException("Failed to process", i);\n }\n }\n}\n')])])]),a("p",[e._v("When the container is configured with "),a("code",[e._v("AckMode.MANUAL_IMMEDIATE")]),e._v(", the error handler can be configured to commit the offset of recovered records; set the "),a("code",[e._v("commitRecovered")]),e._v(" property to "),a("code",[e._v("true")]),e._v(".")]),e._v(" "),a("p",[e._v("See also "),a("a",{attrs:{href:"#dead-letters"}},[e._v("Publishing Dead-letter Records")]),e._v(".")]),e._v(" "),a("p",[e._v("When using transactions, similar functionality is provided by the "),a("code",[e._v("DefaultAfterRollbackProcessor")]),e._v(".\nSee "),a("a",{attrs:{href:"#after-rollback"}},[e._v("After-rollback Processor")]),e._v(".")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("DefaultErrorHandler")]),e._v(" considers certain exceptions to be fatal, and retries are skipped for such exceptions; the recoverer is invoked on the first failure.\nThe exceptions that are considered fatal, by default, are:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("DeserializationException")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("MessageConversionException")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ConversionException")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("MethodArgumentResolutionException")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("NoSuchMethodException")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ClassCastException")])])])]),e._v(" "),a("p",[e._v("since these exceptions are unlikely to be resolved on a retried delivery.")]),e._v(" "),a("p",[e._v("You can add more exception types to the not-retryable category, or completely replace the map of classified exceptions.\nSee the Javadocs for "),a("code",[e._v("DefaultErrorHandler.addNotRetryableException()")]),e._v(" and "),a("code",[e._v("DefaultErrorHandler.setClassifications()")]),e._v(" for more information, as well as those for the "),a("code",[e._v("spring-retry")]),e._v(" "),a("code",[e._v("BinaryExceptionClassifier")]),e._v(".")]),e._v(" "),a("p",[e._v("Here is an example that adds "),a("code",[e._v("IllegalArgumentException")]),e._v(" to the not-retryable exceptions:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic DefaultErrorHandler errorHandler(ConsumerRecordRecoverer recoverer) {\n DefaultErrorHandler handler = new DefaultErrorHandler(recoverer);\n handler.addNotRetryableExceptions(IllegalArgumentException.class);\n return handler;\n}\n")])])]),a("p",[e._v("The error handler can be configured with one or more "),a("code",[e._v("RetryListener")]),e._v(" s, receiving notifications of retry and recovery progress.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@FunctionalInterface\npublic interface RetryListener {\n\n void failedDelivery(ConsumerRecord record, Exception ex, int deliveryAttempt);\n\n default void recovered(ConsumerRecord record, Exception ex) {\n }\n\n default void recoveryFailed(ConsumerRecord record, Exception original, Exception failure) {\n }\n\n}\n")])])]),a("p",[e._v("See the javadocs for more information.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If the recoverer fails (throws an exception), the failed record will be included in the seeks."),a("br"),e._v("If the recoverer fails, the "),a("code",[e._v("BackOff")]),e._v(" will be reset by default and redeliveries will again go through the back offs before recovery is attempted again."),a("br"),e._v("To skip retries after a recovery failure, set the error handler’s "),a("code",[e._v("resetStateOnRecoveryFailure")]),e._v(" to "),a("code",[e._v("false")]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("You can provide the error handler with a "),a("code",[e._v("BiFunction, Exception, BackOff>")]),e._v(" to determine the "),a("code",[e._v("BackOff")]),e._v(" to use, based on the failed record and/or the exception:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("handler.setBackOffFunction((record, ex) -> { ... });\n")])])]),a("p",[e._v("If the function returns "),a("code",[e._v("null")]),e._v(", the handler’s default "),a("code",[e._v("BackOff")]),e._v(" will be used.")]),e._v(" "),a("p",[e._v("Set "),a("code",[e._v("resetStateOnExceptionChange")]),e._v(" to "),a("code",[e._v("true")]),e._v(" and the retry sequence will be restarted (including the selection of a new "),a("code",[e._v("BackOff")]),e._v(", if so configured) if the exception type changes between failures.\nBy default, the exception type is not considered.")]),e._v(" "),a("p",[e._v("Also see "),a("a",{attrs:{href:"#delivery-header"}},[e._v("Delivery Attempts Header")]),e._v(".")]),e._v(" "),a("h4",{attrs:{id:"_4-1-21-conversion-errors-with-batch-error-handlers"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-21-conversion-errors-with-batch-error-handlers"}},[e._v("#")]),e._v(" 4.1.21. Conversion Errors with Batch Error Handlers")]),e._v(" "),a("p",[e._v("Starting with version 2.8, batch listeners can now properly handle conversion errors, when using a "),a("code",[e._v("MessageConverter")]),e._v(" with a "),a("code",[e._v("ByteArrayDeserializer")]),e._v(", a "),a("code",[e._v("BytesDeserializer")]),e._v(" or a "),a("code",[e._v("StringDeserializer")]),e._v(", as well as a "),a("code",[e._v("DefaultErrorHandler")]),e._v(".\nWhen a conversion error occurs, the payload is set to null and a deserialization exception is added to the record headers, similar to the "),a("code",[e._v("ErrorHandlingDeserializer")]),e._v(".\nA list of "),a("code",[e._v("ConversionException")]),e._v(" s is available in the listener so the listener can throw a "),a("code",[e._v("BatchListenerFailedException")]),e._v(" indicating the first index at which a conversion exception occurred.")]),e._v(" "),a("p",[e._v("Example:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(id = "test", topics = "topic")\nvoid listen(List in, @Header(KafkaHeaders.CONVERSION_FAILURES) List exceptions) {\n for (int i = 0; i < in.size(); i++) {\n Foo foo = in.get(i);\n if (foo == null && exceptions.get(i) != null) {\n throw new BatchListenerFailedException("Conversion error", exceptions.get(i), i);\n }\n process(foo);\n }\n}\n')])])]),a("h5",{attrs:{id:"retrying-complete-batches"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#retrying-complete-batches"}},[e._v("#")]),e._v(" Retrying Complete Batches")]),e._v(" "),a("p",[e._v("This is now the fallback behavior of the "),a("code",[e._v("DefaultErrorHandler")]),e._v(" for a batch listener where the listener throws an exception other than a "),a("code",[e._v("BatchListenerFailedException")]),e._v(".")]),e._v(" "),a("p",[e._v("There is no guarantee that, when a batch is redelivered, the batch has the same number of records and/or the redelivered records are in the same order.\nIt is impossible, therefore, to easily maintain retry state for a batch.\nThe "),a("code",[e._v("FallbackBatchErrorHandler")]),e._v(" takes a the following approach.\nIf a batch listener throws an exception that is not a "),a("code",[e._v("BatchListenerFailedException")]),e._v(", the retries are performed from the in-memory batch of records.\nIn order to avoid a rebalance during an extended retry sequence, the error handler pauses the consumer, polls it before sleeping for the back off, for each retry, and calls the listener again.\nIf/when retries are exhausted, the "),a("code",[e._v("ConsumerRecordRecoverer")]),e._v(" is called for each record in the batch.\nIf the recoverer throws an exception, or the thread is interrupted during its sleep, the batch of records will be redelivered on the next poll.\nBefore exiting, regardless of the outcome, the consumer is resumed.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("This mechanism cannot be used with transactions.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("While waiting for a "),a("code",[e._v("BackOff")]),e._v(" interval, the error handler will loop with a short sleep until the desired delay is reached, while checking to see if the container has been stopped, allowing the sleep to exit soon after the "),a("code",[e._v("stop()")]),e._v(" rather than causing a delay.")]),e._v(" "),a("h5",{attrs:{id:"container-stopping-error-handlers"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#container-stopping-error-handlers"}},[e._v("#")]),e._v(" Container Stopping Error Handlers")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("CommonContainerStoppingErrorHandler")]),e._v(" stops the container if the listener throws an exception.\nFor record listeners, when the "),a("code",[e._v("AckMode")]),e._v(" is "),a("code",[e._v("RECORD")]),e._v(", offsets for already processed records are committed.\nFor record listeners, when the "),a("code",[e._v("AckMode")]),e._v(" is any manual value, offsets for already acknowledged records are committed.\nFor record listeners, wWhen the "),a("code",[e._v("AckMode")]),e._v(" is "),a("code",[e._v("BATCH")]),e._v(", or for batch listeners, the entire batch is replayed when the container is restarted.")]),e._v(" "),a("p",[e._v("After the container stops, an exception that wraps the "),a("code",[e._v("ListenerExecutionFailedException")]),e._v(" is thrown.\nThis is to cause the transaction to roll back (if transactions are enabled).")]),e._v(" "),a("h5",{attrs:{id:"delegating-error-handler"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#delegating-error-handler"}},[e._v("#")]),e._v(" Delegating Error Handler")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("CommonDelegatingErrorHandler")]),e._v(" can delegate to different error handlers, depending on the exception type.\nFor example, you may wish to invoke a "),a("code",[e._v("DefaultErrorHandler")]),e._v(" for most exceptions, or a "),a("code",[e._v("CommonContainerStoppingErrorHandler")]),e._v(" for others.")]),e._v(" "),a("h5",{attrs:{id:"logging-error-handler"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#logging-error-handler"}},[e._v("#")]),e._v(" Logging Error Handler")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("CommonLoggingErrorHandler")]),e._v(" simply logs the exception; with a record listener, the remaining records from the previous poll are passed to the listener.\nFor a batch listener, all the records in the batch are logged.")]),e._v(" "),a("h5",{attrs:{id:"using-different-common-error-handlers-for-record-and-batch-listeners"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#using-different-common-error-handlers-for-record-and-batch-listeners"}},[e._v("#")]),e._v(" Using Different Common Error Handlers for Record and Batch Listeners")]),e._v(" "),a("p",[e._v("If you wish to use a different error handling strategy for record and batch listeners, the "),a("code",[e._v("CommonMixedErrorHandler")]),e._v(" is provided allowing the configuration of a specific error handler for each listener type.")]),e._v(" "),a("h5",{attrs:{id:"common-error-handler-summery"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#common-error-handler-summery"}},[e._v("#")]),e._v(" Common Error Handler Summery")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("DefaultErrorHandler")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("CommonContainerStoppingErrorHandler")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("CommonDelegatingErrorHandler")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("CommonLoggingErrorHandler")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("CommonMixedErrorHandler")])])])]),e._v(" "),a("h5",{attrs:{id:"legacy-error-handlers-and-their-replacements"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#legacy-error-handlers-and-their-replacements"}},[e._v("#")]),e._v(" Legacy Error Handlers and Their Replacements")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th",[e._v("Legacy Error Handler")]),e._v(" "),a("th",[e._v("Replacement")])])]),e._v(" "),a("tbody",[a("tr",[a("td",[a("code",[e._v("LoggingErrorHandler")])]),e._v(" "),a("td",[a("code",[e._v("CommonLoggingErrorHandler")])])]),e._v(" "),a("tr",[a("td",[a("code",[e._v("BatchLoggingErrorHandler")])]),e._v(" "),a("td",[a("code",[e._v("CommonLoggingErrorHandler")])])]),e._v(" "),a("tr",[a("td",[a("code",[e._v("ConditionalDelegatingErrorHandler")])]),e._v(" "),a("td",[a("code",[e._v("DelegatingErrorHandler")])])]),e._v(" "),a("tr",[a("td",[a("code",[e._v("ConditionalDelegatingBatchErrorHandler")])]),e._v(" "),a("td",[a("code",[e._v("DelegatingErrorHandler")])])]),e._v(" "),a("tr",[a("td",[a("code",[e._v("ContainerStoppingErrorHandler")])]),e._v(" "),a("td",[a("code",[e._v("CommonContainerStoppingErrorHandler")])])]),e._v(" "),a("tr",[a("td",[a("code",[e._v("ContainerStoppingBatchErrorHandler")])]),e._v(" "),a("td",[a("code",[e._v("CommonContainerStoppingErrorHandler")])])]),e._v(" "),a("tr",[a("td",[a("code",[e._v("SeekToCurrentErrorHandler")])]),e._v(" "),a("td",[a("code",[e._v("DefaultErrorHandler")])])]),e._v(" "),a("tr",[a("td",[a("code",[e._v("SeekToCurrentBatchErrorHandler")])]),e._v(" "),a("td",[e._v("No replacement, use "),a("code",[e._v("DefaultErrorHandler")]),e._v(" with an infinite "),a("code",[e._v("BackOff")]),e._v(".")])]),e._v(" "),a("tr",[a("td",[a("code",[e._v("RecoveringBatchErrorHandler")])]),e._v(" "),a("td",[a("code",[e._v("DefaultErrorHandler")])])]),e._v(" "),a("tr",[a("td",[a("code",[e._v("RetryingBatchErrorHandler")])]),e._v(" "),a("td",[e._v("No replacements - use "),a("code",[e._v("DefaultErrorHandler")]),e._v(" and throw an exception other than "),a("code",[e._v("BatchListenerFailedException")]),e._v(".")])])])]),e._v(" "),a("h5",{attrs:{id:"after-rollback-processor"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#after-rollback-processor"}},[e._v("#")]),e._v(" After-rollback Processor")]),e._v(" "),a("p",[e._v("When using transactions, if the listener throws an exception (and an error handler, if present, throws an exception), the transaction is rolled back.\nBy default, any unprocessed records (including the failed record) are re-fetched on the next poll.\nThis is achieved by performing "),a("code",[e._v("seek")]),e._v(" operations in the "),a("code",[e._v("DefaultAfterRollbackProcessor")]),e._v(".\nWith a batch listener, the entire batch of records is reprocessed (the container has no knowledge of which record in the batch failed).\nTo modify this behavior, you can configure the listener container with a custom "),a("code",[e._v("AfterRollbackProcessor")]),e._v(".\nFor example, with a record-based listener, you might want to keep track of the failed record and give up after some number of attempts, perhaps by publishing it to a dead-letter topic.")]),e._v(" "),a("p",[e._v("Starting with version 2.2, the "),a("code",[e._v("DefaultAfterRollbackProcessor")]),e._v(" can now recover (skip) a record that keeps failing.\nBy default, after ten failures, the failed record is logged (at the "),a("code",[e._v("ERROR")]),e._v(" level).\nYou can configure the processor with a custom recoverer ("),a("code",[e._v("BiConsumer")]),e._v(") and maximum failures.\nSetting the "),a("code",[e._v("maxFailures")]),e._v(" property to a negative number causes infinite retries.\nThe following example configures recovery after three tries:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("AfterRollbackProcessor processor =\n new DefaultAfterRollbackProcessor((record, exception) -> {\n // recover after 3 failures, with no back off - e.g. send to a dead-letter topic\n }, new FixedBackOff(0L, 2L));\n")])])]),a("p",[e._v("When you do not use transactions, you can achieve similar functionality by configuring a "),a("code",[e._v("DefaultErrorHandler")]),e._v(".\nSee "),a("a",{attrs:{href:"#error-handlers"}},[e._v("Container Error Handlers")]),e._v(".")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Recovery is not possible with a batch listener, since the framework has no knowledge about which record in the batch keeps failing."),a("br"),e._v("In such cases, the application listener must handle a record that keeps failing.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("See also "),a("a",{attrs:{href:"#dead-letters"}},[e._v("Publishing Dead-letter Records")]),e._v(".")]),e._v(" "),a("p",[e._v("Starting with version 2.2.5, the "),a("code",[e._v("DefaultAfterRollbackProcessor")]),e._v(" can be invoked in a new transaction (started after the failed transaction rolls back).\nThen, if you are using the "),a("code",[e._v("DeadLetterPublishingRecoverer")]),e._v(" to publish a failed record, the processor will send the recovered record’s offset in the original topic/partition to the transaction.\nTo enable this feature, set the "),a("code",[e._v("commitRecovered")]),e._v(" and "),a("code",[e._v("kafkaTemplate")]),e._v(" properties on the "),a("code",[e._v("DefaultAfterRollbackProcessor")]),e._v(".")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If the recoverer fails (throws an exception), the failed record will be included in the seeks."),a("br"),e._v("Starting with version 2.5.5, if the recoverer fails, the "),a("code",[e._v("BackOff")]),e._v(" will be reset by default and redeliveries will again go through the back offs before recovery is attempted again."),a("br"),e._v("With earlier versions, the "),a("code",[e._v("BackOff")]),e._v(" was not reset and recovery was re-attempted on the next failure."),a("br"),e._v("To revert to the previous behavior, set the processor’s "),a("code",[e._v("resetStateOnRecoveryFailure")]),e._v(" property to "),a("code",[e._v("false")]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Starting with version 2.6, you can now provide the processor with a "),a("code",[e._v("BiFunction, Exception, BackOff>")]),e._v(" to determine the "),a("code",[e._v("BackOff")]),e._v(" to use, based on the failed record and/or the exception:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("handler.setBackOffFunction((record, ex) -> { ... });\n")])])]),a("p",[e._v("If the function returns "),a("code",[e._v("null")]),e._v(", the processor’s default "),a("code",[e._v("BackOff")]),e._v(" will be used.")]),e._v(" "),a("p",[e._v("Starting with version 2.6.3, set "),a("code",[e._v("resetStateOnExceptionChange")]),e._v(" to "),a("code",[e._v("true")]),e._v(" and the retry sequence will be restarted (including the selection of a new "),a("code",[e._v("BackOff")]),e._v(", if so configured) if the exception type changes between failures.\nBy default, the exception type is not considered.")]),e._v(" "),a("p",[e._v("Starting with version 2.3.1, similar to the "),a("code",[e._v("DefaultErrorHandler")]),e._v(", the "),a("code",[e._v("DefaultAfterRollbackProcessor")]),e._v(" considers certain exceptions to be fatal, and retries are skipped for such exceptions; the recoverer is invoked on the first failure.\nThe exceptions that are considered fatal, by default, are:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("DeserializationException")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("MessageConversionException")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ConversionException")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("MethodArgumentResolutionException")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("NoSuchMethodException")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ClassCastException")])])])]),e._v(" "),a("p",[e._v("since these exceptions are unlikely to be resolved on a retried delivery.")]),e._v(" "),a("p",[e._v("You can add more exception types to the not-retryable category, or completely replace the map of classified exceptions.\nSee the Javadocs for "),a("code",[e._v("DefaultAfterRollbackProcessor.setClassifications()")]),e._v(" for more information, as well as those for the "),a("code",[e._v("spring-retry")]),e._v(" "),a("code",[e._v("BinaryExceptionClassifier")]),e._v(".")]),e._v(" "),a("p",[e._v("Here is an example that adds "),a("code",[e._v("IllegalArgumentException")]),e._v(" to the not-retryable exceptions:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic DefaultAfterRollbackProcessor errorHandler(BiConsumer, Exception> recoverer) {\n DefaultAfterRollbackProcessor processor = new DefaultAfterRollbackProcessor(recoverer);\n processor.addNotRetryableException(IllegalArgumentException.class);\n return processor;\n}\n")])])]),a("p",[e._v("Also see "),a("a",{attrs:{href:"#delivery-header"}},[e._v("Delivery Attempts Header")]),e._v(".")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("With current "),a("code",[e._v("kafka-clients")]),e._v(", the container cannot detect whether a "),a("code",[e._v("ProducerFencedException")]),e._v(" is caused by a rebalance or if the producer’s "),a("code",[e._v("transactional.id")]),e._v(" has been revoked due to a timeout or expiry."),a("br"),e._v("Because, in most cases, it is caused by a rebalance, the container does not call the "),a("code",[e._v("AfterRollbackProcessor")]),e._v(" (because it’s not appropriate to seek the partitions because we no longer are assigned them)."),a("br"),e._v('If you ensure the timeout is large enough to process each transaction and periodically perform an "empty" transaction (e.g. via a '),a("code",[e._v("ListenerContainerIdleEvent")]),e._v(") you can avoid fencing due to timeout and expiry."),a("br"),e._v("Or, you can set the "),a("code",[e._v("stopContainerWhenFenced")]),e._v(" container property to "),a("code",[e._v("true")]),e._v(" and the container will stop, avoiding the loss of records."),a("br"),e._v("You can consume a "),a("code",[e._v("ConsumerStoppedEvent")]),e._v(" and check the "),a("code",[e._v("Reason")]),e._v(" property for "),a("code",[e._v("FENCED")]),e._v(" to detect this condition."),a("br"),e._v("Since the event also has a reference to the container, you can restart the container using this event.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Starting with version 2.7, while waiting for a "),a("code",[e._v("BackOff")]),e._v(" interval, the error handler will loop with a short sleep until the desired delay is reached, while checking to see if the container has been stopped, allowing the sleep to exit soon after the "),a("code",[e._v("stop()")]),e._v(" rather than causing a delay.")]),e._v(" "),a("p",[e._v("Starting with version 2.7, the processor can be configured with one or more "),a("code",[e._v("RetryListener")]),e._v(" s, receiving notifications of retry and recovery progress.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@FunctionalInterface\npublic interface RetryListener {\n\n void failedDelivery(ConsumerRecord record, Exception ex, int deliveryAttempt);\n\n default void recovered(ConsumerRecord record, Exception ex) {\n }\n\n default void recoveryFailed(ConsumerRecord record, Exception original, Exception failure) {\n }\n\n}\n")])])]),a("p",[e._v("See the javadocs for more information.")]),e._v(" "),a("h5",{attrs:{id:"delivery-attempts-header"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#delivery-attempts-header"}},[e._v("#")]),e._v(" Delivery Attempts Header")]),e._v(" "),a("p",[e._v("The following applies to record listeners only, not batch listeners.")]),e._v(" "),a("p",[e._v("Starting with version 2.5, when using an "),a("code",[e._v("ErrorHandler")]),e._v(" or "),a("code",[e._v("AfterRollbackProcessor")]),e._v(" that implements "),a("code",[e._v("DeliveryAttemptAware")]),e._v(", it is possible to enable the addition of the "),a("code",[e._v("KafkaHeaders.DELIVERY_ATTEMPT")]),e._v(" header ("),a("code",[e._v("kafka_deliveryAttempt")]),e._v(") to the record.\nThe value of this header is an incrementing integer starting at 1.\nWhen receiving a raw "),a("code",[e._v("ConsumerRecord")]),e._v(" the integer is in a "),a("code",[e._v("byte[4]")]),e._v(".")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("int delivery = ByteBuffer.wrap(record.headers()\n .lastHeader(KafkaHeaders.DELIVERY_ATTEMPT).value())\n .getInt()\n")])])]),a("p",[e._v("When using "),a("code",[e._v("@KafkaListener")]),e._v(" with the "),a("code",[e._v("DefaultKafkaHeaderMapper")]),e._v(" or "),a("code",[e._v("SimpleKafkaHeaderMapper")]),e._v(", it can be obtained by adding "),a("code",[e._v("@Header(KafkaHeaders.DELIVERY_ATTEMPT) int delivery")]),e._v(" as a parameter to the listener method.")]),e._v(" "),a("p",[e._v("To enable population of this header, set the container property "),a("code",[e._v("deliveryAttemptHeader")]),e._v(" to "),a("code",[e._v("true")]),e._v(".\nIt is disabled by default to avoid the (small) overhead of looking up the state for each record and adding the header.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("DefaultErrorHandler")]),e._v(" and "),a("code",[e._v("DefaultAfterRollbackProcessor")]),e._v(" support this feature.")]),e._v(" "),a("h5",{attrs:{id:"publishing-dead-letter-records"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#publishing-dead-letter-records"}},[e._v("#")]),e._v(" Publishing Dead-letter Records")]),e._v(" "),a("p",[e._v("You can configure the "),a("code",[e._v("DefaultErrorHandler")]),e._v(" and "),a("code",[e._v("DefaultAfterRollbackProcessor")]),e._v(" with a record recoverer when the maximum number of failures is reached for a record.\nThe framework provides the "),a("code",[e._v("DeadLetterPublishingRecoverer")]),e._v(", which publishes the failed message to another topic.\nThe recoverer requires a "),a("code",[e._v("KafkaTemplate")]),e._v(", which is used to send the record.\nYou can also, optionally, configure it with a "),a("code",[e._v("BiFunction, Exception, TopicPartition>")]),e._v(", which is called to resolve the destination topic and partition.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("By default, the dead-letter record is sent to a topic named "),a("code",[e._v(".DLT")]),e._v(" (the original topic name suffixed with "),a("code",[e._v(".DLT")]),e._v(") and to the same partition as the original record."),a("br"),e._v("Therefore, when you use the default resolver, the dead-letter topic "),a("strong",[e._v("must have at least as many partitions as the original topic.")])])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("If the returned "),a("code",[e._v("TopicPartition")]),e._v(" has a negative partition, the partition is not set in the "),a("code",[e._v("ProducerRecord")]),e._v(", so the partition is selected by Kafka.\nStarting with version 2.2.4, any "),a("code",[e._v("ListenerExecutionFailedException")]),e._v(" (thrown, for example, when an exception is detected in a "),a("code",[e._v("@KafkaListener")]),e._v(" method) is enhanced with the "),a("code",[e._v("groupId")]),e._v(" property.\nThis allows the destination resolver to use this, in addition to the information in the "),a("code",[e._v("ConsumerRecord")]),e._v(" to select the dead letter topic.")]),e._v(" "),a("p",[e._v("The following example shows how to wire a custom destination resolver:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template,\n (r, e) -> {\n if (e instanceof FooException) {\n return new TopicPartition(r.topic() + ".Foo.failures", r.partition());\n }\n else {\n return new TopicPartition(r.topic() + ".other.failures", r.partition());\n }\n });\nErrorHandler errorHandler = new DefaultErrorHandler(recoverer, new FixedBackOff(0L, 2L));\n')])])]),a("p",[e._v("The record sent to the dead-letter topic is enhanced with the following headers:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("KafkaHeaders.DLT_EXCEPTION_FQCN")]),e._v(": The Exception class name (generally a "),a("code",[e._v("ListenerExecutionFailedException")]),e._v(", but can be others).")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.DLT_EXCEPTION_CAUSE_FQCN")]),e._v(": The Exception cause class name, if present (since version 2.8).")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.DLT_EXCEPTION_STACKTRACE")]),e._v(": The Exception stack trace.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.DLT_EXCEPTION_MESSAGE")]),e._v(": The Exception message.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.DLT_KEY_EXCEPTION_FQCN")]),e._v(": The Exception class name (key deserialization errors only).")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.DLT_KEY_EXCEPTION_STACKTRACE")]),e._v(": The Exception stack trace (key deserialization errors only).")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.DLT_KEY_EXCEPTION_MESSAGE")]),e._v(": The Exception message (key deserialization errors only).")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.DLT_ORIGINAL_TOPIC")]),e._v(": The original topic.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.DLT_ORIGINAL_PARTITION")]),e._v(": The original partition.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.DLT_ORIGINAL_OFFSET")]),e._v(": The original offset.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.DLT_ORIGINAL_TIMESTAMP")]),e._v(": The original timestamp.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.DLT_ORIGINAL_TIMESTAMP_TYPE")]),e._v(": The original timestamp type.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaHeaders.DLT_ORIGINAL_CONSUMER_GROUP")]),e._v(": The original consumer group that failed to process the record (since version 2.8).")])])]),e._v(" "),a("p",[e._v("Key exceptions are only caused by "),a("code",[e._v("DeserializationException")]),e._v(" s so there is no "),a("code",[e._v("DLT_KEY_EXCEPTION_CAUSE_FQCN")]),e._v(".")]),e._v(" "),a("p",[e._v("There are two mechanisms to add more headers.")]),e._v(" "),a("ol",[a("li",[a("p",[e._v("Subclass the recoverer and override "),a("code",[e._v("createProducerRecord()")]),e._v(" - call "),a("code",[e._v("super.createProducerRecord()")]),e._v(" and add more headers.")])]),e._v(" "),a("li",[a("p",[e._v("Provide a "),a("code",[e._v("BiFunction")]),e._v(" to receive the consumer record and exception, returning a "),a("code",[e._v("Headers")]),e._v(" object; headers from there will be copied to the final producer record.\nUse "),a("code",[e._v("setHeadersFunction()")]),e._v(" to set the "),a("code",[e._v("BiFunction")]),e._v(".")])])]),e._v(" "),a("p",[e._v("The second is simpler to implement but the first has more information available, including the already assembled standard headers.")]),e._v(" "),a("p",[e._v("Starting with version 2.3, when used in conjunction with an "),a("code",[e._v("ErrorHandlingDeserializer")]),e._v(", the publisher will restore the record "),a("code",[e._v("value()")]),e._v(", in the dead-letter producer record, to the original value that failed to be deserialized.\nPreviously, the "),a("code",[e._v("value()")]),e._v(" was null and user code had to decode the "),a("code",[e._v("DeserializationException")]),e._v(" from the message headers.\nIn addition, you can provide multiple "),a("code",[e._v("KafkaTemplate")]),e._v(" s to the publisher; this might be needed, for example, if you want to publish the "),a("code",[e._v("byte[]")]),e._v(" from a "),a("code",[e._v("DeserializationException")]),e._v(", as well as values using a different serializer from records that were deserialized successfully.\nHere is an example of configuring the publisher with "),a("code",[e._v("KafkaTemplate")]),e._v(" s that use a "),a("code",[e._v("String")]),e._v(" and "),a("code",[e._v("byte[]")]),e._v(" serializer:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic DeadLetterPublishingRecoverer publisher(KafkaTemplate stringTemplate,\n KafkaTemplate bytesTemplate) {\n\n Map, KafkaTemplate> templates = new LinkedHashMap<>();\n templates.put(String.class, stringTemplate);\n templates.put(byte[].class, bytesTemplate);\n return new DeadLetterPublishingRecoverer(templates);\n}\n")])])]),a("p",[e._v("The publisher uses the map keys to locate a template that is suitable for the "),a("code",[e._v("value()")]),e._v(" about to be published.\nA "),a("code",[e._v("LinkedHashMap")]),e._v(" is recommended so that the keys are examined in order.")]),e._v(" "),a("p",[e._v("When publishing "),a("code",[e._v("null")]),e._v(" values, when there are multiple templates, the recoverer will look for a template for the "),a("code",[e._v("Void")]),e._v(" class; if none is present, the first template from the "),a("code",[e._v("values().iterator()")]),e._v(" will be used.")]),e._v(" "),a("p",[e._v("Since 2.7 you can use the "),a("code",[e._v("setFailIfSendResultIsError")]),e._v(" method so that an exception is thrown when message publishing fails.\nYou can also set a timeout for the verification of the sender success with "),a("code",[e._v("setWaitForSendResultTimeout")]),e._v(".")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If the recoverer fails (throws an exception), the failed record will be included in the seeks."),a("br"),e._v("Starting with version 2.5.5, if the recoverer fails, the "),a("code",[e._v("BackOff")]),e._v(" will be reset by default and redeliveries will again go through the back offs before recovery is attempted again."),a("br"),e._v("With earlier versions, the "),a("code",[e._v("BackOff")]),e._v(" was not reset and recovery was re-attempted on the next failure."),a("br"),e._v("To revert to the previous behavior, set the error handler’s "),a("code",[e._v("resetStateOnRecoveryFailure")]),e._v(" property to "),a("code",[e._v("false")]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Starting with version 2.6.3, set "),a("code",[e._v("resetStateOnExceptionChange")]),e._v(" to "),a("code",[e._v("true")]),e._v(" and the retry sequence will be restarted (including the selection of a new "),a("code",[e._v("BackOff")]),e._v(", if so configured) if the exception type changes between failures.\nBy default, the exception type is not considered.")]),e._v(" "),a("p",[e._v("Starting with version 2.3, the recoverer can also be used with Kafka Streams - see "),a("a",{attrs:{href:"#streams-deser-recovery"}},[e._v("Recovery from Deserialization Exceptions")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("ErrorHandlingDeserializer")]),e._v(" adds the deserialization exception(s) in headers "),a("code",[e._v("ErrorHandlingDeserializer.VALUE_DESERIALIZER_EXCEPTION_HEADER")]),e._v(" and "),a("code",[e._v("ErrorHandlingDeserializer.KEY_DESERIALIZER_EXCEPTION_HEADER")]),e._v(" (using java serialization).\nBy default, these headers are not retained in the message published to the dead letter topic.\nStarting with version 2.7, if both the key and value fail deserialization, the original values of both are populated in the record sent to the DLT.")]),e._v(" "),a("p",[e._v("If incoming records are dependent on each other, but may arrive out of order, it may be useful to republish a failed record to the tail of the original topic (for some number of times), instead of sending it directly to the dead letter topic.\nSee "),a("a",{attrs:{href:"https://stackoverflow.com/questions/64646996",target:"_blank",rel:"noopener noreferrer"}},[e._v("this Stack Overflow Question"),a("OutboundLink")],1),e._v(" for an example.")]),e._v(" "),a("p",[e._v("The following error handler configuration will do exactly that:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic ErrorHandler eh(KafkaOperations template) {\n return new DefaultErrorHandler(new DeadLetterPublishingRecoverer(template,\n (rec, ex) -> {\n org.apache.kafka.common.header.Header retries = rec.headers().lastHeader("retries");\n if (retries == null) {\n retries = new RecordHeader("retries", new byte[] { 1 });\n rec.headers().add(retries);\n }\n else {\n retries.value()[0]++;\n }\n return retries.value()[0] > 5\n ? new TopicPartition("topic.DLT", rec.partition())\n : new TopicPartition("topic", rec.partition());\n }), new FixedBackOff(0L, 0L));\n}\n')])])]),a("p",[e._v("Starting with version 2.7, the recoverer checks that the partition selected by the destination resolver actually exists.\nIf the partition is not present, the partition in the "),a("code",[e._v("ProducerRecord")]),e._v(" is set to "),a("code",[e._v("null")]),e._v(", allowing the "),a("code",[e._v("KafkaProducer")]),e._v(" to select the partition.\nYou can disable this check by setting the "),a("code",[e._v("verifyPartition")]),e._v(" property to "),a("code",[e._v("false")]),e._v(".")]),e._v(" "),a("h5",{attrs:{id:"managing-dead-letter-record-headers"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#managing-dead-letter-record-headers"}},[e._v("#")]),e._v(" Managing Dead Letter Record Headers")]),e._v(" "),a("p",[e._v("Referring to "),a("a",{attrs:{href:"#dead-letters"}},[e._v("Publishing Dead-letter Records")]),e._v(" above, the "),a("code",[e._v("DeadLetterPublishingRecoverer")]),e._v(" has two properties used to manage headers when those headers already exist (such as when reprocessing a dead letter record that failed, including when using "),a("a",{attrs:{href:"#retry-topic"}},[e._v("Non-Blocking Retries")]),e._v(").")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("appendOriginalHeaders")]),e._v(" (default "),a("code",[e._v("true")]),e._v(")")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("stripPreviousExceptionHeaders")]),e._v(" (default "),a("code",[e._v("true")]),e._v(" since version 2.8)")])])]),e._v(" "),a("p",[e._v('Apache Kafka supports multiple headers with the same name; to obtain the "latest" value, you can use '),a("code",[e._v("headers.lastHeader(headerName)")]),e._v("; to get an iterator over multiple headers, use "),a("code",[e._v("headers.headers(headerName).iterator()")]),e._v(".")]),e._v(" "),a("p",[e._v("When repeatedly republishing a failed record, these headers can grow (and eventually cause publication to fail due to a "),a("code",[e._v("RecordTooLargeException")]),e._v("); this is especially true for the exception headers and particularly for the stack trace headers.")]),e._v(" "),a("p",[e._v("The reason for the two properties is because, while you might want to retain only the last exception information, you might want to retain the history of which topic(s) the record passed through for each failure.")]),e._v(" "),a("p",[a("code",[e._v("appendOriginalHeaders")]),e._v(" is applied to all headers named "),a("code",[e._v("**ORIGINAL**")]),e._v(" while "),a("code",[e._v("stripPreviousExceptionHeaders")]),e._v(" is applied to all headers named "),a("code",[e._v("**EXCEPTION**")]),e._v(".")]),e._v(" "),a("p",[e._v("Also see "),a("a",{attrs:{href:"#retry-headers"}},[e._v("Failure Header Management")]),e._v(" with "),a("a",{attrs:{href:"#retry-topic"}},[e._v("Non-Blocking Retries")]),e._v(".")]),e._v(" "),a("h5",{attrs:{id:"exponentialbackoffwithmaxretries-implementation"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#exponentialbackoffwithmaxretries-implementation"}},[e._v("#")]),e._v(" "),a("code",[e._v("ExponentialBackOffWithMaxRetries")]),e._v(" Implementation")]),e._v(" "),a("p",[e._v("Spring Framework provides a number of "),a("code",[e._v("BackOff")]),e._v(" implementations.\nBy default, the "),a("code",[e._v("ExponentialBackOff")]),e._v(" will retry indefinitely; to give up after some number of retry attempts requires calculating the "),a("code",[e._v("maxElapsedTime")]),e._v(".\nSince version 2.7.3, Spring for Apache Kafka provides the "),a("code",[e._v("ExponentialBackOffWithMaxRetries")]),e._v(" which is a subclass that receives the "),a("code",[e._v("maxRetries")]),e._v(" property and automatically calculates the "),a("code",[e._v("maxElapsedTime")]),e._v(", which is a little more convenient.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\nDefaultErrorHandler handler() {\n ExponentialBackOffWithMaxRetries bo = new ExponentialBackOffWithMaxRetries(6);\n bo.setInitialInterval(1_000L);\n bo.setMultiplier(2.0);\n bo.setMaxInterval(10_000L);\n return new DefaultErrorHandler(myRecoverer, bo);\n}\n")])])]),a("p",[e._v("This will retry after "),a("code",[e._v("1, 2, 4, 8, 10, 10")]),e._v(" seconds, before calling the recoverer.")]),e._v(" "),a("h4",{attrs:{id:"_4-1-22-jaas-and-kerberos"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-1-22-jaas-and-kerberos"}},[e._v("#")]),e._v(" 4.1.22. JAAS and Kerberos")]),e._v(" "),a("p",[e._v("Starting with version 2.0, a "),a("code",[e._v("KafkaJaasLoginModuleInitializer")]),e._v(" class has been added to assist with Kerberos configuration.\nYou can add this bean, with the desired configuration, to your application context.\nThe following example configures such a bean:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic KafkaJaasLoginModuleInitializer jaasConfig() throws IOException {\n KafkaJaasLoginModuleInitializer jaasConfig = new KafkaJaasLoginModuleInitializer();\n jaasConfig.setControlFlag("REQUIRED");\n Map options = new HashMap<>();\n options.put("useKeyTab", "true");\n options.put("storeKey", "true");\n options.put("keyTab", "/etc/security/keytabs/kafka_client.keytab");\n options.put("principal", "[email protected]");\n jaasConfig.setOptions(options);\n return jaasConfig;\n}\n')])])]),a("h3",{attrs:{id:"_4-2-apache-kafka-streams-support"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-2-apache-kafka-streams-support"}},[e._v("#")]),e._v(" 4.2. Apache Kafka Streams Support")]),e._v(" "),a("p",[e._v("Starting with version 1.1.4, Spring for Apache Kafka provides first-class support for "),a("a",{attrs:{href:"https://kafka.apache.org/documentation/streams",target:"_blank",rel:"noopener noreferrer"}},[e._v("Kafka Streams"),a("OutboundLink")],1),e._v(".\nTo use it from a Spring application, the "),a("code",[e._v("kafka-streams")]),e._v(" jar must be present on classpath.\nIt is an optional dependency of the Spring for Apache Kafka project and is not downloaded transitively.")]),e._v(" "),a("h4",{attrs:{id:"_4-2-1-basics"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-2-1-basics"}},[e._v("#")]),e._v(" 4.2.1. Basics")]),e._v(" "),a("p",[e._v("The reference Apache Kafka Streams documentation suggests the following way of using the API:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("// Use the builders to define the actual processing topology, e.g. to specify\n// from which input topics to read, which stream operations (filter, map, etc.)\n// should be called, and so on.\n\nStreamsBuilder builder = ...; // when using the Kafka Streams DSL\n\n// Use the configuration to tell your application where the Kafka cluster is,\n// which serializers/deserializers to use by default, to specify security settings,\n// and so on.\nStreamsConfig config = ...;\n\nKafkaStreams streams = new KafkaStreams(builder, config);\n\n// Start the Kafka Streams instance\nstreams.start();\n\n// Stop the Kafka Streams instance\nstreams.close();\n")])])]),a("p",[e._v("So, we have two main components:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("StreamsBuilder")]),e._v(": With an API to build "),a("code",[e._v("KStream")]),e._v(" (or "),a("code",[e._v("KTable")]),e._v(") instances.")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("KafkaStreams")]),e._v(": To manage the lifecycle of those instances.")])])]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("All "),a("code",[e._v("KStream")]),e._v(" instances exposed to a "),a("code",[e._v("KafkaStreams")]),e._v(" instance by a single "),a("code",[e._v("StreamsBuilder")]),e._v(" are started and stopped at the same time, even if they have different logic."),a("br"),e._v("In other words, all streams defined by a "),a("code",[e._v("StreamsBuilder")]),e._v(" are tied with a single lifecycle control."),a("br"),e._v("Once a "),a("code",[e._v("KafkaStreams")]),e._v(" instance has been closed by "),a("code",[e._v("streams.close()")]),e._v(", it cannot be restarted."),a("br"),e._v("Instead, a new "),a("code",[e._v("KafkaStreams")]),e._v(" instance to restart stream processing must be created.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h4",{attrs:{id:"_4-2-2-spring-management"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-2-2-spring-management"}},[e._v("#")]),e._v(" 4.2.2. Spring Management")]),e._v(" "),a("p",[e._v("To simplify using Kafka Streams from the Spring application context perspective and use the lifecycle management through a container, the Spring for Apache Kafka introduces "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(".\nThis is an "),a("code",[e._v("AbstractFactoryBean")]),e._v(" implementation to expose a "),a("code",[e._v("StreamsBuilder")]),e._v(" singleton instance as a bean.\nThe following example creates such a bean:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic FactoryBean myKStreamBuilder(KafkaStreamsConfiguration streamsConfig) {\n return new StreamsBuilderFactoryBean(streamsConfig);\n}\n")])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Starting with version 2.2, the stream configuration is now provided as a "),a("code",[e._v("KafkaStreamsConfiguration")]),e._v(" object rather than a "),a("code",[e._v("StreamsConfig")]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(" also implements "),a("code",[e._v("SmartLifecycle")]),e._v(" to manage the lifecycle of an internal "),a("code",[e._v("KafkaStreams")]),e._v(" instance.\nSimilar to the Kafka Streams API, you must define the "),a("code",[e._v("KStream")]),e._v(" instances before you start the "),a("code",[e._v("KafkaStreams")]),e._v(".\nThat also applies for the Spring API for Kafka Streams.\nTherefore, when you use default "),a("code",[e._v("autoStartup = true")]),e._v(" on the "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(", you must declare "),a("code",[e._v("KStream")]),e._v(" instances on the "),a("code",[e._v("StreamsBuilder")]),e._v(" before the application context is refreshed.\nFor example, "),a("code",[e._v("KStream")]),e._v(" can be a regular bean definition, while the Kafka Streams API is used without any impacts.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic KStream kStream(StreamsBuilder kStreamBuilder) {\n KStream stream = kStreamBuilder.stream(STREAMING_TOPIC1);\n // Fluent KStream API\n return stream;\n}\n")])])]),a("p",[e._v("If you would like to control the lifecycle manually (for example, stopping and starting by some condition), you can reference the "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(" bean directly by using the factory bean ("),a("code",[e._v("&")]),e._v(") "),a("a",{attrs:{href:"https://docs.spring.io/spring/docs/current/spring-framework-reference/html/beans.html#beans-factory-extension-factorybean",target:"_blank",rel:"noopener noreferrer"}},[e._v("prefix"),a("OutboundLink")],1),e._v(".\nSince "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(" use its internal "),a("code",[e._v("KafkaStreams")]),e._v(" instance, it is safe to stop and restart it again.\nA new "),a("code",[e._v("KafkaStreams")]),e._v(" is created on each "),a("code",[e._v("start()")]),e._v(".\nYou might also consider using different "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(" instances, if you would like to control the lifecycles for "),a("code",[e._v("KStream")]),e._v(" instances separately.")]),e._v(" "),a("p",[e._v("You also can specify "),a("code",[e._v("KafkaStreams.StateListener")]),e._v(", "),a("code",[e._v("Thread.UncaughtExceptionHandler")]),e._v(", and "),a("code",[e._v("StateRestoreListener")]),e._v(" options on the "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(", which are delegated to the internal "),a("code",[e._v("KafkaStreams")]),e._v(" instance.\nAlso, apart from setting those options indirectly on "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(", starting with "),a("em",[e._v("version 2.1.5")]),e._v(", you can use a "),a("code",[e._v("KafkaStreamsCustomizer")]),e._v(" callback interface to configure an inner "),a("code",[e._v("KafkaStreams")]),e._v(" instance.\nNote that "),a("code",[e._v("KafkaStreamsCustomizer")]),e._v(" overrides the options provided by "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(".\nIf you need to perform some "),a("code",[e._v("KafkaStreams")]),e._v(" operations directly, you can access that internal "),a("code",[e._v("KafkaStreams")]),e._v(" instance by using "),a("code",[e._v("StreamsBuilderFactoryBean.getKafkaStreams()")]),e._v(".\nYou can autowire "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(" bean by type, but you should be sure to use the full type in the bean definition, as the following example shows:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic StreamsBuilderFactoryBean myKStreamBuilder(KafkaStreamsConfiguration streamsConfig) {\n return new StreamsBuilderFactoryBean(streamsConfig);\n}\n...\n@Autowired\nprivate StreamsBuilderFactoryBean myKStreamBuilderFactoryBean;\n")])])]),a("p",[e._v("Alternatively, you can add "),a("code",[e._v("@Qualifier")]),e._v(" for injection by name if you use interface bean definition.\nThe following example shows how to do so:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic FactoryBean myKStreamBuilder(KafkaStreamsConfiguration streamsConfig) {\n return new StreamsBuilderFactoryBean(streamsConfig);\n}\n...\n@Autowired\n@Qualifier("&myKStreamBuilder")\nprivate StreamsBuilderFactoryBean myKStreamBuilderFactoryBean;\n')])])]),a("p",[e._v("Starting with version 2.4.1, the factory bean has a new property "),a("code",[e._v("infrastructureCustomizer")]),e._v(" with type "),a("code",[e._v("KafkaStreamsInfrastructureCustomizer")]),e._v("; this allows customization of the "),a("code",[e._v("StreamsBuilder")]),e._v(" (e.g. to add a state store) and/or the "),a("code",[e._v("Topology")]),e._v(" before the stream is created.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public interface KafkaStreamsInfrastructureCustomizer {\n\n\tvoid configureBuilder(StreamsBuilder builder);\n\n\tvoid configureTopology(Topology topology);\n\n}\n")])])]),a("p",[e._v("Default no-op implementations are provided to avoid having to implement both methods if one is not required.")]),e._v(" "),a("p",[e._v("A "),a("code",[e._v("CompositeKafkaStreamsInfrastructureCustomizer")]),e._v(" is provided, for when you need to apply multiple customizers.")]),e._v(" "),a("h4",{attrs:{id:"_4-2-3-kafkastreams-micrometer-support"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-2-3-kafkastreams-micrometer-support"}},[e._v("#")]),e._v(" 4.2.3. KafkaStreams Micrometer Support")]),e._v(" "),a("p",[e._v("Introduced in version 2.5.3, you can configure a "),a("code",[e._v("KafkaStreamsMicrometerListener")]),e._v(" to automatically register micrometer meters for the "),a("code",[e._v("KafkaStreams")]),e._v(" object managed by the factory bean:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('streamsBuilderFactoryBean.addListener(new KafkaStreamsMicrometerListener(meterRegistry,\n Collections.singletonList(new ImmutableTag("customTag", "customTagValue"))));\n')])])]),a("h4",{attrs:{id:"_4-2-4-streams-json-serialization-and-deserialization"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-2-4-streams-json-serialization-and-deserialization"}},[e._v("#")]),e._v(" 4.2.4. Streams JSON Serialization and Deserialization")]),e._v(" "),a("p",[e._v("For serializing and deserializing data when reading or writing to topics or state stores in JSON format, Spring for Apache Kafka provides a "),a("code",[e._v("JsonSerde")]),e._v(" implementation that uses JSON, delegating to the "),a("code",[e._v("JsonSerializer")]),e._v(" and "),a("code",[e._v("JsonDeserializer")]),e._v(" described in "),a("a",{attrs:{href:"#serdes"}},[e._v("Serialization, Deserialization, and Message Conversion")]),e._v(".\nThe "),a("code",[e._v("JsonSerde")]),e._v(" implementation provides the same configuration options through its constructor (target type or "),a("code",[e._v("ObjectMapper")]),e._v(").\nIn the following example, we use the "),a("code",[e._v("JsonSerde")]),e._v(" to serialize and deserialize the "),a("code",[e._v("Cat")]),e._v(" payload of a Kafka stream (the "),a("code",[e._v("JsonSerde")]),e._v(" can be used in a similar fashion wherever an instance is required):")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('stream.through(Serdes.Integer(), new JsonSerde<>(Cat.class), "cats");\n')])])]),a("p",[e._v("When constructing the serializer/deserializer programmatically for use in the producer/consumer factory, since version 2.3, you can use the fluent API, which simplifies configuration.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('stream.through(new JsonSerde<>(MyKeyType.class)\n .forKeys()\n .noTypeInfo(),\n new JsonSerde<>(MyValueType.class)\n .noTypeInfo(),\n "myTypes");\n')])])]),a("h4",{attrs:{id:"_4-2-5-using-kafkastreambrancher"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-2-5-using-kafkastreambrancher"}},[e._v("#")]),e._v(" 4.2.5. Using "),a("code",[e._v("KafkaStreamBrancher")])]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("KafkaStreamBrancher")]),e._v(" class introduces a more convenient way to build conditional branches on top of "),a("code",[e._v("KStream")]),e._v(".")]),e._v(" "),a("p",[e._v("Consider the following example that does not use "),a("code",[e._v("KafkaStreamBrancher")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('KStream[] branches = builder.stream("source").branch(\n (key, value) -> value.contains("A"),\n (key, value) -> value.contains("B"),\n (key, value) -> true\n );\nbranches[0].to("A");\nbranches[1].to("B");\nbranches[2].to("C");\n')])])]),a("p",[e._v("The following example uses "),a("code",[e._v("KafkaStreamBrancher")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('new KafkaStreamBrancher()\n .branch((key, value) -> value.contains("A"), ks -> ks.to("A"))\n .branch((key, value) -> value.contains("B"), ks -> ks.to("B"))\n //default branch should not necessarily be defined in the end of the chain!\n .defaultBranch(ks -> ks.to("C"))\n .onTopOf(builder.stream("source"));\n //onTopOf method returns the provided stream so we can continue with method chaining\n')])])]),a("h4",{attrs:{id:"_4-2-6-configuration"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-2-6-configuration"}},[e._v("#")]),e._v(" 4.2.6. Configuration")]),e._v(" "),a("p",[e._v("To configure the Kafka Streams environment, the "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(" requires a "),a("code",[e._v("KafkaStreamsConfiguration")]),e._v(" instance.\nSee the Apache Kafka "),a("a",{attrs:{href:"https://kafka.apache.org/0102/documentation/#streamsconfigs",target:"_blank",rel:"noopener noreferrer"}},[e._v("documentation"),a("OutboundLink")],1),e._v(" for all possible options.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Starting with version 2.2, the stream configuration is now provided as a "),a("code",[e._v("KafkaStreamsConfiguration")]),e._v(" object, rather than as a "),a("code",[e._v("StreamsConfig")]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("To avoid boilerplate code for most cases, especially when you develop microservices, Spring for Apache Kafka provides the "),a("code",[e._v("@EnableKafkaStreams")]),e._v(" annotation, which you should place on a "),a("code",[e._v("@Configuration")]),e._v(" class.\nAll you need is to declare a "),a("code",[e._v("KafkaStreamsConfiguration")]),e._v(" bean named "),a("code",[e._v("defaultKafkaStreamsConfig")]),e._v(".\nA "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(" bean, named "),a("code",[e._v("defaultKafkaStreamsBuilder")]),e._v(", is automatically declared in the application context.\nYou can declare and use any additional "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(" beans as well.\nYou can perform additional customization of that bean, by providing a bean that implements "),a("code",[e._v("StreamsBuilderFactoryBeanConfigurer")]),e._v(".\nIf there are multiple such beans, they will be applied according to their "),a("code",[e._v("Ordered.order")]),e._v(" property.")]),e._v(" "),a("p",[e._v("By default, when the factory bean is stopped, the "),a("code",[e._v("KafkaStreams.cleanUp()")]),e._v(" method is called.\nStarting with version 2.1.2, the factory bean has additional constructors, taking a "),a("code",[e._v("CleanupConfig")]),e._v(" object that has properties to let you control whether the "),a("code",[e._v("cleanUp()")]),e._v(" method is called during "),a("code",[e._v("start()")]),e._v(" or "),a("code",[e._v("stop()")]),e._v(" or neither.\nStarting with version 2.7, the default is to never clean up local state.")]),e._v(" "),a("h4",{attrs:{id:"_4-2-7-header-enricher"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-2-7-header-enricher"}},[e._v("#")]),e._v(" 4.2.7. Header Enricher")]),e._v(" "),a("p",[e._v("Version 2.3 added the "),a("code",[e._v("HeaderEnricher")]),e._v(" implementation of "),a("code",[e._v("Transformer")]),e._v(".\nThis can be used to add headers within the stream processing; the header values are SpEL expressions; the root object of the expression evaluation has 3 properties:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("context")]),e._v(" - the "),a("code",[e._v("ProcessorContext")]),e._v(", allowing access to the current record metadata")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("key")]),e._v(" - the key of the current record")])]),e._v(" "),a("li",[a("p",[a("code",[e._v("value")]),e._v(" - the value of the current record")])])]),e._v(" "),a("p",[e._v("The expressions must return a "),a("code",[e._v("byte[]")]),e._v(" or a "),a("code",[e._v("String")]),e._v(" (which will be converted to "),a("code",[e._v("byte[]")]),e._v(" using "),a("code",[e._v("UTF-8")]),e._v(").")]),e._v(" "),a("p",[e._v("To use the enricher within a stream:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v(".transform(() -> enricher)\n")])])]),a("p",[e._v("The transformer does not change the "),a("code",[e._v("key")]),e._v(" or "),a("code",[e._v("value")]),e._v("; it simply adds headers.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If your stream is multi-threaded, you need a new instance for each record.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v(".transform(() -> new HeaderEnricher<..., ...>(expressionMap))\n")])])]),a("p",[e._v("Here is a simple example, adding one literal header and one variable:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('Map headers = new HashMap<>();\nheaders.put("header1", new LiteralExpression("value1"));\nSpelExpressionParser parser = new SpelExpressionParser();\nheaders.put("header2", parser.parseExpression("context.timestamp() + \' @\' + context.offset()"));\nHeaderEnricher enricher = new HeaderEnricher<>(headers);\nKStream stream = builder.stream(INPUT);\nstream\n .transform(() -> enricher)\n .to(OUTPUT);\n')])])]),a("h4",{attrs:{id:"_4-2-8-messagingtransformer"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-2-8-messagingtransformer"}},[e._v("#")]),e._v(" 4.2.8. "),a("code",[e._v("MessagingTransformer")])]),e._v(" "),a("p",[e._v("Version 2.3 added the "),a("code",[e._v("MessagingTransformer")]),e._v(" this allows a Kafka Streams topology to interact with a Spring Messaging component, such as a Spring Integration flow.\nThe transformer requires an implementation of "),a("code",[e._v("MessagingFunction")]),e._v(".")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@FunctionalInterface\npublic interface MessagingFunction {\n\n\tMessage exchange(Message message);\n\n}\n")])])]),a("p",[e._v("Spring Integration automatically provides an implementation using its "),a("code",[e._v("GatewayProxyFactoryBean")]),e._v(".\nIt also requires a "),a("code",[e._v("MessagingMessageConverter")]),e._v(" to convert the key, value and metadata (including headers) to/from a Spring Messaging "),a("code",[e._v("Message")]),e._v(".\nSee ["),a("a",{attrs:{href:"https://docs.spring.io/spring-integration/docs/current/reference/html/kafka.html#streams-integration",target:"_blank",rel:"noopener noreferrer"}},[e._v("Calling a Spring Integration Flow from a "),a("code",[e._v("KStream")]),a("OutboundLink")],1),e._v("] for more information.")]),e._v(" "),a("h4",{attrs:{id:"_4-2-9-recovery-from-deserialization-exceptions"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-2-9-recovery-from-deserialization-exceptions"}},[e._v("#")]),e._v(" 4.2.9. Recovery from Deserialization Exceptions")]),e._v(" "),a("p",[e._v("Version 2.3 introduced the "),a("code",[e._v("RecoveringDeserializationExceptionHandler")]),e._v(" which can take some action when a deserialization exception occurs.\nRefer to the Kafka documentation about "),a("code",[e._v("DeserializationExceptionHandler")]),e._v(", of which the "),a("code",[e._v("RecoveringDeserializationExceptionHandler")]),e._v(" is an implementation.\nThe "),a("code",[e._v("RecoveringDeserializationExceptionHandler")]),e._v(" is configured with a "),a("code",[e._v("ConsumerRecordRecoverer")]),e._v(" implementation.\nThe framework provides the "),a("code",[e._v("DeadLetterPublishingRecoverer")]),e._v(" which sends the failed record to a dead-letter topic.\nSee "),a("a",{attrs:{href:"#dead-letters"}},[e._v("Publishing Dead-letter Records")]),e._v(" for more information about this recoverer.")]),e._v(" "),a("p",[e._v("To configure the recoverer, add the following properties to your streams configuration:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME)\npublic KafkaStreamsConfiguration kStreamsConfigs() {\n Map props = new HashMap<>();\n ...\n props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,\n RecoveringDeserializationExceptionHandler.class);\n props.put(RecoveringDeserializationExceptionHandler.KSTREAM_DESERIALIZATION_RECOVERER, recoverer());\n ...\n return new KafkaStreamsConfiguration(props);\n}\n\n@Bean\npublic DeadLetterPublishingRecoverer recoverer() {\n return new DeadLetterPublishingRecoverer(kafkaTemplate(),\n (record, ex) -> new TopicPartition("recovererDLQ", -1));\n}\n')])])]),a("p",[e._v("Of course, the "),a("code",[e._v("recoverer()")]),e._v(" bean can be your own implementation of "),a("code",[e._v("ConsumerRecordRecoverer")]),e._v(".")]),e._v(" "),a("h4",{attrs:{id:"_4-2-10-kafka-streams-example"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-2-10-kafka-streams-example"}},[e._v("#")]),e._v(" 4.2.10. Kafka Streams Example")]),e._v(" "),a("p",[e._v("The following example combines all the topics we have covered in this chapter:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Configuration\n@EnableKafka\n@EnableKafkaStreams\npublic static class KafkaStreamsConfig {\n\n @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME)\n public KafkaStreamsConfiguration kStreamsConfigs() {\n Map props = new HashMap<>();\n props.put(StreamsConfig.APPLICATION_ID_CONFIG, "testStreams");\n props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");\n props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass().getName());\n props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());\n props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, WallclockTimestampExtractor.class.getName());\n return new KafkaStreamsConfiguration(props);\n }\n\n @Bean\n public StreamsBuilderFactoryBeanConfigurer configurer() {\n return fb -> fb.setStateListener((newState, oldState) -> {\n System.out.println("State transition from " + oldState + " to " + newState);\n });\n }\n\n @Bean\n public KStream kStream(StreamsBuilder kStreamBuilder) {\n KStream stream = kStreamBuilder.stream("streamingTopic1");\n stream\n .mapValues((ValueMapper) String::toUpperCase)\n .groupByKey()\n .windowedBy(TimeWindows.of(Duration.ofMillis(1000)))\n .reduce((String value1, String value2) -> value1 + value2,\n \t\tNamed.as("windowStore"))\n .toStream()\n .map((windowedId, value) -> new KeyValue<>(windowedId.key(), value))\n .filter((i, s) -> s.length() > 40)\n .to("streamingTopic2");\n\n stream.print(Printed.toSysOut());\n\n return stream;\n }\n\n}\n')])])]),a("h3",{attrs:{id:"_4-3-testing-applications"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-3-testing-applications"}},[e._v("#")]),e._v(" 4.3. Testing Applications")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("spring-kafka-test")]),e._v(" jar contains some useful utilities to assist with testing your applications.")]),e._v(" "),a("h4",{attrs:{id:"_4-3-1-kafkatestutils"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-3-1-kafkatestutils"}},[e._v("#")]),e._v(" 4.3.1. KafkaTestUtils")]),e._v(" "),a("p",[a("code",[e._v("o.s.kafka.test.utils.KafkaTestUtils")]),e._v(" provides a number of static helper methods to consume records, retrieve various record offsets, and others.\nRefer to its "),a("a",{attrs:{href:"https://docs.spring.io/spring-kafka/docs/current/api/org/springframework/kafka/test/utils/KafkaTestUtils.html",target:"_blank",rel:"noopener noreferrer"}},[e._v("Javadocs"),a("OutboundLink")],1),e._v(" for complete details.")]),e._v(" "),a("h4",{attrs:{id:"_4-3-2-junit"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-3-2-junit"}},[e._v("#")]),e._v(" 4.3.2. JUnit")]),e._v(" "),a("p",[a("code",[e._v("o.s.kafka.test.utils.KafkaTestUtils")]),e._v(" also provides some static methods to set up producer and consumer properties.\nThe following listing shows those method signatures:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("/**\n * Set up test properties for an {@code } consumer.\n * @param group the group id.\n * @param autoCommit the auto commit.\n * @param embeddedKafka a {@link EmbeddedKafkaBroker} instance.\n * @return the properties.\n */\npublic static Map consumerProps(String group, String autoCommit,\n EmbeddedKafkaBroker embeddedKafka) { ... }\n\n/**\n * Set up test properties for an {@code } producer.\n * @param embeddedKafka a {@link EmbeddedKafkaBroker} instance.\n * @return the properties.\n */\npublic static Map producerProps(EmbeddedKafkaBroker embeddedKafka) { ... }\n")])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Starting with version 2.5, the "),a("code",[e._v("consumerProps")]),e._v(" method sets the "),a("code",[e._v("ConsumerConfig.AUTO_OFFSET_RESET_CONFIG")]),e._v(" to "),a("code",[e._v("earliest")]),e._v("."),a("br"),e._v("This is because, in most cases, you want the consumer to consume any messages sent in a test case."),a("br"),e._v("The "),a("code",[e._v("ConsumerConfig")]),e._v(" default is "),a("code",[e._v("latest")]),e._v(" which means that messages already sent by a test, before the consumer starts, will not receive those records."),a("br"),e._v("To revert to the previous behavior, set the property to "),a("code",[e._v("latest")]),e._v(" after calling the method."),a("br"),a("br"),e._v("When using the embedded broker, it is generally best practice to use a different topic for each test, to prevent cross-talk."),a("br"),e._v("If this is not possible for some reason, note that the "),a("code",[e._v("consumeFromEmbeddedTopics")]),e._v(" method’s default behavior is to seek the assigned partitions to the beginning after assignment."),a("br"),e._v("Since it does not have access to the consumer properties, you must use the overloaded method that takes a "),a("code",[e._v("seekToEnd")]),e._v(" boolean parameter to seek to the end instead of the beginning.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("A JUnit 4 "),a("code",[e._v("@Rule")]),e._v(" wrapper for the "),a("code",[e._v("EmbeddedKafkaBroker")]),e._v(" is provided to create an embedded Kafka and an embedded Zookeeper server.\n(See "),a("a",{attrs:{href:"#embedded-kafka-annotation"}},[e._v("@EmbeddedKafka Annotation")]),e._v(" for information about using "),a("code",[e._v("@EmbeddedKafka")]),e._v(" with JUnit 5).\nThe following listing shows the signatures of those methods:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("/**\n * Create embedded Kafka brokers.\n * @param count the number of brokers.\n * @param controlledShutdown passed into TestUtils.createBrokerConfig.\n * @param topics the topics to create (2 partitions per).\n */\npublic EmbeddedKafkaRule(int count, boolean controlledShutdown, String... topics) { ... }\n\n/**\n *\n * Create embedded Kafka brokers.\n * @param count the number of brokers.\n * @param controlledShutdown passed into TestUtils.createBrokerConfig.\n * @param partitions partitions per topic.\n * @param topics the topics to create.\n */\npublic EmbeddedKafkaRule(int count, boolean controlledShutdown, int partitions, String... topics) { ... }\n")])])]),a("p",[e._v("The "),a("code",[e._v("EmbeddedKafkaBroker")]),e._v(" class has a utility method that lets you consume for all the topics it created.\nThe following example shows how to use it:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('Map consumerProps = KafkaTestUtils.consumerProps("testT", "false", embeddedKafka);\nDefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory(\n consumerProps);\nConsumer consumer = cf.createConsumer();\nembeddedKafka.consumeFromAllEmbeddedTopics(consumer);\n')])])]),a("p",[e._v("The "),a("code",[e._v("KafkaTestUtils")]),e._v(" has some utility methods to fetch results from the consumer.\nThe following listing shows those method signatures:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("/**\n * Poll the consumer, expecting a single record for the specified topic.\n * @param consumer the consumer.\n * @param topic the topic.\n * @return the record.\n * @throws org.junit.ComparisonFailure if exactly one record is not received.\n */\npublic static ConsumerRecord getSingleRecord(Consumer consumer, String topic) { ... }\n\n/**\n * Poll the consumer for records.\n * @param consumer the consumer.\n * @return the records.\n */\npublic static ConsumerRecords getRecords(Consumer consumer) { ... }\n")])])]),a("p",[e._v("The following example shows how to use "),a("code",[e._v("KafkaTestUtils")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('...\ntemplate.sendDefault(0, 2, "bar");\nConsumerRecord received = KafkaTestUtils.getSingleRecord(consumer, "topic");\n...\n')])])]),a("p",[e._v("When the embedded Kafka and embedded Zookeeper server are started by the "),a("code",[e._v("EmbeddedKafkaBroker")]),e._v(", a system property named "),a("code",[e._v("spring.embedded.kafka.brokers")]),e._v(" is set to the address of the Kafka brokers and a system property named "),a("code",[e._v("spring.embedded.zookeeper.connect")]),e._v(" is set to the address of Zookeeper.\nConvenient constants ("),a("code",[e._v("EmbeddedKafkaBroker.SPRING_EMBEDDED_KAFKA_BROKERS")]),e._v(" and "),a("code",[e._v("EmbeddedKafkaBroker.SPRING_EMBEDDED_ZOOKEEPER_CONNECT")]),e._v(") are provided for this property.")]),e._v(" "),a("p",[e._v("With the "),a("code",[e._v("EmbeddedKafkaBroker.brokerProperties(Map)")]),e._v(", you can provide additional properties for the Kafka servers.\nSee "),a("a",{attrs:{href:"https://kafka.apache.org/documentation/#brokerconfigs",target:"_blank",rel:"noopener noreferrer"}},[e._v("Kafka Config"),a("OutboundLink")],1),e._v(" for more information about possible broker properties.")]),e._v(" "),a("h4",{attrs:{id:"_4-3-3-configuring-topics"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-3-3-configuring-topics"}},[e._v("#")]),e._v(" 4.3.3. Configuring Topics")]),e._v(" "),a("p",[e._v("The following example configuration creates topics called "),a("code",[e._v("cat")]),e._v(" and "),a("code",[e._v("hat")]),e._v(" with five partitions, a topic called "),a("code",[e._v("thing1")]),e._v(" with 10 partitions, and a topic called "),a("code",[e._v("thing2")]),e._v(" with 15 partitions:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('public class MyTests {\n\n @ClassRule\n private static EmbeddedKafkaRule embeddedKafka = new EmbeddedKafkaRule(1, false, 5, "cat", "hat");\n\n @Test\n public void test() {\n embeddedKafkaRule.getEmbeddedKafka()\n .addTopics(new NewTopic("thing1", 10, (short) 1), new NewTopic("thing2", 15, (short) 1));\n ...\n }\n\n}\n')])])]),a("p",[e._v("By default, "),a("code",[e._v("addTopics")]),e._v(" will throw an exception when problems arise (such as adding a topic that already exists).\nVersion 2.6 added a new version of that method that returns a "),a("code",[e._v("Map")]),e._v("; the key is the topic name and the value is "),a("code",[e._v("null")]),e._v(" for success, or an "),a("code",[e._v("Exception")]),e._v(" for a failure.")]),e._v(" "),a("h4",{attrs:{id:"for-multiple-test-classes"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#for-multiple-test-classes"}},[e._v("#")]),e._v(" for Multiple Test Classes")]),e._v(" "),a("p",[e._v("There is no built-in support for doing so, but you can use the same broker for multiple test classes with something similar to the following:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('public final class EmbeddedKafkaHolder {\n\n private static EmbeddedKafkaBroker embeddedKafka = new EmbeddedKafkaBroker(1, false)\n .brokerListProperty("spring.kafka.bootstrap-servers");\n\n private static boolean started;\n\n public static EmbeddedKafkaBroker getEmbeddedKafka() {\n if (!started) {\n try {\n embeddedKafka.afterPropertiesSet();\n }\n catch (Exception e) {\n throw new KafkaException("Embedded broker failed to start", e);\n }\n started = true;\n }\n return embeddedKafka;\n }\n\n private EmbeddedKafkaHolder() {\n super();\n }\n\n}\n')])])]),a("p",[e._v("This assumes a Spring Boot environment and the embedded broker replaces the bootstrap servers property.")]),e._v(" "),a("p",[e._v("Then, in each test class, you can use something similar to the following:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('static {\n EmbeddedKafkaHolder.getEmbeddedKafka().addTopics("topic1", "topic2");\n}\n\nprivate static final EmbeddedKafkaBroker broker = EmbeddedKafkaHolder.getEmbeddedKafka();\n')])])]),a("p",[e._v("If you are not using Spring Boot, you can obtain the bootstrap servers using "),a("code",[e._v("broker.getBrokersAsString()")]),e._v(".")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The preceding example provides no mechanism for shutting down the broker(s) when all tests are complete."),a("br"),e._v("This could be a problem if, say, you run your tests in a Gradle daemon."),a("br"),e._v("You should not use this technique in such a situation, or you should use something to call "),a("code",[e._v("destroy()")]),e._v(" on the "),a("code",[e._v("EmbeddedKafkaBroker")]),e._v(" when your tests are complete.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h4",{attrs:{id:"_4-3-5-embeddedkafka-annotation"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-3-5-embeddedkafka-annotation"}},[e._v("#")]),e._v(" 4.3.5. @EmbeddedKafka Annotation")]),e._v(" "),a("p",[e._v("We generally recommend that you use the rule as a "),a("code",[e._v("@ClassRule")]),e._v(" to avoid starting and stopping the broker between tests (and use a different topic for each test).\nStarting with version 2.0, if you use Spring’s test application context caching, you can also declare a "),a("code",[e._v("EmbeddedKafkaBroker")]),e._v(" bean, so a single broker can be used across multiple test classes.\nFor convenience, we provide a test class-level annotation called "),a("code",[e._v("@EmbeddedKafka")]),e._v(" to register the "),a("code",[e._v("EmbeddedKafkaBroker")]),e._v(" bean.\nThe following example shows how to use it:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@RunWith(SpringRunner.class)\n@DirtiesContext\n@EmbeddedKafka(partitions = 1,\n topics = {\n KafkaStreamsTests.STREAMING_TOPIC1,\n KafkaStreamsTests.STREAMING_TOPIC2 })\npublic class KafkaStreamsTests {\n\n @Autowired\n private EmbeddedKafkaBroker embeddedKafka;\n\n @Test\n public void someTest() {\n Map consumerProps = KafkaTestUtils.consumerProps("testGroup", "true", this.embeddedKafka);\n consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");\n ConsumerFactory cf = new DefaultKafkaConsumerFactory<>(consumerProps);\n Consumer consumer = cf.createConsumer();\n this.embeddedKafka.consumeFromAnEmbeddedTopic(consumer, KafkaStreamsTests.STREAMING_TOPIC2);\n ConsumerRecords replies = KafkaTestUtils.getRecords(consumer);\n assertThat(replies.count()).isGreaterThanOrEqualTo(1);\n }\n\n @Configuration\n @EnableKafkaStreams\n public static class KafkaStreamsConfiguration {\n\n @Value("${" + EmbeddedKafkaBroker.SPRING_EMBEDDED_KAFKA_BROKERS + "}")\n private String brokerAddresses;\n\n @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME)\n public KafkaStreamsConfiguration kStreamsConfigs() {\n Map props = new HashMap<>();\n props.put(StreamsConfig.APPLICATION_ID_CONFIG, "testStreams");\n props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, this.brokerAddresses);\n return new KafkaStreamsConfiguration(props);\n }\n\n }\n\n}\n')])])]),a("p",[e._v("Starting with version 2.2.4, you can also use the "),a("code",[e._v("@EmbeddedKafka")]),e._v(" annotation to specify the Kafka ports property.")]),e._v(" "),a("p",[e._v("The following example sets the "),a("code",[e._v("topics")]),e._v(", "),a("code",[e._v("brokerProperties")]),e._v(", and "),a("code",[e._v("brokerPropertiesLocation")]),e._v(" attributes of "),a("code",[e._v("@EmbeddedKafka")]),e._v(" support property placeholder resolutions:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@TestPropertySource(locations = "classpath:/test.properties")\n@EmbeddedKafka(topics = { "any-topic", "${kafka.topics.another-topic}" },\n brokerProperties = { "log.dir=${kafka.broker.logs-dir}",\n "listeners=PLAINTEXT://localhost:${kafka.broker.port}",\n "auto.create.topics.enable=${kafka.broker.topics-enable:true}" },\n brokerPropertiesLocation = "classpath:/broker.properties")\n')])])]),a("p",[e._v("In the preceding example, the property placeholders "),a("code",[e._v("${kafka.topics.another-topic}")]),e._v(", "),a("code",[e._v("${kafka.broker.logs-dir}")]),e._v(", and "),a("code",[e._v("${kafka.broker.port}")]),e._v(" are resolved from the Spring "),a("code",[e._v("Environment")]),e._v(".\nIn addition, the broker properties are loaded from the "),a("code",[e._v("broker.properties")]),e._v(" classpath resource specified by the "),a("code",[e._v("brokerPropertiesLocation")]),e._v(".\nProperty placeholders are resolved for the "),a("code",[e._v("brokerPropertiesLocation")]),e._v(" URL and for any property placeholders found in the resource.\nProperties defined by "),a("code",[e._v("brokerProperties")]),e._v(" override properties found in "),a("code",[e._v("brokerPropertiesLocation")]),e._v(".")]),e._v(" "),a("p",[e._v("You can use the "),a("code",[e._v("@EmbeddedKafka")]),e._v(" annotation with JUnit 4 or JUnit 5.")]),e._v(" "),a("h4",{attrs:{id:"_4-3-6-embeddedkafka-annotation-with-junit5"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-3-6-embeddedkafka-annotation-with-junit5"}},[e._v("#")]),e._v(" 4.3.6. @EmbeddedKafka Annotation with JUnit5")]),e._v(" "),a("p",[e._v("Starting with version 2.3, there are two ways to use the "),a("code",[e._v("@EmbeddedKafka")]),e._v(" annotation with JUnit5.\nWhen used with the "),a("code",[e._v("@SpringJunitConfig")]),e._v(" annotation, the embedded broker is added to the test application context.\nYou can auto wire the broker into your test, at the class or method level, to get the broker address list.")]),e._v(" "),a("p",[e._v("When "),a("strong",[e._v("not")]),e._v(" using the spring test context, the "),a("code",[e._v("EmbdeddedKafkaCondition")]),e._v(" creates a broker; the condition includes a parameter resolver so you can access the broker in your test method…​")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@EmbeddedKafka\npublic class EmbeddedKafkaConditionTests {\n\n @Test\n public void test(EmbeddedKafkaBroker broker) {\n String brokerList = broker.getBrokersAsString();\n ...\n }\n\n}\n")])])]),a("p",[e._v("A stand-alone (not Spring test context) broker will be created if the class annotated with "),a("code",[e._v("@EmbeddedBroker")]),e._v(" is not also annotated (or meta annotated) with "),a("code",[e._v("ExtendedWith(SpringExtension.class)")]),e._v("."),a("code",[e._v("@SpringJunitConfig")]),e._v(" and "),a("code",[e._v("@SpringBootTest")]),e._v(" are so meta annotated and the context-based broker will be used when either of those annotations are also present.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("When there is a Spring test application context available, the topics and broker properties can contain property placeholders, which will be resolved as long as the property is defined somewhere."),a("br"),e._v("If there is no Spring context available, these placeholders won’t be resolved.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h4",{attrs:{id:"_4-3-7-embedded-broker-in-springboottest-annotations"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-3-7-embedded-broker-in-springboottest-annotations"}},[e._v("#")]),e._v(" 4.3.7. Embedded Broker in "),a("code",[e._v("@SpringBootTest")]),e._v(" Annotations")]),e._v(" "),a("p",[a("a",{attrs:{href:"https://start.spring.io/",target:"_blank",rel:"noopener noreferrer"}},[e._v("Spring Initializr"),a("OutboundLink")],1),e._v(" now automatically adds the "),a("code",[e._v("spring-kafka-test")]),e._v(" dependency in test scope to the project configuration.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If your application uses the Kafka binder in "),a("code",[e._v("spring-cloud-stream")]),e._v(" and if you want to use an embedded broker for tests, you must remove the "),a("code",[e._v("spring-cloud-stream-test-support")]),e._v(" dependency, because it replaces the real binder with a test binder for test cases."),a("br"),e._v("If you wish some tests to use the test binder and some to use the embedded broker, tests that use the real binder need to disable the test binder by excluding the binder auto configuration in the test class."),a("br"),e._v("The following example shows how to do so:"),a("br"),a("br"),a("code",[e._v('
@RunWith(SpringRunner.class)
@SpringBootTest(properties = "spring.autoconfigure.exclude="
+ "org.springframework.cloud.stream.test.binder.TestSupportBinderAutoConfiguration")
public class MyApplicationTests {
...
}
')])])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("There are several ways to use an embedded broker in a Spring Boot application test.")]),e._v(" "),a("p",[e._v("They include:")]),e._v(" "),a("ul",[a("li",[a("p",[a("a",{attrs:{href:"#kafka-testing-junit4-class-rule"}},[e._v("JUnit4 Class Rule")])])]),e._v(" "),a("li",[a("p",[a("a",{attrs:{href:"#kafka-testing-embeddedkafka-annotation"}},[a("code",[e._v("@EmbeddedKafka")]),e._v(" Annotation or "),a("code",[e._v("EmbeddedKafkaBroker")]),e._v(" Bean")])])])]),e._v(" "),a("h5",{attrs:{id:"junit4-class-rule"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#junit4-class-rule"}},[e._v("#")]),e._v(" JUnit4 Class Rule")]),e._v(" "),a("p",[e._v("The following example shows how to use a JUnit4 class rule to create an embedded broker:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@RunWith(SpringRunner.class)\n@SpringBootTest\npublic class MyApplicationTests {\n\n @ClassRule\n public static EmbeddedKafkaRule broker = new EmbeddedKafkaRule(1,\n false, "someTopic")\n .brokerListProperty("spring.kafka.bootstrap-servers");\n }\n\n @Autowired\n private KafkaTemplate template;\n\n @Test\n public void test() {\n ...\n }\n\n}\n')])])]),a("p",[e._v("Notice that, since this is a Spring Boot application, we override the broker list property to set Boot’s property.")]),e._v(" "),a("h5",{attrs:{id:"embeddedkafka-annotation-or-embeddedkafkabroker-bean"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#embeddedkafka-annotation-or-embeddedkafkabroker-bean"}},[e._v("#")]),e._v(" "),a("code",[e._v("@EmbeddedKafka")]),e._v(" Annotation or "),a("code",[e._v("EmbeddedKafkaBroker")]),e._v(" Bean")]),e._v(" "),a("p",[e._v("The following example shows how to use an "),a("code",[e._v("@EmbeddedKafka")]),e._v(" Annotation to create an embedded broker:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@RunWith(SpringRunner.class)\n@EmbeddedKafka(topics = "someTopic",\n bootstrapServersProperty = "spring.kafka.bootstrap-servers")\npublic class MyApplicationTests {\n\n @Autowired\n private KafkaTemplate template;\n\n @Test\n public void test() {\n ...\n }\n\n}\n')])])]),a("h4",{attrs:{id:"_4-3-8-hamcrest-matchers"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-3-8-hamcrest-matchers"}},[e._v("#")]),e._v(" 4.3.8. Hamcrest Matchers")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("o.s.kafka.test.hamcrest.KafkaMatchers")]),e._v(" provides the following matchers:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("/**\n * @param key the key\n * @param the type.\n * @return a Matcher that matches the key in a consumer record.\n */\npublic static Matcher> hasKey(K key) { ... }\n\n/**\n * @param value the value.\n * @param the type.\n * @return a Matcher that matches the value in a consumer record.\n */\npublic static Matcher> hasValue(V value) { ... }\n\n/**\n * @param partition the partition.\n * @return a Matcher that matches the partition in a consumer record.\n */\npublic static Matcher> hasPartition(int partition) { ... }\n\n/**\n * Matcher testing the timestamp of a {@link ConsumerRecord} assuming the topic has been set with\n * {@link org.apache.kafka.common.record.TimestampType#CREATE_TIME CreateTime}.\n *\n * @param ts timestamp of the consumer record.\n * @return a Matcher that matches the timestamp in a consumer record.\n */\npublic static Matcher> hasTimestamp(long ts) {\n return hasTimestamp(TimestampType.CREATE_TIME, ts);\n}\n\n/**\n * Matcher testing the timestamp of a {@link ConsumerRecord}\n * @param type timestamp type of the record\n * @param ts timestamp of the consumer record.\n * @return a Matcher that matches the timestamp in a consumer record.\n */\npublic static Matcher> hasTimestamp(TimestampType type, long ts) {\n return new ConsumerRecordTimestampMatcher(type, ts);\n}\n")])])]),a("h4",{attrs:{id:"_4-3-9-assertj-conditions"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-3-9-assertj-conditions"}},[e._v("#")]),e._v(" 4.3.9. AssertJ Conditions")]),e._v(" "),a("p",[e._v("You can use the following AssertJ conditions:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("/**\n * @param key the key\n * @param the type.\n * @return a Condition that matches the key in a consumer record.\n */\npublic static Condition> key(K key) { ... }\n\n/**\n * @param value the value.\n * @param the type.\n * @return a Condition that matches the value in a consumer record.\n */\npublic static Condition> value(V value) { ... }\n\n/**\n * @param key the key.\n * @param value the value.\n * @param the key type.\n * @param the value type.\n * @return a Condition that matches the key in a consumer record.\n * @since 2.2.12\n */\npublic static Condition> keyValue(K key, V value) { ... }\n\n/**\n * @param partition the partition.\n * @return a Condition that matches the partition in a consumer record.\n */\npublic static Condition> partition(int partition) { ... }\n\n/**\n * @param value the timestamp.\n * @return a Condition that matches the timestamp value in a consumer record.\n */\npublic static Condition> timestamp(long value) {\n return new ConsumerRecordTimestampCondition(TimestampType.CREATE_TIME, value);\n}\n\n/**\n * @param type the type of timestamp\n * @param value the timestamp.\n * @return a Condition that matches the timestamp value in a consumer record.\n */\npublic static Condition> timestamp(TimestampType type, long value) {\n return new ConsumerRecordTimestampCondition(type, value);\n}\n")])])]),a("h4",{attrs:{id:"_4-3-10-example"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-3-10-example"}},[e._v("#")]),e._v(" 4.3.10. Example")]),e._v(" "),a("p",[e._v("The following example brings together most of the topics covered in this chapter:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('public class KafkaTemplateTests {\n\n private static final String TEMPLATE_TOPIC = "templateTopic";\n\n @ClassRule\n public static EmbeddedKafkaRule embeddedKafka = new EmbeddedKafkaRule(1, true, TEMPLATE_TOPIC);\n\n @Test\n public void testTemplate() throws Exception {\n Map consumerProps = KafkaTestUtils.consumerProps("testT", "false",\n embeddedKafka.getEmbeddedKafka());\n DefaultKafkaConsumerFactory cf =\n new DefaultKafkaConsumerFactory(consumerProps);\n ContainerProperties containerProperties = new ContainerProperties(TEMPLATE_TOPIC);\n KafkaMessageListenerContainer container =\n new KafkaMessageListenerContainer<>(cf, containerProperties);\n final BlockingQueue> records = new LinkedBlockingQueue<>();\n container.setupMessageListener(new MessageListener() {\n\n @Override\n public void onMessage(ConsumerRecord record) {\n System.out.println(record);\n records.add(record);\n }\n\n });\n container.setBeanName("templateTests");\n container.start();\n ContainerTestUtils.waitForAssignment(container,\n embeddedKafka.getEmbeddedKafka().getPartitionsPerTopic());\n Map producerProps =\n KafkaTestUtils.producerProps(embeddedKafka.getEmbeddedKafka());\n ProducerFactory pf =\n new DefaultKafkaProducerFactory(producerProps);\n KafkaTemplate template = new KafkaTemplate<>(pf);\n template.setDefaultTopic(TEMPLATE_TOPIC);\n template.sendDefault("foo");\n assertThat(records.poll(10, TimeUnit.SECONDS), hasValue("foo"));\n template.sendDefault(0, 2, "bar");\n ConsumerRecord received = records.poll(10, TimeUnit.SECONDS);\n assertThat(received, hasKey(2));\n assertThat(received, hasPartition(0));\n assertThat(received, hasValue("bar"));\n template.send(TEMPLATE_TOPIC, 0, 2, "baz");\n received = records.poll(10, TimeUnit.SECONDS);\n assertThat(received, hasKey(2));\n assertThat(received, hasPartition(0));\n assertThat(received, hasValue("baz"));\n }\n\n}\n')])])]),a("p",[e._v("The preceding example uses the Hamcrest matchers.\nWith "),a("code",[e._v("AssertJ")]),e._v(", the final part looks like the following code:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('assertThat(records.poll(10, TimeUnit.SECONDS)).has(value("foo"));\ntemplate.sendDefault(0, 2, "bar");\nConsumerRecord received = records.poll(10, TimeUnit.SECONDS);\n// using individual assertions\nassertThat(received).has(key(2));\nassertThat(received).has(value("bar"));\nassertThat(received).has(partition(0));\ntemplate.send(TEMPLATE_TOPIC, 0, 2, "baz");\nreceived = records.poll(10, TimeUnit.SECONDS);\n// using allOf()\nassertThat(received).has(allOf(keyValue(2, "baz"), partition(0)));\n')])])]),a("h3",{attrs:{id:"_4-4-non-blocking-retries"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-4-non-blocking-retries"}},[e._v("#")]),e._v(" 4.4. Non-Blocking Retries")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("This is an experimental feature and the usual rule of no breaking API changes does not apply to this feature until the experimental designation is removed."),a("br"),e._v("Users are encouraged to try out the feature and provide feedback via GitHub Issues or GitHub discussions."),a("br"),e._v("This is regarding the API only; the feature is considered to be complete, and robust.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Achieving non-blocking retry / dlt functionality with Kafka usually requires setting up extra topics and creating and configuring the corresponding listeners.\nSince 2.7 Spring for Apache Kafka offers support for that via the "),a("code",[e._v("@RetryableTopic")]),e._v(" annotation and "),a("code",[e._v("RetryTopicConfiguration")]),e._v(" class to simplify that bootstrapping.")]),e._v(" "),a("h4",{attrs:{id:"_4-4-1-how-the-pattern-works"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-4-1-how-the-pattern-works"}},[e._v("#")]),e._v(" 4.4.1. How The Pattern Works")]),e._v(" "),a("p",[e._v("If message processing fails, the message is forwarded to a retry topic with a back off timestamp.\nThe retry topic consumer then checks the timestamp and if it’s not due it pauses the consumption for that topic’s partition.\nWhen it is due the partition consumption is resumed, and the message is consumed again.\nIf the message processing fails again the message will be forwarded to the next retry topic, and the pattern is repeated until a successful processing occurs, or the attempts are exhausted, and the message is sent to the Dead Letter Topic (if configured).")]),e._v(" "),a("p",[e._v('To illustrate, if you have a "main-topic" topic, and want to setup non-blocking retry with an exponential backoff of 1000ms with a multiplier of 2 and 4 max attempts, it will create the main-topic-retry-1000, main-topic-retry-2000, main-topic-retry-4000 and main-topic-dlt topics and configure the respective consumers.\nThe framework also takes care of creating the topics and setting up and configuring the listeners.')]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("By using this strategy you lose Kafka’s ordering guarantees for that topic.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("You can set the "),a("code",[e._v("AckMode")]),e._v(" mode you prefer, but "),a("code",[e._v("RECORD")]),e._v(" is suggested.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("At this time this functionality doesn’t support class level "),a("code",[e._v("@KafkaListener")]),e._v(" annotations")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h4",{attrs:{id:"_4-4-2-back-off-delay-precision"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-4-2-back-off-delay-precision"}},[e._v("#")]),e._v(" 4.4.2. Back Off Delay Precision")]),e._v(" "),a("h5",{attrs:{id:"overview-and-guarantees"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#overview-and-guarantees"}},[e._v("#")]),e._v(" Overview and Guarantees")]),e._v(" "),a("p",[e._v("All message processing and backing off is handled by the consumer thread, and, as such, delay precision is guaranteed on a best-effort basis.\nIf one message’s processing takes longer than the next message’s back off period for that consumer, the next message’s delay will be higher than expected.\nAlso, for short delays (about 1s or less), the maintenance work the thread has to do, such as committing offsets, may delay the message processing execution.\nThe precision can also be affected if the retry topic’s consumer is handling more than one partition, because we rely on waking up the consumer from polling and having full pollTimeouts to make timing adjustments.")]),e._v(" "),a("p",[e._v("That being said, for consumers handling a single partition the message’s processing should happen under 100ms after it’s exact due time for most situations.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("It is guaranteed that a message will never be processed before its due time.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"tuning-the-delay-precision"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#tuning-the-delay-precision"}},[e._v("#")]),e._v(" Tuning the Delay Precision")]),e._v(" "),a("p",[e._v("The message’s processing delay precision relies on two "),a("code",[e._v("ContainerProperties")]),e._v(": "),a("code",[e._v("ContainerProperties.pollTimeout")]),e._v(" and "),a("code",[e._v("ContainerProperties.idlePartitionEventInterval")]),e._v(".\nBoth properties will be automatically set in the retry topic and dlt’s "),a("code",[e._v("ListenerContainerFactory")]),e._v(" to one quarter of the smallest delay value for that topic, with a minimum value of 250ms and a maximum value of 5000ms.\nThese values will only be set if the property has its default values - if you change either value yourself your change will not be overridden.\nThis way you can tune the precision and performance for the retry topics if you need to.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("You can have separate "),a("code",[e._v("ListenerContainerFactory")]),e._v(" instances for the main and retry topics - this way you can have different settings to better suit your needs, such as having a higher polling timeout setting for the main topics and a lower one for the retry topics.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h4",{attrs:{id:"_4-4-3-configuration"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-4-3-configuration"}},[e._v("#")]),e._v(" 4.4.3. Configuration")]),e._v(" "),a("h5",{attrs:{id:"using-the-retryabletopic-annotation"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#using-the-retryabletopic-annotation"}},[e._v("#")]),e._v(" Using the "),a("code",[e._v("@RetryableTopic")]),e._v(" annotation")]),e._v(" "),a("p",[e._v("To configure the retry topic and dlt for a "),a("code",[e._v("@KafkaListener")]),e._v(" annotated method, you just have to add the "),a("code",[e._v("@RetryableTopic")]),e._v(" annotation to it and Spring for Apache Kafka will bootstrap all the necessary topics and consumers with the default configurations.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@RetryableTopic(kafkaTemplate = "myRetryableTopicKafkaTemplate")\n@KafkaListener(topics = "my-annotated-topic", groupId = "myGroupId")\npublic void processMessage(MyPojo message) {\n // ... message processing\n}\n')])])]),a("p",[e._v("You can specify a method in the same class to process the dlt messages by annotating it with the "),a("code",[e._v("@DltHandler")]),e._v(" annotation.\nIf no DltHandler method is provided a default consumer is created which only logs the consumption.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@DltHandler\npublic void processMessage(MyPojo message) {\n// ... message processing, persistence, etc\n}\n")])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If you don’t specify a kafkaTemplate name a bean with name "),a("code",[e._v("retryTopicDefaultKafkaTemplate")]),e._v(" will be looked up."),a("br"),e._v("If no bean is found an exception is thrown.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"using-retrytopicconfiguration-beans"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#using-retrytopicconfiguration-beans"}},[e._v("#")]),e._v(" Using "),a("code",[e._v("RetryTopicConfiguration")]),e._v(" beans")]),e._v(" "),a("p",[e._v("You can also configure the non-blocking retry support by creating "),a("code",[e._v("RetryTopicConfiguration")]),e._v(" beans in a "),a("code",[e._v("@Configuration")]),e._v(" annotated class.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic RetryTopicConfiguration myRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .create(template);\n}\n")])])]),a("p",[e._v("This will create retry topics and a dlt, as well as the corresponding consumers, for all topics in methods annotated with '@KafkaListener' using the default configurations. The "),a("code",[e._v("KafkaTemplate")]),e._v(" instance is required for message forwarding.")]),e._v(" "),a("p",[e._v("To achieve more fine-grained control over how to handle non-blocking retrials for each topic, more than one "),a("code",[e._v("RetryTopicConfiguration")]),e._v(" bean can be provided.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic RetryTopicConfiguration myRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .fixedBackoff(3000)\n .maxAttempts(5)\n .includeTopics("my-topic", "my-other-topic")\n .create(template);\n}\n\n@Bean\npublic RetryTopicConfiguration myOtherRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .exponentialBackoff(1000, 2, 5000)\n .maxAttempts(4)\n .excludeTopics("my-topic", "my-other-topic")\n .retryOn(MyException.class)\n .create(template);\n}\n')])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The retry topics' and dlt’s consumers will be assigned to a consumer group with a group id that is the combination of the one with you provide in the "),a("code",[e._v("groupId")]),e._v(" parameter of the "),a("code",[e._v("@KafkaListener")]),e._v(" annotation with the topic’s suffix. If you don’t provide any they’ll all belong to the same group, and rebalance on a retry topic will cause an unnecessary rebalance on the main topic.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If the consumer is configured with an "),a("a",{attrs:{href:"#error-handling-deserializer"}},[a("code",[e._v("ErrorHandlingDeserializer")])]),e._v(", to handle deserilialization exceptions, it is important to configure the "),a("code",[e._v("KafkaTemplate")]),e._v(" and its producer with a serializer that can handle normal objects as well as raw "),a("code",[e._v("byte[]")]),e._v(" values, which result from deserialization exceptions."),a("br"),e._v("The generic value type of the template should be "),a("code",[e._v("Object")]),e._v("."),a("br"),e._v("One technique is to use the "),a("code",[e._v("DelegatingByTypeSerializer")]),e._v("; an example follows:")])])]),e._v(" "),a("tbody")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic ProducerFactory producerFactory() {\n return new DefaultKafkaProducerFactory<>(producerConfiguration(), new StringSerializer(),\n new DelegatingByTypeSerializer(Map.of(byte[].class, new ByteArraySerializer(),\n MyNormalObject.class, new JsonSerializer())));\n}\n\n@Bean\npublic KafkaTemplate kafkaTemplate() {\n return new KafkaTemplate<>(producerFactory());\n}\n")])])]),a("h4",{attrs:{id:"_4-4-4-features"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-4-4-features"}},[e._v("#")]),e._v(" 4.4.4. Features")]),e._v(" "),a("p",[e._v("Most of the features are available both for the "),a("code",[e._v("@RetryableTopic")]),e._v(" annotation and the "),a("code",[e._v("RetryTopicConfiguration")]),e._v(" beans.")]),e._v(" "),a("h5",{attrs:{id:"backoff-configuration"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#backoff-configuration"}},[e._v("#")]),e._v(" BackOff Configuration")]),e._v(" "),a("p",[e._v("The BackOff configuration relies on the "),a("code",[e._v("BackOffPolicy")]),e._v(" interface from the "),a("code",[e._v("Spring Retry")]),e._v(" project.")]),e._v(" "),a("p",[e._v("It includes:")]),e._v(" "),a("ul",[a("li",[a("p",[e._v("Fixed Back Off")])]),e._v(" "),a("li",[a("p",[e._v("Exponential Back Off")])]),e._v(" "),a("li",[a("p",[e._v("Random Exponential Back Off")])]),e._v(" "),a("li",[a("p",[e._v("Uniform Random Back Off")])]),e._v(" "),a("li",[a("p",[e._v("No Back Off")])]),e._v(" "),a("li",[a("p",[e._v("Custom Back Off")])])]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@RetryableTopic(attempts = 5,\n backoff = @Backoff(delay = 1000, multiplier = 2, maxDelay = 5000))\n@KafkaListener(topics = "my-annotated-topic")\npublic void processMessage(MyPojo message) {\n // ... message processing\n}\n')])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic RetryTopicConfiguration myRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .fixedBackoff(3000)\n .maxAttempts(4)\n .build();\n}\n")])])]),a("p",[e._v("You can also provide a custom implementation of Spring Retry’s "),a("code",[e._v("SleepingBackOffPolicy")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic RetryTopicConfiguration myRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .customBackOff(new MyCustomBackOffPolicy())\n .maxAttempts(5)\n .build();\n}\n")])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The default backoff policy is FixedBackOffPolicy with a maximum of 3 attempts and 1000ms intervals.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The first attempt counts against the maxAttempts, so if you provide a maxAttempts value of 4 there’ll be the original attempt plus 3 retries.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"single-topic-fixed-delay-retries"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#single-topic-fixed-delay-retries"}},[e._v("#")]),e._v(" Single Topic Fixed Delay Retries")]),e._v(" "),a("p",[e._v("If you’re using fixed delay policies such as "),a("code",[e._v("FixedBackOffPolicy")]),e._v(" or "),a("code",[e._v("NoBackOffPolicy")]),e._v(" you can use a single topic to accomplish the non-blocking retries.\nThis topic will be suffixed with the provided or default suffix, and will not have either the index or the delay values appended.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@RetryableTopic(backoff = @Backoff(2000), fixedDelayTopicStrategy = FixedDelayStrategy.SINGLE_TOPIC)\n@KafkaListener(topics = "my-annotated-topic")\npublic void processMessage(MyPojo message) {\n // ... message processing\n}\n')])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic RetryTopicConfiguration myRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .fixedBackoff(3000)\n .maxAttempts(5)\n .useSingleTopicForFixedDelays()\n .build();\n}\n")])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The default behavior is creating separate retry topics for each attempt, appended with their index value: retry-0, retry-1, …​")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"global-timeout"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#global-timeout"}},[e._v("#")]),e._v(" Global timeout")]),e._v(" "),a("p",[e._v("You can set the global timeout for the retrying process.\nIf that time is reached, the next time the consumer throws an exception the message goes straight to the DLT, or just ends the processing if no DLT is available.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@RetryableTopic(backoff = @Backoff(2000), timeout = 5000)\n@KafkaListener(topics = "my-annotated-topic")\npublic void processMessage(MyPojo message) {\n // ... message processing\n}\n')])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic RetryTopicConfiguration myRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .fixedBackoff(2000)\n .timeoutAfter(5000)\n .build();\n}\n")])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The default is having no timeout set, which can also be achieved by providing -1 as the timout value.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"exception-classifier"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#exception-classifier"}},[e._v("#")]),e._v(" Exception Classifier")]),e._v(" "),a("p",[e._v("You can specify which exceptions you want to retry on and which not to.\nYou can also set it to traverse the causes to lookup nested exceptions.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@RetryableTopic(include = {MyRetryException.class, MyOtherRetryException.class}, traversingCauses = true)\n@KafkaListener(topics = "my-annotated-topic")\npublic void processMessage(MyPojo message) {\n throw new RuntimeException(new MyRetryException()); // Will retry\n}\n')])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic RetryTopicConfiguration myRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .notRetryOn(MyDontRetryException.class)\n .create(template);\n}\n")])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The default behavior is retrying on all exceptions and not traversing causes.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Since 2.8.3 there’s a global list of fatal exceptions which will cause the record to be sent to the DLT without any retries.\nSee "),a("a",{attrs:{href:"#default-eh"}},[e._v("DefaultErrorHandler")]),e._v(" for the default list of fatal exceptions.\nYou can add or remove exceptions to and from this list with:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean(name = RetryTopicInternalBeanNames.DESTINATION_TOPIC_CONTAINER_NAME)\npublic DefaultDestinationTopicResolver topicResolver(ApplicationContext applicationContext,\n @Qualifier(RetryTopicInternalBeanNames\n .INTERNAL_BACKOFF_CLOCK_BEAN_NAME) Clock clock) {\n DefaultDestinationTopicResolver ddtr = new DefaultDestinationTopicResolver(clock, applicationContext);\n ddtr.addNotRetryableExceptions(MyFatalException.class);\n ddtr.removeNotRetryableException(ConversionException.class);\n return ddtr;\n}\n")])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("To disable fatal exceptions' classification, clear the default list using the "),a("code",[e._v("setClassifications")]),e._v(" method in "),a("code",[e._v("DefaultDestinationTopicResolver")]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"include-and-exclude-topics"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#include-and-exclude-topics"}},[e._v("#")]),e._v(" Include and Exclude Topics")]),e._v(" "),a("p",[e._v("You can decide which topics will and will not be handled by a "),a("code",[e._v("RetryTopicConfiguration")]),e._v(" bean via the .includeTopic(String topic), .includeTopics(Collection topics) .excludeTopic(String topic) and .excludeTopics(Collection topics) methods.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic RetryTopicConfiguration myRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .includeTopics(List.of("my-included-topic", "my-other-included-topic"))\n .create(template);\n}\n\n@Bean\npublic RetryTopicConfiguration myOtherRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .excludeTopic("my-excluded-topic")\n .create(template);\n}\n')])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The default behavior is to include all topics.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"topics-autocreation"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#topics-autocreation"}},[e._v("#")]),e._v(" Topics AutoCreation")]),e._v(" "),a("p",[e._v("Unless otherwise specified the framework will auto create the required topics using "),a("code",[e._v("NewTopic")]),e._v(" beans that are consumed by the "),a("code",[e._v("KafkaAdmin")]),e._v(" bean.\nYou can specify the number of partitions and the replication factor with which the topics will be created, and you can turn this feature off.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Note that if you’re not using Spring Boot you’ll have to provide a KafkaAdmin bean in order to use this feature.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@RetryableTopic(numPartitions = 2, replicationFactor = 3)\n@KafkaListener(topics = "my-annotated-topic")\npublic void processMessage(MyPojo message) {\n // ... message processing\n}\n\n@RetryableTopic(autoCreateTopics = false)\n@KafkaListener(topics = "my-annotated-topic")\npublic void processMessage(MyPojo message) {\n // ... message processing\n}\n')])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic RetryTopicConfiguration myRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .autoCreateTopicsWith(2, 3)\n .create(template);\n}\n\n@Bean\npublic RetryTopicConfiguration myOtherRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .doNotAutoCreateRetryTopics()\n .create(template);\n}\n")])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("By default the topics are autocreated with one partition and a replication factor of one.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"failure-header-management"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#failure-header-management"}},[e._v("#")]),e._v(" Failure Header Management")]),e._v(" "),a("p",[e._v("When considering how to manage failure headers (original headers and exception headers), the framework delegates to the "),a("code",[e._v("DeadLetterPublishingRecover")]),e._v(" to decide whether to append or replace the headers.")]),e._v(" "),a("p",[e._v("By default, it explicitly sets "),a("code",[e._v("appendOriginalHeaders")]),e._v(" to "),a("code",[e._v("false")]),e._v(" and leaves "),a("code",[e._v("stripPreviousExceptionHeaders")]),e._v(" to the default used by the "),a("code",[e._v("DeadLetterPublishingRecover")]),e._v(".")]),e._v(" "),a("p",[e._v('This means that only the first "original" and last exception headers are retained with the default configuration.\nThis is to avoid creation of excessively large messages (due to the stack trace header, for example) when many retry steps are involved.')]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#dlpr-headers"}},[e._v("Managing Dead Letter Record Headers")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("To reconfigure the framework to use different settings for these properties, replace the standard "),a("code",[e._v("DeadLetterPublishingRecovererFactory")]),e._v(" bean by adding a "),a("code",[e._v("recovererCustomizer")]),e._v(":")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean(RetryTopicInternalBeanNames.DEAD_LETTER_PUBLISHING_RECOVERER_FACTORY_BEAN_NAME)\nDeadLetterPublishingRecovererFactory factory(DestinationTopicResolver resolver) {\n DeadLetterPublishingRecovererFactory factory = new DeadLetterPublishingRecovererFactory(resolver);\n factory.setDeadLetterPublishingRecovererCustomizer(dlpr -> {\n dlpr.appendOriginalHeaders(true);\n dlpr.setStripPreviousExceptionHeaders(false);\n });\n return factory;\n}\n")])])]),a("h4",{attrs:{id:"_4-4-5-topic-naming"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-4-5-topic-naming"}},[e._v("#")]),e._v(" 4.4.5. Topic Naming")]),e._v(" "),a("p",[e._v("Retry topics and DLT are named by suffixing the main topic with a provided or default value, appended by either the delay or index for that topic.")]),e._v(" "),a("p",[e._v("Examples:")]),e._v(" "),a("p",[e._v('"my-topic" → "my-topic-retry-0", "my-topic-retry-1", …​, "my-topic-dlt"')]),e._v(" "),a("p",[e._v('"my-other-topic" → "my-topic-myRetrySuffix-1000", "my-topic-myRetrySuffix-2000", …​, "my-topic-myDltSuffix".')]),e._v(" "),a("h5",{attrs:{id:"retry-topics-and-dlt-suffixes"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#retry-topics-and-dlt-suffixes"}},[e._v("#")]),e._v(" Retry Topics and Dlt Suffixes")]),e._v(" "),a("p",[e._v("You can specify the suffixes that will be used by the retry and dlt topics.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@RetryableTopic(retryTopicSuffix = "-my-retry-suffix", dltTopicSuffix = "-my-dlt-suffix")\n@KafkaListener(topics = "my-annotated-topic")\npublic void processMessage(MyPojo message) {\n // ... message processing\n}\n')])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic RetryTopicConfiguration myRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .retryTopicSuffix("-my-retry-suffix")\n .dltTopicSuffix("-my-dlt-suffix")\n .create(template);\n}\n')])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v('The default suffixes are "-retry" and "-dlt", for retry topics and dlt respectively.')])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"appending-the-topic-s-index-or-delay"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#appending-the-topic-s-index-or-delay"}},[e._v("#")]),e._v(" Appending the Topic’s Index or Delay")]),e._v(" "),a("p",[e._v("You can either append the topic’s index or delay values after the suffix.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@RetryableTopic(topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE)\n@KafkaListener(topics = "my-annotated-topic")\npublic void processMessage(MyPojo message) {\n // ... message processing\n}\n')])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic RetryTopicConfiguration myRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .suffixTopicsWithIndexValues()\n .create(template);\n }\n")])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The default behavior is to suffix with the delay values, except for fixed delay configurations with multiple topics, in which case the topics are suffixed with the topic’s index.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("h5",{attrs:{id:"custom-naming-strategies"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#custom-naming-strategies"}},[e._v("#")]),e._v(" Custom naming strategies")]),e._v(" "),a("p",[e._v("More complex naming strategies can be accomplished by registering a bean that implements "),a("code",[e._v("RetryTopicNamesProviderFactory")]),e._v(". The default implementation is "),a("code",[e._v("SuffixingRetryTopicNamesProviderFactory")]),e._v(" and a different implementation can be registered in the following way:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic RetryTopicNamesProviderFactory myRetryNamingProviderFactory() {\n return new CustomRetryTopicNamesProviderFactory();\n}\n")])])]),a("p",[e._v("As an example the following implementation, in addition to the standard suffix, adds a prefix to retry/dl topics names:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('public class CustomRetryTopicNamesProviderFactory implements RetryTopicNamesProviderFactory {\n\n\t@Override\n public RetryTopicNamesProvider createRetryTopicNamesProvider(\n DestinationTopic.Properties properties) {\n\n if(properties.isMainEndpoint()) {\n return new SuffixingRetryTopicNamesProvider(properties);\n }\n else {\n return new SuffixingRetryTopicNamesProvider(properties) {\n\n @Override\n public String getTopicName(String topic) {\n return "my-prefix-" + super.getTopicName(topic);\n }\n\n };\n }\n }\n\n}\n')])])]),a("h4",{attrs:{id:"_4-4-6-dlt-strategies"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-4-6-dlt-strategies"}},[e._v("#")]),e._v(" 4.4.6. Dlt Strategies")]),e._v(" "),a("p",[e._v("The framework provides a few strategies for working with DLTs. You can provide a method for DLT processing, use the default logging method, or have no DLT at all. Also you can choose what happens if DLT processing fails.")]),e._v(" "),a("h5",{attrs:{id:"dlt-processing-method"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#dlt-processing-method"}},[e._v("#")]),e._v(" Dlt Processing Method")]),e._v(" "),a("p",[e._v("You can specify the method used to process the Dlt for the topic, as well as the behavior if that processing fails.")]),e._v(" "),a("p",[e._v("To do that you can use the "),a("code",[e._v("@DltHandler")]),e._v(" annotation in a method of the class with the "),a("code",[e._v("@RetryableTopic")]),e._v(" annotation(s).\nNote that the same method will be used for all the "),a("code",[e._v("@RetryableTopic")]),e._v(" annotated methods within that class.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@RetryableTopic\n@KafkaListener(topics = "my-annotated-topic")\npublic void processMessage(MyPojo message) {\n // ... message processing\n}\n\n@DltHandler\npublic void processMessage(MyPojo message) {\n// ... message processing, persistence, etc\n}\n')])])]),a("p",[e._v("The DLT handler method can also be provided through the RetryTopicConfigurationBuilder.dltHandlerMethod(String, String) method, passing as arguments the bean name and method name that should process the DLT’s messages.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic RetryTopicConfiguration myRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .dltProcessor("myCustomDltProcessor", "processDltMessage")\n .create(template);\n}\n\n@Component\npublic class MyCustomDltProcessor {\n\n private final MyDependency myDependency;\n\n public MyCustomDltProcessor(MyDependency myDependency) {\n this.myDependency = myDependency;\n }\n\n public void processDltMessage(MyPojo message) {\n // ... message processing, persistence, etc\n }\n}\n')])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("If no DLT handler is provided, the default RetryTopicConfigurer.LoggingDltListenerHandlerMethod is used.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Starting with version 2.8, if you don’t want to consume from the DLT in this application at all, including by the default handler (or you wish to defer consumption), you can control whether or not the DLT container starts, independent of the container factory’s "),a("code",[e._v("autoStartup")]),e._v(" property.")]),e._v(" "),a("p",[e._v("When using the "),a("code",[e._v("@RetryableTopic")]),e._v(" annotation, set the "),a("code",[e._v("autoStartDltHandler")]),e._v(" property to "),a("code",[e._v("false")]),e._v("; when using the configuration builder, use "),a("code",[e._v(".autoStartDltHandler(false)")]),e._v(" .")]),e._v(" "),a("p",[e._v("You can later start the DLT handler via the "),a("code",[e._v("KafkaListenerEndpointRegistry")]),e._v(".")]),e._v(" "),a("h5",{attrs:{id:"dlt-failure-behavior"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#dlt-failure-behavior"}},[e._v("#")]),e._v(" DLT Failure Behavior")]),e._v(" "),a("p",[e._v("Should the DLT processing fail, there are two possible behaviors available: "),a("code",[e._v("ALWAYS_RETRY_ON_ERROR")]),e._v(" and "),a("code",[e._v("FAIL_ON_ERROR")]),e._v(".")]),e._v(" "),a("p",[e._v("In the former the record is forwarded back to the DLT topic so it doesn’t block other DLT records' processing.\nIn the latter the consumer ends the execution without forwarding the message.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@RetryableTopic(dltProcessingFailureStrategy =\n\t\t\tDltStrategy.FAIL_ON_ERROR)\n@KafkaListener(topics = "my-annotated-topic")\npublic void processMessage(MyPojo message) {\n // ... message processing\n}\n')])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic RetryTopicConfiguration myRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .dltProcessor(MyCustomDltProcessor.class, "processDltMessage")\n .doNotRetryOnDltFailure()\n .create(template);\n}\n')])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("The default behavior is to "),a("code",[e._v("ALWAYS_RETRY_ON_ERROR")]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Starting with version 2.8.3, "),a("code",[e._v("ALWAYS_RETRY_ON_ERROR")]),e._v(" will NOT route a record back to the DLT if the record causes a fatal exception to be thrown,"),a("br"),e._v("such as a "),a("code",[e._v("DeserializationException")]),e._v(" because, generally, such exceptions will always be thrown.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Exceptions that are considered fatal are:")]),e._v(" "),a("ul",[a("li",[a("p",[a("code",[e._v("DeserializationException")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("MessageConversionException")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ConversionException")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("MethodArgumentResolutionException")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("NoSuchMethodException")])])]),e._v(" "),a("li",[a("p",[a("code",[e._v("ClassCastException")])])])]),e._v(" "),a("p",[e._v("You can add exceptions to and remove exceptions from this list using methods on the "),a("code",[e._v("DestinationTopicResolver")]),e._v(" bean.")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#retry-topic-ex-classifier"}},[e._v("Exception Classifier")]),e._v(" for more information.")]),e._v(" "),a("h5",{attrs:{id:"configuring-no-dlt"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#configuring-no-dlt"}},[e._v("#")]),e._v(" Configuring No DLT")]),e._v(" "),a("p",[e._v("The framework also provides the possibility of not configuring a DLT for the topic.\nIn this case after retrials are exhausted the processing simply ends.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@RetryableTopic(dltProcessingFailureStrategy =\n\t\t\tDltStrategy.NO_DLT)\n@KafkaListener(topics = "my-annotated-topic")\npublic void processMessage(MyPojo message) {\n // ... message processing\n}\n')])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean\npublic RetryTopicConfiguration myRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .doNotConfigureDlt()\n .create(template);\n}\n")])])]),a("h4",{attrs:{id:"_4-4-7-specifying-a-listenercontainerfactory"}},[a("a",{staticClass:"header-anchor",attrs:{href:"#_4-4-7-specifying-a-listenercontainerfactory"}},[e._v("#")]),e._v(" 4.4.7. Specifying a ListenerContainerFactory")]),e._v(" "),a("p",[e._v("By default the RetryTopic configuration will use the provided factory from the "),a("code",[e._v("@KafkaListener")]),e._v(" annotation, but you can specify a different one to be used to create the retry topic and dlt listener containers.")]),e._v(" "),a("p",[e._v("For the "),a("code",[e._v("@RetryableTopic")]),e._v(" annotation you can provide the factory’s bean name, and using the "),a("code",[e._v("RetryTopicConfiguration")]),e._v(" bean you can either provide the bean name or the instance itself.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@RetryableTopic(listenerContainerFactory = "my-retry-topic-factory")\n@KafkaListener(topics = "my-annotated-topic")\npublic void processMessage(MyPojo message) {\n // ... message processing\n}\n')])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Bean\npublic RetryTopicConfiguration myRetryTopic(KafkaTemplate template,\n ConcurrentKafkaListenerContainerFactory factory) {\n\n return RetryTopicConfigurationBuilder\n .newInstance()\n .listenerFactory(factory)\n .create(template);\n}\n\n@Bean\npublic RetryTopicConfiguration myOtherRetryTopic(KafkaTemplate template) {\n return RetryTopicConfigurationBuilder\n .newInstance()\n .listenerFactory("my-retry-topic-factory")\n .create(template);\n}\n')])])]),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("Since 2.8.3 you can use the same factory for retryable and non-retryable topics.")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("If you need to revert the factory configuration behavior to prior 2.8.3, you can replace the standard "),a("code",[e._v("RetryTopicConfigurer")]),e._v(" bean and set "),a("code",[e._v("useLegacyFactoryConfigurer")]),e._v(" to "),a("code",[e._v("true")]),e._v(", such as:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean(name = RetryTopicInternalBeanNames.RETRY_TOPIC_CONFIGURER)\npublic RetryTopicConfigurer retryTopicConfigurer(DestinationTopicProcessor destinationTopicProcessor,\n ListenerContainerFactoryResolver containerFactoryResolver,\n ListenerContainerFactoryConfigurer listenerContainerFactoryConfigurer,\n BeanFactory beanFactory,\n RetryTopicNamesProviderFactory retryTopicNamesProviderFactory) {\n RetryTopicConfigurer retryTopicConfigurer = new RetryTopicConfigurer(destinationTopicProcessor, containerFactoryResolver, listenerContainerFactoryConfigurer, beanFactory, retryTopicNamesProviderFactory);\n retryTopicConfigurer.useLegacyFactoryConfigurer(true);\n return retryTopicConfigurer;\n}\n")])])]),a("p",[e._v("==== Changing KafkaBackOffException Logging Level")]),e._v(" "),a("p",[e._v("When a message in the retry topic is not due for consumption, a "),a("code",[e._v("KafkaBackOffException")]),e._v(" is thrown. Such exceptions are logged by default at "),a("code",[e._v("DEBUG")]),e._v(" level, but you can change this behavior by setting an error handler customizer in the "),a("code",[e._v("ListenerContainerFactoryConfigurer")]),e._v(" in a "),a("code",[e._v("@Configuration")]),e._v(" class.")]),e._v(" "),a("p",[e._v("For example, to change the logging level to WARN you might add:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("@Bean(name = RetryTopicInternalBeanNames.LISTENER_CONTAINER_FACTORY_CONFIGURER_NAME)\npublic ListenerContainerFactoryConfigurer listenerContainer(KafkaConsumerBackoffManager kafkaConsumerBackoffManager,\n DeadLetterPublishingRecovererFactory deadLetterPublishingRecovererFactory,\n @Qualifier(RetryTopicInternalBeanNames\n .INTERNAL_BACKOFF_CLOCK_BEAN_NAME) Clock clock) {\n ListenerContainerFactoryConfigurer configurer = new ListenerContainerFactoryConfigurer(kafkaConsumerBackoffManager, deadLetterPublishingRecovererFactory, clock);\n configurer.setErrorHandlerCustomizer(commonErrorHandler -> ((DefaultErrorHandler) commonErrorHandler).setLogLevel(KafkaException.Level.WARN));\n return configurer;\n}\n")])])]),a("p",[e._v("== Tips, Tricks and Examples")]),e._v(" "),a("p",[e._v("=== Manually Assigning All Partitions")]),e._v(" "),a("p",[e._v("Let’s say you want to always read all records from all partitions (such as when using a compacted topic to load a distributed cache), it can be useful to manually assign the partitions and not use Kafka’s group management.\nDoing so can be unwieldy when there are many partitions, because you have to list the partitions.\nIt’s also an issue if the number of partitions changes over time, because you would have to recompile your application each time the partition count changes.")]),e._v(" "),a("p",[e._v("The following is an example of how to use the power of a SpEL expression to create the partition list dynamically when the application starts:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@KafkaListener(topicPartitions = @TopicPartition(topic = "compacted",\n partitions = "#{@finder.partitions(\'compacted\')}"),\n partitionOffsets = @PartitionOffset(partition = "*", initialOffset = "0")))\npublic void listen(@Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) String key, String payload) {\n ...\n}\n\n@Bean\npublic PartitionFinder finder(ConsumerFactory consumerFactory) {\n return new PartitionFinder(consumerFactory);\n}\n\npublic static class PartitionFinder {\n\n private final ConsumerFactory consumerFactory;\n\n public PartitionFinder(ConsumerFactory consumerFactory) {\n this.consumerFactory = consumerFactory;\n }\n\n public String[] partitions(String topic) {\n try (Consumer consumer = consumerFactory.createConsumer()) {\n return consumer.partitionsFor(topic).stream()\n .map(pi -> "" + pi.partition())\n .toArray(String[]::new);\n }\n }\n\n}\n')])])]),a("p",[e._v("Using this in conjunction with "),a("code",[e._v("ConsumerConfig.AUTO_OFFSET_RESET_CONFIG=earliest")]),e._v(" will load all records each time the application is started.\nYou should also set the container’s "),a("code",[e._v("AckMode")]),e._v(" to "),a("code",[e._v("MANUAL")]),e._v(" to prevent the container from committing offsets for a "),a("code",[e._v("null")]),e._v(" consumer group.\nHowewever, starting with version 2.5.5, as shown above, you can apply an initial offset to all partitions; see "),a("a",{attrs:{href:"#manual-assignment"}},[e._v("Explicit Partition Assignment")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("=== Examples of Kafka Transactions with Other Transaction Managers")]),e._v(" "),a("p",[e._v("The following Spring Boot application is an example of chaining database and Kafka transactions.\nThe listener container starts the Kafka transaction and the "),a("code",[e._v("@Transactional")]),e._v(" annotation starts the DB transaction.\nThe DB transaction is committed first; if the Kafka transaction fails to commit, the record will be redelivered so the DB update should be idempotent.")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@SpringBootApplication\npublic class Application {\n\n public static void main(String[] args) {\n SpringApplication.run(Application.class, args);\n }\n\n @Bean\n public ApplicationRunner runner(KafkaTemplate template) {\n return args -> template.executeInTransaction(t -> t.send("topic1", "test"));\n }\n\n @Bean\n public DataSourceTransactionManager dstm(DataSource dataSource) {\n return new DataSourceTransactionManager(dataSource);\n }\n\n @Component\n public static class Listener {\n\n private final JdbcTemplate jdbcTemplate;\n\n private final KafkaTemplate kafkaTemplate;\n\n public Listener(JdbcTemplate jdbcTemplate, KafkaTemplate kafkaTemplate) {\n this.jdbcTemplate = jdbcTemplate;\n this.kafkaTemplate = kafkaTemplate;\n }\n\n @KafkaListener(id = "group1", topics = "topic1")\n @Transactional("dstm")\n public void listen1(String in) {\n this.kafkaTemplate.send("topic2", in.toUpperCase());\n this.jdbcTemplate.execute("insert into mytable (data) values (\'" + in + "\')");\n }\n\n @KafkaListener(id = "group2", topics = "topic2")\n public void listen2(String in) {\n System.out.println(in);\n }\n\n }\n\n @Bean\n public NewTopic topic1() {\n return TopicBuilder.name("topic1").build();\n }\n\n @Bean\n public NewTopic topic2() {\n return TopicBuilder.name("topic2").build();\n }\n\n}\n')])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("spring.datasource.url=jdbc:mysql://localhost/integration?serverTimezone=UTC\nspring.datasource.username=root\nspring.datasource.driver-class-name=com.mysql.cj.jdbc.Driver\n\nspring.kafka.consumer.auto-offset-reset=earliest\nspring.kafka.consumer.enable-auto-commit=false\nspring.kafka.consumer.properties.isolation.level=read_committed\n\nspring.kafka.producer.transaction-id-prefix=tx-\n\n#logging.level.org.springframework.transaction=trace\n#logging.level.org.springframework.kafka.transaction=debug\n#logging.level.org.springframework.jdbc=debug\n")])])]),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("create table mytable (data varchar(20));\n")])])]),a("p",[e._v("For producer-only transactions, transaction synchronization works:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Transactional("dstm")\npublic void someMethod(String in) {\n this.kafkaTemplate.send("topic2", in.toUpperCase());\n this.jdbcTemplate.execute("insert into mytable (data) values (\'" + in + "\')");\n}\n')])])]),a("p",[e._v("The "),a("code",[e._v("KafkaTemplate")]),e._v(" will synchronize its transaction with the DB transaction and the commit/rollback occurs after the database.")]),e._v(" "),a("p",[e._v("If you wish to commit the Kafka transaction first, and only commit the DB transaction if the Kafka transaction is successful, use nested "),a("code",[e._v("@Transactional")]),e._v(" methods:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v('@Transactional("dstm")\npublic void someMethod(String in) {\n this.jdbcTemplate.execute("insert into mytable (data) values (\'" + in + "\')");\n sendToKafka(in);\n}\n\n@Transactional("kafkaTransactionManager")\npublic void sendToKafka(String in) {\n this.kafkaTemplate.send("topic2", in.toUpperCase());\n}\n')])])]),a("p",[e._v("=== Customizing the JsonSerializer and JsonDeserializer")]),e._v(" "),a("p",[e._v("The serializer and deserializer support a number of cusomizations using properties, see "),a("a",{attrs:{href:"#json-serde"}},[e._v("JSON")]),e._v(" for more information.\nThe "),a("code",[e._v("kafka-clients")]),e._v(" code, not Spring, instantiates these objects, unless you inject them directly into the consumer and producer factories.\nIf you wish to configure the (de)serializer using properties, but wish to use, say, a custom "),a("code",[e._v("ObjectMapper")]),e._v(", simply create a subclass and pass the custom mapper into the "),a("code",[e._v("super")]),e._v(" constructor. For example:")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("public class CustomJsonSerializer extends JsonSerializer {\n\n public CustomJsonSerializer() {\n super(customizedObjectMapper());\n }\n\n private static ObjectMapper customizedObjectMapper() {\n ObjectMapper mapper = JacksonUtils.enhancedObjectMapper();\n mapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS);\n return mapper;\n }\n\n}\n")])])]),a("p",[e._v("== Other Resources")]),e._v(" "),a("p",[e._v("In addition to this reference documentation, we recommend a number of other resources that may help you learn about Spring and Apache Kafka.")]),e._v(" "),a("ul",[a("li",[a("p",[a("a",{attrs:{href:"https://kafka.apache.org/",target:"_blank",rel:"noopener noreferrer"}},[e._v("Apache Kafka Project Home Page"),a("OutboundLink")],1)])]),e._v(" "),a("li",[a("p",[a("a",{attrs:{href:"https://projects.spring.io/spring-kafka/",target:"_blank",rel:"noopener noreferrer"}},[e._v("Spring for Apache Kafka Home Page"),a("OutboundLink")],1)])]),e._v(" "),a("li",[a("p",[a("a",{attrs:{href:"https://github.com/spring-projects/spring-kafka",target:"_blank",rel:"noopener noreferrer"}},[e._v("Spring for Apache Kafka GitHub Repository"),a("OutboundLink")],1)])]),e._v(" "),a("li",[a("p",[a("a",{attrs:{href:"https://github.com/spring-projects/spring-integration",target:"_blank",rel:"noopener noreferrer"}},[e._v("Spring Integration GitHub Repository (Apache Kafka Module)"),a("OutboundLink")],1)])])]),e._v(" "),a("p",[e._v("== Override Spring Boot Dependencies")]),e._v(" "),a("p",[e._v("When using Spring for Apache Kafka in a Spring Boot application, the Apache Kafka dependency versions are determined by Spring Boot’s dependency management.\nIf you wish to use a different version of "),a("code",[e._v("kafka-clients")]),e._v(" or "),a("code",[e._v("kafka-streams")]),e._v(", and use the embedded kafka broker for testing, you need to override their version used by Spring Boot dependency management and add two "),a("code",[e._v("test")]),e._v(" artifacts for Apache Kafka.")]),e._v(" "),a("table",[a("thead",[a("tr",[a("th"),e._v(" "),a("th",[e._v("There is a bug in Apache Kafka 3.0.0 when running the embedded broker on Microsoft Windows "),a("a",{attrs:{href:"https://issues.apache.org/jira/browse/KAFKA-13391",target:"_blank",rel:"noopener noreferrer"}},[e._v("KAFKA-13391"),a("OutboundLink")],1),e._v("."),a("br"),e._v("To use the embedded broker on Windows, you need to downgrade the Apache Kafka version to 2.8.1 until 3.0.1 is available."),a("br"),e._v("When using 2.8.1, you also need to exclude "),a("code",[e._v("zookeeper")]),e._v(" dependency from "),a("code",[e._v("spring-kafka-test")]),e._v(".")])])]),e._v(" "),a("tbody")]),e._v(" "),a("p",[e._v("Maven")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("\n 2.8.1\n\n\n\n org.springframework.kafka\n spring-kafka\n\n\x3c!-- optional - only needed when using kafka-streams --\x3e\n\n org.apache.kafka\n kafka-streams\n\n\n\n org.springframework.kafka\n spring-kafka-test\n test\n \x3c!-- needed if downgrading to Apache Kafka 2.8.1 --\x3e\n \n \n org.apache.zookeeper\n zookeeper\n \n \n\n\n\n org.apache.kafka\n kafka-clients\n test\n test\n ${kafka.version}\n\n\n\n org.apache.kafka\n kafka_2.13\n test\n test\n ${kafka.version}\n\n")])])]),a("p",[e._v("Gradle")]),e._v(" "),a("div",{staticClass:"language- extra-class"},[a("pre",{pre:!0,attrs:{class:"language-text"}},[a("code",[e._v("ext['kafka.version'] = '2.8.1'\n\ndependencies {\n implementation 'org.springframework.kafka:spring-kafka'\n implementation \"org.apache.kafka:kafka-streams\" // optional - only needed when using kafka-streams\n testImplementation ('org.springframework.kafka:spring-kafka-test') {\n // needed if downgrading to Apache Kafka 2.8.1\n exclude group: 'org.apache.zookeeper', module: 'zookeeper'\n }\n testImplementation \"org.apache.kafka:kafka-clients:${kafka.version}:test\"\n testImplementation \"org.apache.kafka:kafka_2.13:${kafka.version}:test\"\n}\n")])])]),a("p",[e._v("The test scope dependencies are only needed if you are using the embedded Kafka broker in tests.")]),e._v(" "),a("p",[e._v("== Change History")]),e._v(" "),a("p",[e._v("=== Changes between 2.6 and 2.7")]),e._v(" "),a("p",[e._v("==== Kafka Client Version")]),e._v(" "),a("p",[e._v("This version requires the 2.7.0 "),a("code",[e._v("kafka-clients")]),e._v(".\nIt is also compatible with the 2.8.0 clients, since version 2.7.1; see "),a("a",{attrs:{href:"#update-deps"}},[e._v("[update-deps]")]),e._v(".")]),e._v(" "),a("p",[e._v("==== Non-Blocking Delayed Retries Using Topics")]),e._v(" "),a("p",[e._v("This significant new feature is added in this release.\nWhen strict ordering is not important, failed deliveries can be sent to another topic to be consumed later.\nA series of such retry topics can be configured, with increasing delays.\nSee "),a("a",{attrs:{href:"#retry-topic"}},[e._v("Non-Blocking Retries")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Listener Container Changes")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("onlyLogRecordMetadata")]),e._v(" container property is now "),a("code",[e._v("true")]),e._v(" by default.")]),e._v(" "),a("p",[e._v("A new container property "),a("code",[e._v("stopImmediate")]),e._v(" is now available.")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#container-props"}},[e._v("Listener Container Properties")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("Error handlers that use a "),a("code",[e._v("BackOff")]),e._v(" between delivery attempts (e.g. "),a("code",[e._v("SeekToCurrentErrorHandler")]),e._v(" and "),a("code",[e._v("DefaultAfterRollbackProcessor")]),e._v(") will now exit the back off interval soon after the container is stopped, rather than delaying the stop.\nSee "),a("a",{attrs:{href:"#after-rollback"}},[e._v("After-rollback Processor")]),e._v(" and "),a("a",{attrs:{href:"#seek-to-current"}},[e._v("[seek-to-current]")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("Error handlers and after rollback processors that extend "),a("code",[e._v("FailedRecordProcessor")]),e._v(" can now be configured with one or more "),a("code",[e._v("RetryListener")]),e._v(" s to receive information about retry and recovery progress.")]),e._v(" "),a("p",[e._v("See See "),a("a",{attrs:{href:"#after-rollback"}},[e._v("After-rollback Processor")]),e._v(", "),a("a",{attrs:{href:"#seek-to-current"}},[e._v("[seek-to-current]")]),e._v(", and "),a("a",{attrs:{href:"#recovering-batch-eh"}},[e._v("[recovering-batch-eh]")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("RecordInterceptor")]),e._v(" now has additional methods called after the listener returns (normally, or by throwing an exception).\nIt also has a sub-interface "),a("code",[e._v("ConsumerAwareRecordInterceptor")]),e._v(".\nIn addition, there is now a "),a("code",[e._v("BatchInterceptor")]),e._v(" for batch listeners.\nSee "),a("a",{attrs:{href:"#message-listener-container"}},[e._v("Message Listener Containers")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== "),a("code",[e._v("@KafkaListener")]),e._v(" Changes")]),e._v(" "),a("p",[e._v("You can now validate the payload parameter of "),a("code",[e._v("@KafkaHandler")]),e._v(" methods (class-level listeners).\nSee "),a("a",{attrs:{href:"#kafka-validation"}},[a("code",[e._v("@KafkaListener")]),e._v(" "),a("code",[e._v("@Payload")]),e._v(" Validation")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("You can now set the "),a("code",[e._v("rawRecordHeader")]),e._v(" property on the "),a("code",[e._v("MessagingMessageConverter")]),e._v(" and "),a("code",[e._v("BatchMessagingMessageConverter")]),e._v(" which causes the raw "),a("code",[e._v("ConsumerRecord")]),e._v(" to be added to the converted "),a("code",[e._v("Message")]),e._v(".\nThis is useful, for example, if you wish to use a "),a("code",[e._v("DeadLetterPublishingRecoverer")]),e._v(" in a listener error handler.\nSee "),a("a",{attrs:{href:"#listener-error-handlers"}},[e._v("Listener Error Handlers")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("You can now modify "),a("code",[e._v("@KafkaListener")]),e._v(" annotations during application initialization.\nSee "),a("a",{attrs:{href:"#kafkalistener-attrs"}},[a("code",[e._v("@KafkaListener")]),e._v(" Attribute Modification")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== "),a("code",[e._v("DeadLetterPublishingRecover")]),e._v(" Changes")]),e._v(" "),a("p",[e._v("Now, if both the key and value fail deserialization, the original values are published to the DLT.\nPreviously, the value was populated but the key "),a("code",[e._v("DeserializationException")]),e._v(" remained in the headers.\nThere is a breaking API change, if you subclassed the recoverer and overrode the "),a("code",[e._v("createProducerRecord")]),e._v(" method.")]),e._v(" "),a("p",[e._v("In addition, the recoverer verifies that the partition selected by the destination resolver actually exists before publishing to it.")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#dead-letters"}},[e._v("Publishing Dead-letter Records")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== "),a("code",[e._v("ChainedKafkaTransactionManager")]),e._v(" is Deprecated")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#transactions"}},[e._v("Transactions")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== "),a("code",[e._v("ReplyingKafkaTemplate")]),e._v(" Changes")]),e._v(" "),a("p",[e._v("There is now a mechanism to examine a reply and fail the future exceptionally if some condition exists.")]),e._v(" "),a("p",[e._v("Support for sending and receiving "),a("code",[e._v("spring-messaging")]),e._v(" "),a("code",[e._v("Message")]),e._v(" s has been added.")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#replying-template"}},[e._v("Using "),a("code",[e._v("ReplyingKafkaTemplate")])]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Kafka Streams Changes")]),e._v(" "),a("p",[e._v("By default, the "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(" is now configured to not clean up local state.\nSee "),a("a",{attrs:{href:"#streams-config"}},[e._v("Configuration")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== "),a("code",[e._v("KafkaAdmin")]),e._v(" Changes")]),e._v(" "),a("p",[e._v("New methods "),a("code",[e._v("createOrModifyTopics")]),e._v(" and "),a("code",[e._v("describeTopics")]),e._v(" have been added."),a("code",[e._v("KafkaAdmin.NewTopics")]),e._v(" has been added to facilitate configuring multiple topics in a single bean.\nSee "),a("a",{attrs:{href:"#configuring-topics"}},[e._v("Configuring Topics")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== "),a("code",[e._v("MessageConverter")]),e._v(" Changes")]),e._v(" "),a("p",[e._v("It is now possible to add a "),a("code",[e._v("spring-messaging")]),e._v(" "),a("code",[e._v("SmartMessageConverter")]),e._v(" to the "),a("code",[e._v("MessagingMessageConverter")]),e._v(", allowing content negotiation based on the "),a("code",[e._v("contentType")]),e._v(" header.\nSee "),a("a",{attrs:{href:"#messaging-message-conversion"}},[e._v("Spring Messaging Message Conversion")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Sequencing "),a("code",[e._v("@KafkaListener")]),e._v(" s")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#sequencing"}},[e._v("Starting "),a("code",[e._v("@KafkaListener")]),e._v(" s in Sequence")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== "),a("code",[e._v("ExponentialBackOffWithMaxRetries")])]),e._v(" "),a("p",[e._v("A new "),a("code",[e._v("BackOff")]),e._v(" implementation is provided, making it more convenient to configure the max retries.\nSee "),a("a",{attrs:{href:"#exp-backoff"}},[a("code",[e._v("ExponentialBackOffWithMaxRetries")]),e._v(" Implementation")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Conditional Delegating Error Handlers")]),e._v(" "),a("p",[e._v("These new error handlers can be configured to delegate to different error handlers, depending on the exception type.\nSee "),a("a",{attrs:{href:"#cond-eh"}},[e._v("Delegating Error Handler")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("=== Changes between 2.5 and 2.6")]),e._v(" "),a("p",[e._v("==== Kafka Client Version")]),e._v(" "),a("p",[e._v("This version requires the 2.6.0 "),a("code",[e._v("kafka-clients")]),e._v(".")]),e._v(" "),a("p",[e._v("==== Listener Container Changes")]),e._v(" "),a("p",[e._v("The default "),a("code",[e._v("EOSMode")]),e._v(" is now "),a("code",[e._v("BETA")]),e._v(".\nSee "),a("a",{attrs:{href:"#exactly-once"}},[e._v("Exactly Once Semantics")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("Various error handlers (that extend "),a("code",[e._v("FailedRecordProcessor")]),e._v(") and the "),a("code",[e._v("DefaultAfterRollbackProcessor")]),e._v(" now reset the "),a("code",[e._v("BackOff")]),e._v(" if recovery fails.\nIn addition, you can now select the "),a("code",[e._v("BackOff")]),e._v(" to use based on the failed record and/or exception.\nSee "),a("a",{attrs:{href:"#seek-to-current"}},[e._v("[seek-to-current]")]),e._v(", "),a("a",{attrs:{href:"#recovering-batch-eh"}},[e._v("[recovering-batch-eh]")]),e._v(", "),a("a",{attrs:{href:"#dead-letters"}},[e._v("Publishing Dead-letter Records")]),e._v(" and "),a("a",{attrs:{href:"#after-rollback"}},[e._v("After-rollback Processor")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("You can now configure an "),a("code",[e._v("adviceChain")]),e._v(" in the container properties.\nSee "),a("a",{attrs:{href:"#container-props"}},[e._v("Listener Container Properties")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("When the container is configured to publish "),a("code",[e._v("ListenerContainerIdleEvent")]),e._v(" s, it now publishes a "),a("code",[e._v("ListenerContainerNoLongerIdleEvent")]),e._v(" when a record is received after publishing an idle event.\nSee "),a("a",{attrs:{href:"#events"}},[e._v("Application Events")]),e._v(" and "),a("a",{attrs:{href:"#idle-containers"}},[e._v("Detecting Idle and Non-Responsive Consumers")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== @KafkaListener Changes")]),e._v(" "),a("p",[e._v("When using manual partition assignment, you can now specify a wildcard for determining which partitions should be reset to the initial offset.\nIn addition, if the listener implements "),a("code",[e._v("ConsumerSeekAware")]),e._v(", "),a("code",[e._v("onPartitionsAssigned()")]),e._v(" is called after the manual assignment.\n(Also added in version 2.5.5).\nSee "),a("a",{attrs:{href:"#manual-assignment"}},[e._v("Explicit Partition Assignment")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("Convenience methods have been added to "),a("code",[e._v("AbstractConsumerSeekAware")]),e._v(" to make seeking easier.\nSee "),a("a",{attrs:{href:"#seek"}},[e._v("Seeking to a Specific Offset")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== ErrorHandler Changes")]),e._v(" "),a("p",[e._v("Subclasses of "),a("code",[e._v("FailedRecordProcessor")]),e._v(" (e.g. "),a("code",[e._v("SeekToCurrentErrorHandler")]),e._v(", "),a("code",[e._v("DefaultAfterRollbackProcessor")]),e._v(", "),a("code",[e._v("RecoveringBatchErrorHandler")]),e._v(") can now be configured to reset the retry state if the exception is a different type to that which occurred previously with this record.\nSee "),a("a",{attrs:{href:"#seek-to-current"}},[e._v("[seek-to-current]")]),e._v(", "),a("a",{attrs:{href:"#after-rollback"}},[e._v("After-rollback Processor")]),e._v(", "),a("a",{attrs:{href:"#recovering-batch-eh"}},[e._v("[recovering-batch-eh]")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Producer Factory Changes")]),e._v(" "),a("p",[e._v("You can now set a maximum age for producers after which they will be closed and recreated.\nSee "),a("a",{attrs:{href:"#transactions"}},[e._v("Transactions")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("You can now update the configuration map after the "),a("code",[e._v("DefaultKafkaProducerFactory")]),e._v(" has been created.\nThis might be useful, for example, if you have to update SSL key/trust store locations after a credentials change.\nSee "),a("a",{attrs:{href:"#producer-factory"}},[e._v("Using "),a("code",[e._v("DefaultKafkaProducerFactory")])]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("=== Changes between 2.4 and 2.5")]),e._v(" "),a("p",[e._v("This section covers the changes made from version 2.4 to version 2.5.\nFor changes in earlier version, see "),a("a",{attrs:{href:"#history"}},[e._v("[history]")]),e._v(".")]),e._v(" "),a("p",[e._v("==== Consumer/Producer Factory Changes")]),e._v(" "),a("p",[e._v("The default consumer and producer factories can now invoke a callback whenever a consumer or producer is created or closed.\nImplementations for native Micrometer metrics are provided.\nSee "),a("a",{attrs:{href:"#factory-listeners"}},[e._v("Factory Listeners")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("You can now change bootstrap server properties at runtime, enabling failover to another Kafka cluster.\nSee "),a("a",{attrs:{href:"#connecting"}},[e._v("Connecting to Kafka")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(" Changes")]),e._v(" "),a("p",[e._v("The factory bean can now invoke a callback whenever a "),a("code",[e._v("KafkaStreams")]),e._v(" created or destroyed.\nAn Implementation for native Micrometer metrics is provided.\nSee "),a("a",{attrs:{href:"#streams-micrometer"}},[e._v("KafkaStreams Micrometer Support")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Kafka Client Version")]),e._v(" "),a("p",[e._v("This version requires the 2.5.0 "),a("code",[e._v("kafka-clients")]),e._v(".")]),e._v(" "),a("p",[e._v("==== Class/Package Changes")]),e._v(" "),a("p",[a("code",[e._v("SeekUtils")]),e._v(" has been moved from the "),a("code",[e._v("o.s.k.support")]),e._v(" package to "),a("code",[e._v("o.s.k.listener")]),e._v(".")]),e._v(" "),a("p",[e._v("==== Delivery Attempts Header")]),e._v(" "),a("p",[e._v("There is now an option to to add a header which tracks delivery attempts when using certain error handlers and after rollback processors.\nSee "),a("a",{attrs:{href:"#delivery-header"}},[e._v("Delivery Attempts Header")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== @KafkaListener Changes")]),e._v(" "),a("p",[e._v("Default reply headers will now be populated automatically if needed when a "),a("code",[e._v("@KafkaListener")]),e._v(" return type is "),a("code",[e._v("Message")]),e._v(".\nSee "),a("a",{attrs:{href:"#reply-message"}},[e._v("Reply Type Message")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("KafkaHeaders.RECEIVED_MESSAGE_KEY")]),e._v(" is no longer populated with a "),a("code",[e._v("null")]),e._v(" value when the incoming record has a "),a("code",[e._v("null")]),e._v(" key; the header is omitted altogether.")]),e._v(" "),a("p",[a("code",[e._v("@KafkaListener")]),e._v(" methods can now specify a "),a("code",[e._v("ConsumerRecordMetadata")]),e._v(" parameter instead of using discrete headers for metadata such as topic, partition, etc.\nSee "),a("a",{attrs:{href:"#consumer-record-metadata"}},[e._v("Consumer Record Metadata")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Listener Container Changes")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("assignmentCommitOption")]),e._v(" container property is now "),a("code",[e._v("LATEST_ONLY_NO_TX")]),e._v(" by default.\nSee "),a("a",{attrs:{href:"#container-props"}},[e._v("Listener Container Properties")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("subBatchPerPartition")]),e._v(" container property is now "),a("code",[e._v("true")]),e._v(" by default when using transactions.\nSee "),a("a",{attrs:{href:"#transactions"}},[e._v("Transactions")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("A new "),a("code",[e._v("RecoveringBatchErrorHandler")]),e._v(" is now provided.\nSee "),a("a",{attrs:{href:"#recovering-batch-eh"}},[e._v("[recovering-batch-eh]")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("Static group membership is now supported.\nSee "),a("a",{attrs:{href:"#message-listener-container"}},[e._v("Message Listener Containers")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("When incremental/cooperative rebalancing is configured, if offsets fail to commit with a non-fatal "),a("code",[e._v("RebalanceInProgressException")]),e._v(", the container will attempt to re-commit the offsets for the partitions that remain assigned to this instance after the rebalance is completed.")]),e._v(" "),a("p",[e._v("The default error handler is now the "),a("code",[e._v("SeekToCurrentErrorHandler")]),e._v(" for record listeners and "),a("code",[e._v("RecoveringBatchErrorHandler")]),e._v(" for batch listeners.\nSee "),a("a",{attrs:{href:"#error-handlers"}},[e._v("Container Error Handlers")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("You can now control the level at which exceptions intentionally thrown by standard error handlers are logged.\nSee "),a("a",{attrs:{href:"#error-handlers"}},[e._v("Container Error Handlers")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("getAssignmentsByClientId()")]),e._v(" method has been added, making it easier to determine which consumers in a concurrent container are assigned which partition(s).\nSee "),a("a",{attrs:{href:"#container-props"}},[e._v("Listener Container Properties")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("You can now suppress logging entire "),a("code",[e._v("ConsumerRecord")]),e._v(" s in error, debug logs etc.\nSee "),a("code",[e._v("onlyLogRecordMetadata")]),e._v(" in "),a("a",{attrs:{href:"#container-props"}},[e._v("Listener Container Properties")]),e._v(".")]),e._v(" "),a("p",[e._v("==== KafkaTemplate Changes")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("KafkaTemplate")]),e._v(" can now maintain micrometer timers.\nSee "),a("a",{attrs:{href:"#micrometer"}},[e._v("Monitoring")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("KafkaTemplate")]),e._v(" can now be configured with "),a("code",[e._v("ProducerConfig")]),e._v(" properties to override those in the producer factory.\nSee "),a("a",{attrs:{href:"#kafka-template"}},[e._v("Using "),a("code",[e._v("KafkaTemplate")])]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("A "),a("code",[e._v("RoutingKafkaTemplate")]),e._v(" has now been provided.\nSee "),a("a",{attrs:{href:"#routing-template"}},[e._v("Using "),a("code",[e._v("RoutingKafkaTemplate")])]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("You can now use "),a("code",[e._v("KafkaSendCallback")]),e._v(" instead of "),a("code",[e._v("ListenerFutureCallback")]),e._v(" to get a narrower exception, making it easier to extract the failed "),a("code",[e._v("ProducerRecord")]),e._v(".\nSee "),a("a",{attrs:{href:"#kafka-template"}},[e._v("Using "),a("code",[e._v("KafkaTemplate")])]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Kafka String Serializer/Deserializer")]),e._v(" "),a("p",[e._v("New "),a("code",[e._v("ToStringSerializer")]),e._v("/"),a("code",[e._v("StringDeserializer")]),e._v(" s as well as an associated "),a("code",[e._v("SerDe")]),e._v(" are now provided.\nSee "),a("a",{attrs:{href:"#string-serde"}},[e._v("String serialization")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== JsonDeserializer")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("JsonDeserializer")]),e._v(" now has more flexibility to determine the deserialization type.\nSee "),a("a",{attrs:{href:"#serdes-type-methods"}},[e._v("Using Methods to Determine Types")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Delegating Serializer/Deserializer")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("DelegatingSerializer")]),e._v(' can now handle "standard" types, when the outbound record has no header.\nSee '),a("a",{attrs:{href:"#delegating-serialization"}},[e._v("Delegating Serializer and Deserializer")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Testing Changes")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("KafkaTestUtils.consumerProps()")]),e._v(" helper record now sets "),a("code",[e._v("ConsumerConfig.AUTO_OFFSET_RESET_CONFIG")]),e._v(" to "),a("code",[e._v("earliest")]),e._v(" by default.\nSee "),a("a",{attrs:{href:"#junit"}},[e._v("JUnit")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("=== Changes between 2.3 and 2.4")]),e._v(" "),a("p",[e._v("==== Kafka Client Version")]),e._v(" "),a("p",[e._v("This version requires the 2.4.0 "),a("code",[e._v("kafka-clients")]),e._v(" or higher and supports the new incremental rebalancing feature.")]),e._v(" "),a("p",[e._v("==== ConsumerAwareRebalanceListener")]),e._v(" "),a("p",[e._v("Like "),a("code",[e._v("ConsumerRebalanceListener")]),e._v(", this interface now has an additional method "),a("code",[e._v("onPartitionsLost")]),e._v(".\nRefer to the Apache Kafka documentation for more information.")]),e._v(" "),a("p",[e._v("Unlike the "),a("code",[e._v("ConsumerRebalanceListener")]),e._v(", The default implementation does "),a("strong",[e._v("not")]),e._v(" call "),a("code",[e._v("onPartitionsRevoked")]),e._v(".\nInstead, the listener container will call that method after it has called "),a("code",[e._v("onPartitionsLost")]),e._v("; you should not, therefore, do the same when implementing "),a("code",[e._v("ConsumerAwareRebalanceListener")]),e._v(".")]),e._v(" "),a("p",[e._v("See the IMPORTANT note at the end of "),a("a",{attrs:{href:"#rebalance-listeners"}},[e._v("Rebalancing Listeners")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== GenericErrorHandler")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("isAckAfterHandle()")]),e._v(" default implementation now returns true by default.")]),e._v(" "),a("p",[e._v("==== KafkaTemplate")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("KafkaTemplate")]),e._v(" now supports non-transactional publishing alongside transactional.\nSee "),a("a",{attrs:{href:"#tx-template-mixed"}},[a("code",[e._v("KafkaTemplate")]),e._v(" Transactional and non-Transactional Publishing")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== AggregatingReplyingKafkaTemplate")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("releaseStrategy")]),e._v(" is now a "),a("code",[e._v("BiConsumer")]),e._v(".\nIt is now called after a timeout (as well as when records arrive); the second parameter is "),a("code",[e._v("true")]),e._v(" in the case of a call after a timeout.")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#aggregating-request-reply"}},[e._v("Aggregating Multiple Replies")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Listener Container")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("ContainerProperties")]),e._v(" provides an "),a("code",[e._v("authorizationExceptionRetryInterval")]),e._v(" option to let the listener container to retry after any "),a("code",[e._v("AuthorizationException")]),e._v(" is thrown by the "),a("code",[e._v("KafkaConsumer")]),e._v(".\nSee its JavaDocs and "),a("a",{attrs:{href:"#kafka-container"}},[e._v("Using "),a("code",[e._v("KafkaMessageListenerContainer")])]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== @KafkaListener")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("@KafkaListener")]),e._v(" annotation has a new property "),a("code",[e._v("splitIterables")]),e._v("; default true.\nWhen a replying listener returns an "),a("code",[e._v("Iterable")]),e._v(" this property controls whether the return result is sent as a single record or a record for each element is sent.\nSee "),a("a",{attrs:{href:"#annotation-send-to"}},[e._v("Forwarding Listener Results using "),a("code",[e._v("@SendTo")])]),e._v(" for more information")]),e._v(" "),a("p",[e._v("Batch listeners can now be configured with a "),a("code",[e._v("BatchToRecordAdapter")]),e._v("; this allows, for example, the batch to be processed in a transaction while the listener gets one record at a time.\nWith the default implementation, a "),a("code",[e._v("ConsumerRecordRecoverer")]),e._v(" can be used to handle errors within the batch, without stopping the processing of the entire batch - this might be useful when using transactions.\nSee "),a("a",{attrs:{href:"#transactions-batch"}},[e._v("Transactions with Batch Listeners")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Kafka Streams")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(" accepts a new property "),a("code",[e._v("KafkaStreamsInfrastructureCustomizer")]),e._v(".\nThis allows configuration of the builder and/or topology before the stream is created.\nSee "),a("a",{attrs:{href:"#streams-spring"}},[e._v("Spring Management")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("=== Changes Between 2.2 and 2.3")]),e._v(" "),a("p",[e._v("This section covers the changes made from version 2.2 to version 2.3.")]),e._v(" "),a("p",[e._v("==== Tips, Tricks and Examples")]),e._v(" "),a("p",[e._v("A new chapter "),a("a",{attrs:{href:"#tips-n-tricks"}},[e._v("[tips-n-tricks]")]),e._v(" has been added.\nPlease submit GitHub issues and/or pull requests for additional entries in that chapter.")]),e._v(" "),a("p",[e._v("==== Kafka Client Version")]),e._v(" "),a("p",[e._v("This version requires the 2.3.0 "),a("code",[e._v("kafka-clients")]),e._v(" or higher.")]),e._v(" "),a("p",[e._v("==== Class/Package Changes")]),e._v(" "),a("p",[a("code",[e._v("TopicPartitionInitialOffset")]),e._v(" is deprecated in favor of "),a("code",[e._v("TopicPartitionOffset")]),e._v(".")]),e._v(" "),a("p",[e._v("==== Configuration Changes")]),e._v(" "),a("p",[e._v("Starting with version 2.3.4, the "),a("code",[e._v("missingTopicsFatal")]),e._v(" container property is false by default.\nWhen this is true, the application fails to start if the broker is down; many users were affected by this change; given that Kafka is a high-availability platform, we did not anticipate that starting an application with no active brokers would be a common use case.")]),e._v(" "),a("p",[e._v("==== Producer and Consumer Factory Changes")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("DefaultKafkaProducerFactory")]),e._v(" can now be configured to create a producer per thread.\nYou can also provide "),a("code",[e._v("Supplier")]),e._v(" instances in the constructor as an alternative to either configured classes (which require no-arg constructors), or constructing with "),a("code",[e._v("Serializer")]),e._v(" instances, which are then shared between all Producers.\nSee "),a("a",{attrs:{href:"#producer-factory"}},[e._v("Using "),a("code",[e._v("DefaultKafkaProducerFactory")])]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The same option is available with "),a("code",[e._v("Supplier")]),e._v(" instances in "),a("code",[e._v("DefaultKafkaConsumerFactory")]),e._v(".\nSee "),a("a",{attrs:{href:"#kafka-container"}},[e._v("Using "),a("code",[e._v("KafkaMessageListenerContainer")])]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Listener Container Changes")]),e._v(" "),a("p",[e._v("Previously, error handlers received "),a("code",[e._v("ListenerExecutionFailedException")]),e._v(" (with the actual listener exception as the "),a("code",[e._v("cause")]),e._v(") when the listener was invoked using a listener adapter (such as "),a("code",[e._v("@KafkaListener")]),e._v(" s).\nExceptions thrown by native "),a("code",[e._v("GenericMessageListener")]),e._v(" s were passed to the error handler unchanged.\nNow a "),a("code",[e._v("ListenerExecutionFailedException")]),e._v(" is always the argument (with the actual listener exception as the "),a("code",[e._v("cause")]),e._v("), which provides access to the container’s "),a("code",[e._v("group.id")]),e._v(" property.")]),e._v(" "),a("p",[e._v("Because the listener container has it’s own mechanism for committing offsets, it prefers the Kafka "),a("code",[e._v("ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG")]),e._v(" to be "),a("code",[e._v("false")]),e._v(".\nIt now sets it to false automatically unless specifically set in the consumer factory or the container’s consumer property overrides.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("ackOnError")]),e._v(" property is now "),a("code",[e._v("false")]),e._v(" by default.\nSee "),a("a",{attrs:{href:"#seek-to-current"}},[e._v("[seek-to-current]")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("It is now possible to obtain the consumer’s "),a("code",[e._v("group.id")]),e._v(" property in the listener method.\nSee "),a("a",{attrs:{href:"#listener-group-id"}},[e._v("Obtaining the Consumer "),a("code",[e._v("group.id")])]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The container has a new property "),a("code",[e._v("recordInterceptor")]),e._v(" allowing records to be inspected or modified before invoking the listener.\nA "),a("code",[e._v("CompositeRecordInterceptor")]),e._v(" is also provided in case you need to invoke multiple interceptors.\nSee "),a("a",{attrs:{href:"#message-listener-container"}},[e._v("Message Listener Containers")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("ConsumerSeekAware")]),e._v(" has new methods allowing you to perform seeks relative to the beginning, end, or current position and to seek to the first offset greater than or equal to a time stamp.\nSee "),a("a",{attrs:{href:"#seek"}},[e._v("Seeking to a Specific Offset")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("A convenience class "),a("code",[e._v("AbstractConsumerSeekAware")]),e._v(" is now provided to simplify seeking.\nSee "),a("a",{attrs:{href:"#seek"}},[e._v("Seeking to a Specific Offset")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("ContainerProperties")]),e._v(" provides an "),a("code",[e._v("idleBetweenPolls")]),e._v(" option to let the main loop in the listener container to sleep between "),a("code",[e._v("KafkaConsumer.poll()")]),e._v(" calls.\nSee its JavaDocs and "),a("a",{attrs:{href:"#kafka-container"}},[e._v("Using "),a("code",[e._v("KafkaMessageListenerContainer")])]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("When using "),a("code",[e._v("AckMode.MANUAL")]),e._v(" (or "),a("code",[e._v("MANUAL_IMMEDIATE")]),e._v(") you can now cause a redelivery by calling "),a("code",[e._v("nack")]),e._v(" on the "),a("code",[e._v("Acknowledgment")]),e._v(".\nSee "),a("a",{attrs:{href:"#committing-offsets"}},[e._v("Committing Offsets")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("Listener performance can now be monitored using Micrometer "),a("code",[e._v("Timer")]),e._v(" s.\nSee "),a("a",{attrs:{href:"#micrometer"}},[e._v("Monitoring")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The containers now publish additional consumer lifecycle events relating to startup.\nSee "),a("a",{attrs:{href:"#events"}},[e._v("Application Events")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("Transactional batch listeners can now support zombie fencing.\nSee "),a("a",{attrs:{href:"#transactions"}},[e._v("Transactions")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The listener container factory can now be configured with a "),a("code",[e._v("ContainerCustomizer")]),e._v(" to further configure each container after it has been created and configured.\nSee "),a("a",{attrs:{href:"#container-factory"}},[e._v("Container factory")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== ErrorHandler Changes")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("SeekToCurrentErrorHandler")]),e._v(" now treats certain exceptions as fatal and disables retry for those, invoking the recoverer on first failure.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("SeekToCurrentErrorHandler")]),e._v(" and "),a("code",[e._v("SeekToCurrentBatchErrorHandler")]),e._v(" can now be configured to apply a "),a("code",[e._v("BackOff")]),e._v(" (thread sleep) between delivery attempts.")]),e._v(" "),a("p",[e._v("Starting with version 2.3.2, recovered records' offsets will be committed when the error handler returns after recovering a failed record.")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#seek-to-current"}},[e._v("[seek-to-current]")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("DeadLetterPublishingRecoverer")]),e._v(", when used in conjunction with an "),a("code",[e._v("ErrorHandlingDeserializer")]),e._v(", now sets the payload of the message sent to the dead-letter topic, to the original value that could not be deserialized.\nPreviously, it was "),a("code",[e._v("null")]),e._v(" and user code needed to extract the "),a("code",[e._v("DeserializationException")]),e._v(" from the message headers.\nSee "),a("a",{attrs:{href:"#dead-letters"}},[e._v("Publishing Dead-letter Records")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== TopicBuilder")]),e._v(" "),a("p",[e._v("A new class "),a("code",[e._v("TopicBuilder")]),e._v(" is provided for more convenient creation of "),a("code",[e._v("NewTopic")]),e._v(" "),a("code",[e._v("@Bean")]),e._v(" s for automatic topic provisioning.\nSee "),a("a",{attrs:{href:"#configuring-topics"}},[e._v("Configuring Topics")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Kafka Streams Changes")]),e._v(" "),a("p",[e._v("You can now perform additional configuration of the "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(" created by "),a("code",[e._v("@EnableKafkaStreams")]),e._v(".\nSee "),a("a",{attrs:{href:"#streams-config"}},[e._v("Streams Configuration")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("A "),a("code",[e._v("RecoveringDeserializationExceptionHandler")]),e._v(" is now provided which allows records with deserialization errors to be recovered.\nIt can be used in conjunction with a "),a("code",[e._v("DeadLetterPublishingRecoverer")]),e._v(" to send these records to a dead-letter topic.\nSee "),a("a",{attrs:{href:"#streams-deser-recovery"}},[e._v("Recovery from Deserialization Exceptions")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("HeaderEnricher")]),e._v(" transformer has been provided, using SpEL to generate the header values.\nSee "),a("a",{attrs:{href:"#streams-header-enricher"}},[e._v("Header Enricher")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("MessagingTransformer")]),e._v(" has been provided.\nThis allows a Kafka streams topology to interact with a spring-messaging component, such as a Spring Integration flow.\nSee "),a("a",{attrs:{href:"#streams-messaging"}},[a("code",[e._v("MessagingTransformer")])]),e._v(" and See ["),a("a",{attrs:{href:"https://docs.spring.io/spring-integration/docs/current/reference/html/kafka.html#streams-integration",target:"_blank",rel:"noopener noreferrer"}},[e._v("Calling a Spring Integration Flow from a "),a("code",[e._v("KStream")]),a("OutboundLink")],1),e._v("] for more information.")]),e._v(" "),a("p",[e._v("==== JSON Component Changes")]),e._v(" "),a("p",[e._v("Now all the JSON-aware components are configured by default with a Jackson "),a("code",[e._v("ObjectMapper")]),e._v(" produced by the "),a("code",[e._v("JacksonUtils.enhancedObjectMapper()")]),e._v(".\nThe "),a("code",[e._v("JsonDeserializer")]),e._v(" now provides "),a("code",[e._v("TypeReference")]),e._v("-based constructors for better handling of target generic container types.\nAlso a "),a("code",[e._v("JacksonMimeTypeModule")]),e._v(" has been introduced for serialization of "),a("code",[e._v("org.springframework.util.MimeType")]),e._v(" to plain string.\nSee its JavaDocs and "),a("a",{attrs:{href:"#serdes"}},[e._v("Serialization, Deserialization, and Message Conversion")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("A "),a("code",[e._v("ByteArrayJsonMessageConverter")]),e._v(" has been provided as well as a new super class for all Json converters, "),a("code",[e._v("JsonMessageConverter")]),e._v(".\nAlso, a "),a("code",[e._v("StringOrBytesSerializer")]),e._v(" is now available; it can serialize "),a("code",[e._v("byte[]")]),e._v(", "),a("code",[e._v("Bytes")]),e._v(" and "),a("code",[e._v("String")]),e._v(" values in "),a("code",[e._v("ProducerRecord")]),e._v(" s.\nSee "),a("a",{attrs:{href:"#messaging-message-conversion"}},[e._v("Spring Messaging Message Conversion")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("JsonSerializer")]),e._v(", "),a("code",[e._v("JsonDeserializer")]),e._v(" and "),a("code",[e._v("JsonSerde")]),e._v(" now have fluent APIs to make programmatic configuration simpler.\nSee the javadocs, "),a("a",{attrs:{href:"#serdes"}},[e._v("Serialization, Deserialization, and Message Conversion")]),e._v(", and "),a("a",{attrs:{href:"#serde"}},[e._v("Streams JSON Serialization and Deserialization")]),e._v(" for more informaion.")]),e._v(" "),a("p",[e._v("==== ReplyingKafkaTemplate")]),e._v(" "),a("p",[e._v("When a reply times out, the future is completed exceptionally with a "),a("code",[e._v("KafkaReplyTimeoutException")]),e._v(" instead of a "),a("code",[e._v("KafkaException")]),e._v(".")]),e._v(" "),a("p",[e._v("Also, an overloaded "),a("code",[e._v("sendAndReceive")]),e._v(" method is now provided that allows specifying the reply timeout on a per message basis.")]),e._v(" "),a("p",[e._v("==== AggregatingReplyingKafkaTemplate")]),e._v(" "),a("p",[e._v("Extends the "),a("code",[e._v("ReplyingKafkaTemplate")]),e._v(" by aggregating replies from multiple receivers.\nSee "),a("a",{attrs:{href:"#aggregating-request-reply"}},[e._v("Aggregating Multiple Replies")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Transaction Changes")]),e._v(" "),a("p",[e._v("You can now override the producer factory’s "),a("code",[e._v("transactionIdPrefix")]),e._v(" on the "),a("code",[e._v("KafkaTemplate")]),e._v(" and "),a("code",[e._v("KafkaTransactionManager")]),e._v(".\nSee "),a("a",{attrs:{href:"#transaction-id-prefix"}},[a("code",[e._v("transactionIdPrefix")])]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== New Delegating Serializer/Deserializer")]),e._v(" "),a("p",[e._v("The framework now provides a delegating "),a("code",[e._v("Serializer")]),e._v(" and "),a("code",[e._v("Deserializer")]),e._v(", utilizing a header to enable producing and consuming records with multiple key/value types.\nSee "),a("a",{attrs:{href:"#delegating-serialization"}},[e._v("Delegating Serializer and Deserializer")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== New Retrying Deserializer")]),e._v(" "),a("p",[e._v("The framework now provides a delegating "),a("code",[e._v("RetryingDeserializer")]),e._v(", to retry serialization when transient errors such as network problems might occur.\nSee "),a("a",{attrs:{href:"#retrying-deserialization"}},[e._v("Retrying Deserializer")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("=== Changes Between 2.1 and 2.2")]),e._v(" "),a("p",[e._v("==== Kafka Client Version")]),e._v(" "),a("p",[e._v("This version requires the 2.0.0 "),a("code",[e._v("kafka-clients")]),e._v(" or higher.")]),e._v(" "),a("p",[e._v("==== Class and Package Changes")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("ContainerProperties")]),e._v(" class has been moved from "),a("code",[e._v("org.springframework.kafka.listener.config")]),e._v(" to "),a("code",[e._v("org.springframework.kafka.listener")]),e._v(".")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("AckMode")]),e._v(" enum has been moved from "),a("code",[e._v("AbstractMessageListenerContainer")]),e._v(" to "),a("code",[e._v("ContainerProperties")]),e._v(".")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("setBatchErrorHandler()")]),e._v(" and "),a("code",[e._v("setErrorHandler()")]),e._v(" methods have been moved from "),a("code",[e._v("ContainerProperties")]),e._v(" to both "),a("code",[e._v("AbstractMessageListenerContainer")]),e._v(" and "),a("code",[e._v("AbstractKafkaListenerContainerFactory")]),e._v(".")]),e._v(" "),a("p",[e._v("==== After Rollback Processing")]),e._v(" "),a("p",[e._v("A new "),a("code",[e._v("AfterRollbackProcessor")]),e._v(" strategy is provided.\nSee "),a("a",{attrs:{href:"#after-rollback"}},[e._v("After-rollback Processor")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== "),a("code",[e._v("ConcurrentKafkaListenerContainerFactory")]),e._v(" Changes")]),e._v(" "),a("p",[e._v("You can now use the "),a("code",[e._v("ConcurrentKafkaListenerContainerFactory")]),e._v(" to create and configure any "),a("code",[e._v("ConcurrentMessageListenerContainer")]),e._v(", not only those for "),a("code",[e._v("@KafkaListener")]),e._v(" annotations.\nSee "),a("a",{attrs:{href:"#container-factory"}},[e._v("Container factory")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Listener Container Changes")]),e._v(" "),a("p",[e._v("A new container property ("),a("code",[e._v("missingTopicsFatal")]),e._v(") has been added.\nSee "),a("a",{attrs:{href:"#kafka-container"}},[e._v("Using "),a("code",[e._v("KafkaMessageListenerContainer")])]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("A "),a("code",[e._v("ConsumerStoppedEvent")]),e._v(" is now emitted when a consumer stops.\nSee "),a("a",{attrs:{href:"#thread-safety"}},[e._v("Thread Safety")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("Batch listeners can optionally receive the complete "),a("code",[e._v("ConsumerRecords")]),e._v(" object instead of a "),a("code",[e._v("List")]),e._v(".\nSee "),a("a",{attrs:{href:"#batch-listeners"}},[e._v("Batch Listeners")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("DefaultAfterRollbackProcessor")]),e._v(" and "),a("code",[e._v("SeekToCurrentErrorHandler")]),e._v(" can now recover (skip) records that keep failing, and, by default, does so after 10 failures.\nThey can be configured to publish failed records to a dead-letter topic.")]),e._v(" "),a("p",[e._v("Starting with version 2.2.4, the consumer’s group ID can be used while selecting the dead letter topic name.")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#after-rollback"}},[e._v("After-rollback Processor")]),e._v(", "),a("a",{attrs:{href:"#seek-to-current"}},[e._v("[seek-to-current]")]),e._v(", and "),a("a",{attrs:{href:"#dead-letters"}},[e._v("Publishing Dead-letter Records")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("ConsumerStoppingEvent")]),e._v(" has been added.\nSee "),a("a",{attrs:{href:"#events"}},[e._v("Application Events")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("SeekToCurrentErrorHandler")]),e._v(" can now be configured to commit the offset of a recovered record when the container is configured with "),a("code",[e._v("AckMode.MANUAL_IMMEDIATE")]),e._v(" (since 2.2.4).\nSee "),a("a",{attrs:{href:"#seek-to-current"}},[e._v("[seek-to-current]")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== @KafkaListener Changes")]),e._v(" "),a("p",[e._v("You can now override the "),a("code",[e._v("concurrency")]),e._v(" and "),a("code",[e._v("autoStartup")]),e._v(" properties of the listener container factory by setting properties on the annotation.\nYou can now add configuration to determine which headers (if any) are copied to a reply message.\nSee "),a("a",{attrs:{href:"#kafka-listener-annotation"}},[a("code",[e._v("@KafkaListener")]),e._v(" Annotation")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("You can now use "),a("code",[e._v("@KafkaListener")]),e._v(" as a meta-annotation on your own annotations.\nSee "),a("a",{attrs:{href:"#kafka-listener-meta"}},[a("code",[e._v("@KafkaListener")]),e._v(" as a Meta Annotation")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("It is now easier to configure a "),a("code",[e._v("Validator")]),e._v(" for "),a("code",[e._v("@Payload")]),e._v(" validation.\nSee "),a("a",{attrs:{href:"#kafka-validation"}},[a("code",[e._v("@KafkaListener")]),e._v(" "),a("code",[e._v("@Payload")]),e._v(" Validation")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("You can now specify kafka consumer properties directly on the annotation; these will override any properties with the same name defined in the consumer factory (since version 2.2.4).\nSee "),a("a",{attrs:{href:"#annotation-properties"}},[e._v("Annotation Properties")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Header Mapping Changes")]),e._v(" "),a("p",[e._v("Headers of type "),a("code",[e._v("MimeType")]),e._v(" and "),a("code",[e._v("MediaType")]),e._v(" are now mapped as simple strings in the "),a("code",[e._v("RecordHeader")]),e._v(" value.\nPreviously, they were mapped as JSON and only "),a("code",[e._v("MimeType")]),e._v(" was decoded."),a("code",[e._v("MediaType")]),e._v(" could not be decoded.\nThey are now simple strings for interoperability.")]),e._v(" "),a("p",[e._v("Also, the "),a("code",[e._v("DefaultKafkaHeaderMapper")]),e._v(" has a new "),a("code",[e._v("addToStringClasses")]),e._v(" method, allowing the specification of types that should be mapped by using "),a("code",[e._v("toString()")]),e._v(" instead of JSON.\nSee "),a("a",{attrs:{href:"#headers"}},[e._v("Message Headers")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Embedded Kafka Changes")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("KafkaEmbedded")]),e._v(" class and its "),a("code",[e._v("KafkaRule")]),e._v(" interface have been deprecated in favor of the "),a("code",[e._v("EmbeddedKafkaBroker")]),e._v(" and its JUnit 4 "),a("code",[e._v("EmbeddedKafkaRule")]),e._v(" wrapper.\nThe "),a("code",[e._v("@EmbeddedKafka")]),e._v(" annotation now populates an "),a("code",[e._v("EmbeddedKafkaBroker")]),e._v(" bean instead of the deprecated "),a("code",[e._v("KafkaEmbedded")]),e._v(".\nThis change allows the use of "),a("code",[e._v("@EmbeddedKafka")]),e._v(" in JUnit 5 tests.\nThe "),a("code",[e._v("@EmbeddedKafka")]),e._v(" annotation now has the attribute "),a("code",[e._v("ports")]),e._v(" to specify the port that populates the "),a("code",[e._v("EmbeddedKafkaBroker")]),e._v(".\nSee "),a("a",{attrs:{href:"#testing"}},[e._v("Testing Applications")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== JsonSerializer/Deserializer Enhancements")]),e._v(" "),a("p",[e._v("You can now provide type mapping information by using producer and consumer properties.")]),e._v(" "),a("p",[e._v("New constructors are available on the deserializer to allow overriding the type header information with the supplied target type.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("JsonDeserializer")]),e._v(" now removes any type information headers by default.")]),e._v(" "),a("p",[e._v("You can now configure the "),a("code",[e._v("JsonDeserializer")]),e._v(" to ignore type information headers by using a Kafka property (since 2.2.3).")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#serdes"}},[e._v("Serialization, Deserialization, and Message Conversion")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Kafka Streams Changes")]),e._v(" "),a("p",[e._v("The streams configuration bean must now be a "),a("code",[e._v("KafkaStreamsConfiguration")]),e._v(" object instead of a "),a("code",[e._v("StreamsConfig")]),e._v(" object.")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("StreamsBuilderFactoryBean")]),e._v(" has been moved from package "),a("code",[e._v("…​core")]),e._v(" to "),a("code",[e._v("…​config")]),e._v(".")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("KafkaStreamBrancher")]),e._v(" has been introduced for better end-user experience when conditional branches are built on top of "),a("code",[e._v("KStream")]),e._v(" instance.")]),e._v(" "),a("p",[e._v("See "),a("a",{attrs:{href:"#streams-kafka-streams"}},[e._v("Apache Kafka Streams Support")]),e._v(" and "),a("a",{attrs:{href:"#streams-config"}},[e._v("Configuration")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Transactional ID")]),e._v(" "),a("p",[e._v("When a transaction is started by the listener container, the "),a("code",[e._v("transactional.id")]),e._v(" is now the "),a("code",[e._v("transactionIdPrefix")]),e._v(" appended with "),a("code",[e._v("..")]),e._v(".\nThis change allows proper fencing of zombies, "),a("a",{attrs:{href:"https://www.confluent.io/blog/transactions-apache-kafka/",target:"_blank",rel:"noopener noreferrer"}},[e._v("as described here"),a("OutboundLink")],1),e._v(".")]),e._v(" "),a("p",[e._v("=== Changes Between 2.0 and 2.1")]),e._v(" "),a("p",[e._v("==== Kafka Client Version")]),e._v(" "),a("p",[e._v("This version requires the 1.0.0 "),a("code",[e._v("kafka-clients")]),e._v(" or higher.")]),e._v(" "),a("p",[e._v("The 1.1.x client is supported natively in version 2.2.")]),e._v(" "),a("p",[e._v("==== JSON Improvements")]),e._v(" "),a("p",[e._v("The "),a("code",[e._v("StringJsonMessageConverter")]),e._v(" and "),a("code",[e._v("JsonSerializer")]),e._v(" now add type information in "),a("code",[e._v("Headers")]),e._v(", letting the converter and "),a("code",[e._v("JsonDeserializer")]),e._v(" create specific types on reception, based on the message itself rather than a fixed configured type.\nSee "),a("a",{attrs:{href:"#serdes"}},[e._v("Serialization, Deserialization, and Message Conversion")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Container Stopping Error Handlers")]),e._v(" "),a("p",[e._v("Container error handlers are now provided for both record and batch listeners that treat any exceptions thrown by the listener as fatal/\nThey stop the container.\nSee "),a("a",{attrs:{href:"#annotation-error-handling"}},[e._v("Handling Exceptions")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Pausing and Resuming Containers")]),e._v(" "),a("p",[e._v("The listener containers now have "),a("code",[e._v("pause()")]),e._v(" and "),a("code",[e._v("resume()")]),e._v(" methods (since version 2.1.3).\nSee "),a("a",{attrs:{href:"#pause-resume"}},[e._v("Pausing and Resuming Listener Containers")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Stateful Retry")]),e._v(" "),a("p",[e._v("Starting with version 2.1.3, you can configure stateful retry.\nSee "),a("a",{attrs:{href:"#stateful-retry"}},[e._v("[stateful-retry]")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Client ID")]),e._v(" "),a("p",[e._v("Starting with version 2.1.1, you can now set the "),a("code",[e._v("client.id")]),e._v(" prefix on "),a("code",[e._v("@KafkaListener")]),e._v(".\nPreviously, to customize the client ID, you needed a separate consumer factory (and container factory) per listener.\nThe prefix is suffixed with "),a("code",[e._v("-n")]),e._v(" to provide unique client IDs when you use concurrency.")]),e._v(" "),a("p",[e._v("==== Logging Offset Commits")]),e._v(" "),a("p",[e._v("By default, logging of topic offset commits is performed with the "),a("code",[e._v("DEBUG")]),e._v(" logging level.\nStarting with version 2.1.2, a new property in "),a("code",[e._v("ContainerProperties")]),e._v(" called "),a("code",[e._v("commitLogLevel")]),e._v(" lets you specify the log level for these messages.\nSee "),a("a",{attrs:{href:"#kafka-container"}},[e._v("Using "),a("code",[e._v("KafkaMessageListenerContainer")])]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Default @KafkaHandler")]),e._v(" "),a("p",[e._v("Starting with version 2.1.3, you can designate one of the "),a("code",[e._v("@KafkaHandler")]),e._v(" annotations on a class-level "),a("code",[e._v("@KafkaListener")]),e._v(" as the default.\nSee "),a("a",{attrs:{href:"#class-level-kafkalistener"}},[a("code",[e._v("@KafkaListener")]),e._v(" on a Class")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== ReplyingKafkaTemplate")]),e._v(" "),a("p",[e._v("Starting with version 2.1.3, a subclass of "),a("code",[e._v("KafkaTemplate")]),e._v(" is provided to support request/reply semantics.\nSee "),a("a",{attrs:{href:"#replying-template"}},[e._v("Using "),a("code",[e._v("ReplyingKafkaTemplate")])]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== ChainedKafkaTransactionManager")]),e._v(" "),a("p",[e._v("Version 2.1.3 introduced the "),a("code",[e._v("ChainedKafkaTransactionManager")]),e._v(".\n(It is now deprecated).")]),e._v(" "),a("p",[e._v("==== Migration Guide from 2.0")]),e._v(" "),a("p",[e._v("See the "),a("a",{attrs:{href:"https://github.com/spring-projects/spring-kafka/wiki/Spring-for-Apache-Kafka-2.0-to-2.1-Migration-Guide",target:"_blank",rel:"noopener noreferrer"}},[e._v("2.0 to 2.1 Migration"),a("OutboundLink")],1),e._v(" guide.")]),e._v(" "),a("p",[e._v("=== Changes Between 1.3 and 2.0")]),e._v(" "),a("p",[e._v("==== Spring Framework and Java Versions")]),e._v(" "),a("p",[e._v("The Spring for Apache Kafka project now requires Spring Framework 5.0 and Java 8.")]),e._v(" "),a("p",[e._v("==== "),a("code",[e._v("@KafkaListener")]),e._v(" Changes")]),e._v(" "),a("p",[e._v("You can now annotate "),a("code",[e._v("@KafkaListener")]),e._v(" methods (and classes and "),a("code",[e._v("@KafkaHandler")]),e._v(" methods) with "),a("code",[e._v("@SendTo")]),e._v(".\nIf the method returns a result, it is forwarded to the specified topic.\nSee "),a("a",{attrs:{href:"#annotation-send-to"}},[e._v("Forwarding Listener Results using "),a("code",[e._v("@SendTo")])]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Message Listeners")]),e._v(" "),a("p",[e._v("Message listeners can now be aware of the "),a("code",[e._v("Consumer")]),e._v(" object.\nSee "),a("a",{attrs:{href:"#message-listeners"}},[e._v("Message Listeners")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Using "),a("code",[e._v("ConsumerAwareRebalanceListener")])]),e._v(" "),a("p",[e._v("Rebalance listeners can now access the "),a("code",[e._v("Consumer")]),e._v(" object during rebalance notifications.\nSee "),a("a",{attrs:{href:"#rebalance-listeners"}},[e._v("Rebalancing Listeners")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("=== Changes Between 1.2 and 1.3")]),e._v(" "),a("p",[e._v("==== Support for Transactions")]),e._v(" "),a("p",[e._v("The 0.11.0.0 client library added support for transactions.\nThe "),a("code",[e._v("KafkaTransactionManager")]),e._v(" and other support for transactions have been added.\nSee "),a("a",{attrs:{href:"#transactions"}},[e._v("Transactions")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Support for Headers")]),e._v(" "),a("p",[e._v("The 0.11.0.0 client library added support for message headers.\nThese can now be mapped to and from "),a("code",[e._v("spring-messaging")]),e._v(" "),a("code",[e._v("MessageHeaders")]),e._v(".\nSee "),a("a",{attrs:{href:"#headers"}},[e._v("Message Headers")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Creating Topics")]),e._v(" "),a("p",[e._v("The 0.11.0.0 client library provides an "),a("code",[e._v("AdminClient")]),e._v(", which you can use to create topics.\nThe "),a("code",[e._v("KafkaAdmin")]),e._v(" uses this client to automatically add topics defined as "),a("code",[e._v("@Bean")]),e._v(" instances.")]),e._v(" "),a("p",[e._v("==== Support for Kafka Timestamps")]),e._v(" "),a("p",[a("code",[e._v("KafkaTemplate")]),e._v(" now supports an API to add records with timestamps.\nNew "),a("code",[e._v("KafkaHeaders")]),e._v(" have been introduced regarding "),a("code",[e._v("timestamp")]),e._v(" support.\nAlso, new "),a("code",[e._v("KafkaConditions.timestamp()")]),e._v(" and "),a("code",[e._v("KafkaMatchers.hasTimestamp()")]),e._v(" testing utilities have been added.\nSee "),a("a",{attrs:{href:"#kafka-template"}},[e._v("Using "),a("code",[e._v("KafkaTemplate")])]),e._v(", "),a("a",{attrs:{href:"#kafka-listener-annotation"}},[a("code",[e._v("@KafkaListener")]),e._v(" Annotation")]),e._v(", and "),a("a",{attrs:{href:"#testing"}},[e._v("Testing Applications")]),e._v(" for more details.")]),e._v(" "),a("p",[e._v("==== "),a("code",[e._v("@KafkaListener")]),e._v(" Changes")]),e._v(" "),a("p",[e._v("You can now configure a "),a("code",[e._v("KafkaListenerErrorHandler")]),e._v(" to handle exceptions.\nSee "),a("a",{attrs:{href:"#annotation-error-handling"}},[e._v("Handling Exceptions")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("By default, the "),a("code",[e._v("@KafkaListener")]),e._v(" "),a("code",[e._v("id")]),e._v(" property is now used as the "),a("code",[e._v("group.id")]),e._v(" property, overriding the property configured in the consumer factory (if present).\nFurther, you can explicitly configure the "),a("code",[e._v("groupId")]),e._v(" on the annotation.\nPreviously, you would have needed a separate container factory (and consumer factory) to use different "),a("code",[e._v("group.id")]),e._v(" values for listeners.\nTo restore the previous behavior of using the factory configured "),a("code",[e._v("group.id")]),e._v(", set the "),a("code",[e._v("idIsGroup")]),e._v(" property on the annotation to "),a("code",[e._v("false")]),e._v(".")]),e._v(" "),a("p",[e._v("==== "),a("code",[e._v("@EmbeddedKafka")]),e._v(" Annotation")]),e._v(" "),a("p",[e._v("For convenience, a test class-level "),a("code",[e._v("@EmbeddedKafka")]),e._v(" annotation is provided, to register "),a("code",[e._v("KafkaEmbedded")]),e._v(" as a bean.\nSee "),a("a",{attrs:{href:"#testing"}},[e._v("Testing Applications")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("==== Kerberos Configuration")]),e._v(" "),a("p",[e._v("Support for configuring Kerberos is now provided.\nSee "),a("a",{attrs:{href:"#kerberos"}},[e._v("JAAS and Kerberos")]),e._v(" for more information.")]),e._v(" "),a("p",[e._v("=== Changes Between 1.1 and 1.2")]),e._v(" "),a("p",[e._v("This version uses the 0.10.2.x client.")]),e._v(" "),a("p",[e._v("=== Changes Between 1.0 and 1.1")]),e._v(" "),a("p",[e._v("==== Kafka Client")]),e._v(" "),a("p",[e._v("This version uses the Apache Kafka 0.10.x.x client.")]),e._v(" "),a("p",[e._v("==== Batch Listeners")]),e._v(" "),a("p",[e._v("Listeners can be configured to receive the entire batch of messages returned by the "),a("code",[e._v("consumer.poll()")]),e._v(" operation, rather than one at a time.")]),e._v(" "),a("p",[e._v("==== Null Payloads")]),e._v(" "),a("p",[e._v("Null payloads are used to “delete” keys when you use log compaction.")]),e._v(" "),a("p",[e._v("==== Initial Offset")]),e._v(" "),a("p",[e._v("When explicitly assigning partitions, you can now configure the initial offset relative to the current position for the consumer group, rather than absolute or relative to the current end.")]),e._v(" "),a("p",[e._v("==== Seek")]),e._v(" "),a("p",[e._v("You can now seek the position of each topic or partition.\nYou can use this to set the initial position during initialization when group management is in use and Kafka assigns the partitions.\nYou can also seek when an idle container is detected or at any arbitrary point in your application’s execution.\nSee "),a("a",{attrs:{href:"#seek"}},[e._v("Seeking to a Specific Offset")]),e._v(" for more information.")])])}),[],!1,null,null,null);t.default=r.exports}}]);