diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index ae5cce3b95712519720289b6ebb32fce221bdab3..9cc41e1bd87c5265d07124f840073622a67ece8c 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -202,7 +202,7 @@ module.exports = { "build-tool-plugins.md", "howto.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], '/en/spring-framework/': [ @@ -220,7 +220,7 @@ module.exports = { "integration.md", "languages.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], '/en/spring-data/': [ @@ -231,7 +231,7 @@ module.exports = { children: [ "spring-data.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], '/en/spring-cloud/': [ @@ -261,7 +261,7 @@ module.exports = { "spring-cloud-vault.md", "spring-cloud-zookeeper.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], '/en/spring-cloud-data-flow/': [ @@ -272,7 +272,7 @@ module.exports = { children: [ "spring-cloud-dataflow.md", ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], '/en/spring-security/': [ @@ -435,7 +435,7 @@ module.exports = { "reactive-test-web-oauth2.md", "reactive-configuration-webflux.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/en/spring-for-graphql/": [ @@ -444,7 +444,7 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: ["spring-graphql.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/en/spring-session/": [ @@ -470,12 +470,12 @@ module.exports = { "api.md", "upgrading.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/en/spring-integration/": [ { - title: "Spring Integration 文档", + title: "Spring Integration", sidebarDepth: 2, collapsable: false, children: [ @@ -535,163 +535,163 @@ module.exports = { "resources.md", "history.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 + } + ], + "/en/spring-hateoas/": [ + { + title: "Spring HATEOAS", + sidebarDepth: 2, + collapsable: false, + children: ["spring-hateoas.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-rest-docs/": [ + { + title: "Spring REST Docs", + sidebarDepth: 2, + collapsable: false, + children: ["spring-restdocs.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-batch/": [ + { + title: "Spring Batch", + sidebarDepth: 2, + collapsable: false, + children: [ + "spring-batch-intro.md", + "whatsnew.md", + "domain.md", + "job.md", + "step.md", + "readersAndWriters.md", + "processor.md", + "scalability.md", + "repeat.md", + "retry.md", + "testing.md", + "common-patterns.md", + "jsr-352.md", + "spring-batch-integration.md", + "monitoring-and-metrics.md", + "appendix.md", + "schema-appendix.md", + "transaction-appendix.md", + "glossary.md" + ], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-amqp/": [ + { + title: "Spring AMQP", + sidebarDepth: 2, + collapsable: false, + children: ["spring-amqp.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-credhub/": [ + { + title: "Spring CredHub", + sidebarDepth: 2, + collapsable: false, + children: ["spring-credhub.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-flo/": [ + { + title: "Spring Flo", + sidebarDepth: 2, + collapsable: false, + children: ["spring-flo.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-for-apache-kafka/": [ + { + title: "Spring for Apache Kafka", + sidebarDepth: 2, + collapsable: false, + children: ["spring-kafka.md"], + initialOpenGroupIndex: 0 + } + ], +
"/en/spring-ldap/": [ + { + title: "Spring for Apache Kafka", + sidebarDepth: 2, + collapsable: false, + children: ["spring-ldap.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-shell/": [ + { + title: "Spring Shell", + sidebarDepth: 2, + collapsable: false, + children: ["spring-shell.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-statemachine/": [ + { + title: "Spring Statemachine", + sidebarDepth: 2, + collapsable: false, + children: ["spring-statemachine.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-vault/": [ + { + title: "Spring Vault", + sidebarDepth: 2, + collapsable: false, + children: ["spring-vault.md"], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-web-flow/": [ + { + title: "Spring Web Flow", + sidebarDepth: 2, + collapsable: false, + children: [ + "preface.md", + "introduction.md", + "whatsnew.md", + "defining-flows.md", + "el.md", + "views.md", + "actions.md", + "flow-managed-persistence.md", + "flow-security.md", + "flow-inheritance.md", + "system-setup.md", + "spring-mvc.md", + "spring-js.md", + "spring-faces.md", + "testing.md", + "field-mappings.md" + ], + initialOpenGroupIndex: 0 + } + ], + "/en/spring-web-services/": [ + { + title: "Spring Web Services", + sidebarDepth: 2, + collapsable: false, + children: ["spring-web-service.md"], + initialOpenGroupIndex: 0 } ], - // "/spring-hateoas/": [ - // { - // title: "Spring HATEOAS 文档", - // sidebarDepth: 2, - // collapsable: false, - // children: ["/spring-hateoas/spring-hateoas.md"], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], - // "/spring-rest-docs/": [ - // { - // title: "Spring HATEOAS 文档", - // sidebarDepth: 2, - // collapsable: false, - // children: ["/spring-rest-docs/spring-restdocs.md"], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], - // "/spring-batch/": [ - // { - // title: "Spring Batch 文档", - // sidebarDepth: 2, - // collapsable: false, - // children: [ - // "/spring-batch/spring-batch-intro.md", - // "/spring-batch/whatsnew.md", - // "/spring-batch/domain.md", - // "/spring-batch/job.md", - // "/spring-batch/step.md", - // "/spring-batch/readersAndWriters.md", - // "/spring-batch/processor.md", - // "/spring-batch/scalability.md", - // "/spring-batch/repeat.md", - // "/spring-batch/retry.md", - // "/spring-batch/testing.md", - // "/spring-batch/common-patterns.md", - // "/spring-batch/jsr-352.md", - // "/spring-batch/spring-batch-integration.md", - // "/spring-batch/monitoring-and-metrics.md", - // "/spring-batch/appendix.md", - // "/spring-batch/schema-appendix.md", - // "/spring-batch/transaction-appendix.md", - // "/spring-batch/glossary.md" - // ], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], - // "/spring-amqp/": [ - // { - // title: "Spring AMQP 文档", - // sidebarDepth: 2, - // collapsable: false, - // children: ["/spring-amqp/spring-amqp.md"], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], - // "/spring-credhub/": [ - // { - // title: "Spring CredHub 文档", - // sidebarDepth: 2, - // collapsable: false, - // children: ["/spring-credhub/spring-credhub.md"], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], - // "/spring-flo/": [ - // { - // title: "Spring Flo 文档", - // sidebarDepth: 2, - // collapsable: false, - // children: ["/spring-flo/spring-flo.md"], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], - // "/spring-for-apache-kafka/": [ - // { - // title: "Spring for Apache Kafka 文档", - // sidebarDepth: 2, - // collapsable: false, - // children: ["/spring-for-apache-kafka/spring-kafka.md"], - // 
initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], - // "/spring-ldap/": [ - // { - // title: "Spring for Apache Kafka 文档", - // sidebarDepth: 2, - // collapsable: false, - // children: ["/spring-ldap/spring-ldap.md"], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], - // "/spring-shell/": [ - // { - // title: "Spring Shell 文档", - // sidebarDepth: 2, - // collapsable: false, - // children: ["/spring-shell/spring-shell.md"], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], - // "/spring-statemachine/": [ - // { - // title: "Spring Statemachine 文档", - // sidebarDepth: 2, - // collapsable: false, - // children: ["/spring-statemachine/spring-statemachine.md"], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], - // "/spring-vault/": [ - // { - // title: "Spring Vault 文档", - // sidebarDepth: 2, - // collapsable: false, - // children: ["/spring-vault/spring-vault.md"], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], - // "/spring-web-flow/": [ - // { - // title: "Spring Web Flow 文档", - // sidebarDepth: 2, - // collapsable: false, - // children: [ - // "/spring-web-flow/preface.md", - // "/spring-web-flow/introduction.md", - // "/spring-web-flow/whatsnew.md", - // "/spring-web-flow/defining-flows.md", - // "/spring-web-flow/el.md", - // "/spring-web-flow/views.md", - // "/spring-web-flow/actions.md", - // "/spring-web-flow/flow-managed-persistence.md", - // "/spring-web-flow/flow-security.md", - // "/spring-web-flow/flow-inheritance.md", - // "/spring-web-flow/system-setup.md", - // "/spring-web-flow/spring-mvc.md", - // "/spring-web-flow/spring-js.md", - // "/spring-web-flow/spring-faces.md", - // "/spring-web-flow/testing.md", - // "/spring-web-flow/field-mappings.md" - // ], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], - // "/spring-web-services/": [ - // { - // title: "Spring Web Services 文档", - // sidebarDepth: 2, - // collapsable: false, - // children: ["/spring-web-services/spring-web-service.md"], - // initialOpenGroupIndex: 0 // 可选的, 默认值是 0 - // } - // ], // fallback '/en/spring/': @@ -705,7 +705,7 @@ module.exports = { "introducing-spring-boot.md", "quickstart.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 }, { title: 'GUIDE', @@ -716,7 +716,7 @@ module.exports = { "installing.md", "initializr.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 }, { title: 'IDE', @@ -726,7 +726,7 @@ module.exports = { "vscode_java.md", "intellij_idea.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 }, { title: 'DEMO', @@ -737,7 +737,7 @@ module.exports = { "rest-service.md", "consuming-rest.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ] } @@ -820,7 +820,7 @@ module.exports = { "/spring-boot/build-tool-plugins.md", "/spring-boot/howto.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-framework/": [ @@ -838,7 +838,7 @@ module.exports = { "/spring-framework/integration.md", "/spring-framework/languages.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-data/": [ @@ -849,7 +849,7 @@ module.exports = { children: [ "/spring-data/spring-data.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-cloud/": [ @@ -879,7 +879,7 @@ module.exports = { "/spring-cloud/spring-cloud-vault.md", "/spring-cloud/spring-cloud-zookeeper.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-cloud-data-flow/": 
[ @@ -888,7 +888,7 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: ["/spring-cloud-data-flow/spring-cloud-dataflow.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-security/": [ @@ -1051,7 +1051,7 @@ module.exports = { "/spring-security/reactive-test-web-oauth2.md", "/spring-security/reactive-configuration-webflux.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], @@ -1061,7 +1061,7 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: ["/spring-for-graphql/spring-graphql.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-session/": [ @@ -1087,7 +1087,7 @@ module.exports = { "/spring-session/api.md", "/spring-session/upgrading.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-integration/": [ @@ -1152,7 +1152,7 @@ module.exports = { "/spring-integration/resources.md", "/spring-integration/history.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-hateoas/": [ @@ -1161,7 +1161,7 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: ["/spring-hateoas/spring-hateoas.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-rest-docs/": [ @@ -1170,7 +1170,7 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: ["/spring-rest-docs/spring-restdocs.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-batch/": [ @@ -1199,7 +1199,7 @@ module.exports = { "/spring-batch/transaction-appendix.md", "/spring-batch/glossary.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-amqp/": [ @@ -1208,7 +1208,7 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: ["/spring-amqp/spring-amqp.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-credhub/": [ @@ -1217,7 +1217,7 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: ["/spring-credhub/spring-credhub.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-flo/": [ @@ -1226,7 +1226,7 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: ["/spring-flo/spring-flo.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-for-apache-kafka/": [ @@ -1235,7 +1235,7 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: ["/spring-for-apache-kafka/spring-kafka.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-ldap/": [ @@ -1244,7 +1244,7 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: ["/spring-ldap/spring-ldap.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-shell/": [ @@ -1253,7 +1253,7 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: ["/spring-shell/spring-shell.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-statemachine/": [ @@ -1262,7 +1262,7 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: ["/spring-statemachine/spring-statemachine.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-vault/": [ @@ -1271,7 +1271,7 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: ["/spring-vault/spring-vault.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-web-flow/": [ @@ -1297,7 +1297,7 @@ 
module.exports = { "/spring-web-flow/testing.md", "/spring-web-flow/field-mappings.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], "/spring-web-services/": [ @@ -1306,7 +1306,7 @@ module.exports = { sidebarDepth: 2, collapsable: false, children: ["/spring-web-services/spring-web-service.md"], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ], // fallback @@ -1327,7 +1327,7 @@ module.exports = { "introducing-spring-boot.md", "quickstart.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 }, { title: "教程", @@ -1338,7 +1338,7 @@ module.exports = { "installing.md", "initializr.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 }, { title: "编辑器", @@ -1348,7 +1348,7 @@ module.exports = { "vscode_java.md", "intellij_idea.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 }, { title: "代码案例", @@ -1359,7 +1359,7 @@ module.exports = { "rest-service.md", "consuming-rest.md" ], - initialOpenGroupIndex: 0 // 可选的, 默认值是 0 + initialOpenGroupIndex: 0 } ] } diff --git a/docs/en/spring-cloud-data-flow/READEME.md b/docs/en/spring-amqp/README.md similarity index 100% rename from docs/en/spring-cloud-data-flow/READEME.md rename to docs/en/spring-amqp/README.md diff --git a/docs/en/spring-amqp/spring-amqp.md b/docs/en/spring-amqp/spring-amqp.md new file mode 100644 index 0000000000000000000000000000000000000000..631c572daba81673cd6ca452814bced763f4c341 --- /dev/null +++ b/docs/en/spring-amqp/spring-amqp.md @@ -0,0 +1,8287 @@ +# Spring AMQP + +## 1. Preface + +The Spring AMQP project applies core Spring concepts to the development of AMQP-based messaging solutions. +We provide a “template” as a high-level abstraction for sending and receiving messages. +We also provide support for message-driven POJOs. +These libraries facilitate management of AMQP resources while promoting the use of dependency injection and declarative configuration. +In all of these cases, you can see similarities to the JMS support in the Spring Framework. +For other project-related information, visit the Spring AMQP project [homepage](https://projects.spring.io/spring-amqp/). + +## 2. What’s New + +### 2.1. Changes in 2.4 Since 2.3 + +This section describes the changes between version 2.3 and version 2.4. +See [Change History](#change-history) for changes in previous versions. + +#### 2.1.1. `@RabbitListener` Changes + +`MessageProperties` is now available for argument matching. +See [Annotated Endpoint Method Signature](#async-annotation-driven-enable-signature) for more information. + +#### 2.1.2. `RabbitAdmin` Changes + +A new property `recoverManualDeclarations` allows recovery of manually declared queues/exchanges/bindings. +See [Recovering Auto-Delete Declarations](#declarable-recovery) for more information. + +#### 2.1.3. Remoting Support + +Remoting using Spring Framework’s RMI support is deprecated and will be removed in 3.0. +See [Spring Remoting with AMQP](#remoting) for more information. + +## 3. Introduction + +This first part of the reference documentation is a high-level overview of Spring AMQP and the underlying concepts. +It includes some code snippets to get you up and running as quickly as possible. + +### 3.1. Quick Tour for the impatient + +#### 3.1.1. Introduction + +This is the five-minute tour to get started with Spring AMQP. + +Prerequisites: Install and run the RabbitMQ broker ([https://www.rabbitmq.com/download.html](https://www.rabbitmq.com/download.html)).
+Then grab the spring-rabbit JAR and all its dependencies - the easiest way to do so is to declare a dependency in your build tool. +For example, for Maven, you can do something resembling the following: + +``` +<dependency> +  <groupId>org.springframework.amqp</groupId> +  <artifactId>spring-rabbit</artifactId> +  <version>2.4.2</version> +</dependency> +``` + +For Gradle, you can do something resembling the following: + +``` +compile 'org.springframework.amqp:spring-rabbit:2.4.2' +``` + +##### Compatibility + +The minimum Spring Framework version dependency is 5.2.0. + +The minimum `amqp-client` Java client library version is 5.7.0. + +##### Very, Very Quick + +This section offers the fastest introduction. + +First, add the following `import` statements to make the examples later in this section work: + +``` +import org.springframework.amqp.core.AmqpAdmin; +import org.springframework.amqp.core.AmqpTemplate; +import org.springframework.amqp.core.Queue; +import org.springframework.amqp.rabbit.connection.CachingConnectionFactory; +import org.springframework.amqp.rabbit.connection.ConnectionFactory; +import org.springframework.amqp.rabbit.core.RabbitAdmin; +import org.springframework.amqp.rabbit.core.RabbitTemplate; +``` + +The following example uses plain, imperative Java to send and receive a message: + +``` +ConnectionFactory connectionFactory = new CachingConnectionFactory(); +AmqpAdmin admin = new RabbitAdmin(connectionFactory); +admin.declareQueue(new Queue("myqueue")); +AmqpTemplate template = new RabbitTemplate(connectionFactory); +template.convertAndSend("myqueue", "foo"); +String foo = (String) template.receiveAndConvert("myqueue"); +``` + +Note that there is also a `ConnectionFactory` in the native Java Rabbit client. +We use the Spring abstraction in the preceding code. +It caches channels (and optionally connections) for reuse. +We rely on the default exchange in the broker (since none is specified in the send), and the default binding of all queues to the default exchange by their name (thus, we can use the queue name as a routing key in the send). +Those behaviors are defined in the AMQP specification. + +##### With XML Configuration + +The following example is the same as the preceding example but externalizes the resource configuration to XML: + +``` +ApplicationContext context = +    new GenericXmlApplicationContext("classpath:/rabbit-context.xml"); +AmqpTemplate template = context.getBean(AmqpTemplate.class); +template.convertAndSend("myqueue", "foo"); +String foo = (String) template.receiveAndConvert("myqueue"); +``` + +``` +<beans xmlns="http://www.springframework.org/schema/beans" +       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" +       xmlns:rabbit="http://www.springframework.org/schema/rabbit" +       xsi:schemaLocation="http://www.springframework.org/schema/rabbit +           https://www.springframework.org/schema/rabbit/spring-rabbit.xsd +           http://www.springframework.org/schema/beans +           https://www.springframework.org/schema/beans/spring-beans.xsd"> + +    <rabbit:connection-factory id="connectionFactory"/> + +    <rabbit:template id="amqpTemplate" connection-factory="connectionFactory"/> + +    <rabbit:admin connection-factory="connectionFactory"/> + +    <rabbit:queue name="myqueue"/> + +</beans> +``` + +By default, the `<rabbit:admin/>` declaration automatically looks for beans of type `Queue`, `Exchange`, and `Binding` and declares them to the broker on behalf of the user. +As a result, you need not use that bean explicitly in the simple Java driver. +There are plenty of options to configure the properties of the components in the XML schema. +You can use auto-complete features of your XML editor to explore them and look at their documentation. + +##### With Java Configuration + +The following example repeats the same example as the preceding example but with the external configuration defined in Java: + +``` +ApplicationContext context = +    new AnnotationConfigApplicationContext(RabbitConfiguration.class); +AmqpTemplate template = context.getBean(AmqpTemplate.class); +template.convertAndSend("myqueue", "foo"); +String foo = (String) template.receiveAndConvert("myqueue"); + +........
+ +@Configuration +public class RabbitConfiguration { + +    @Bean +    public CachingConnectionFactory connectionFactory() { +        return new CachingConnectionFactory("localhost"); +    } + +    @Bean +    public RabbitAdmin amqpAdmin() { +        return new RabbitAdmin(connectionFactory()); +    } + +    @Bean +    public RabbitTemplate rabbitTemplate() { +        return new RabbitTemplate(connectionFactory()); +    } + +    @Bean +    public Queue myQueue() { +       return new Queue("myqueue"); +    } +} +``` + +##### With Spring Boot Auto Configuration and an Async POJO Listener + +Spring Boot automatically configures the infrastructure beans, as the following example shows: + +``` +@SpringBootApplication +public class Application { + +    public static void main(String[] args) { +        SpringApplication.run(Application.class, args); +    } + +    @Bean +    public ApplicationRunner runner(AmqpTemplate template) { +        return args -> template.convertAndSend("myqueue", "foo"); +    } + +    @Bean +    public Queue myQueue() { +        return new Queue("myqueue"); +    } + +    @RabbitListener(queues = "myqueue") +    public void listen(String in) { +        System.out.println(in); +    } + +} +``` + +## 4. Reference + +This part of the reference documentation details the various components that comprise Spring AMQP. +The [main chapter](#amqp) covers the core classes to develop an AMQP application. +This part also includes a chapter about the [sample applications](#sample-apps). + +### 4.1. Using Spring AMQP + +This chapter explores the interfaces and classes that are the essential components for developing applications with Spring AMQP. + +#### 4.1.1. AMQP Abstractions + +Spring AMQP consists of two modules (each represented by a JAR in the distribution): `spring-amqp` and `spring-rabbit`. +The 'spring-amqp' module contains the `org.springframework.amqp.core` package. +Within that package, you can find the classes that represent the core AMQP “model”. +Our intention is to provide generic abstractions that do not rely on any particular AMQP broker implementation or client library. +End user code can be more portable across vendor implementations as it can be developed against the abstraction layer only. +These abstractions are then implemented by broker-specific modules, such as 'spring-rabbit'. +There is currently only a RabbitMQ implementation. +However, the abstractions have been validated in .NET using Apache Qpid in addition to RabbitMQ. +Since AMQP operates at the protocol level, in principle, you can use the RabbitMQ client with any broker that supports the same protocol version, but we do not test any other brokers at present. + +This overview assumes that you are already familiar with the basics of the AMQP specification. +If not, have a look at the resources listed in [Other Resources](#resources). + +##### `Message` + +The 0-9-1 AMQP specification does not define a `Message` class or interface. +Instead, when performing an operation such as `basicPublish()`, the content is passed as a byte-array argument and additional properties are passed in as separate arguments. +Spring AMQP defines a `Message` class as part of a more general AMQP domain model representation. +The purpose of the `Message` class is to encapsulate the body and properties within a single instance so that the API can, in turn, be simpler.
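+For example, a message can be built by populating the properties and the body together, as the following minimal sketch shows (the `traceId` header name is arbitrary and only illustrates a user-defined header): + +``` +MessageProperties props = new MessageProperties(); +props.setContentType(MessageProperties.CONTENT_TYPE_TEXT_PLAIN); +props.setHeader("traceId", "abc-123"); // user-defined header +Message message = new Message("Hello, world!".getBytes(), props); +``` +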
+The following example shows the `Message` class definition: + +``` +public class Message { + + private final MessageProperties messageProperties; + + private final byte[] body; + + public Message(byte[] body, MessageProperties messageProperties) { + this.body = body; + this.messageProperties = messageProperties; + } + + public byte[] getBody() { + return this.body; + } + + public MessageProperties getMessageProperties() { + return this.messageProperties; + } +} +``` + +The `MessageProperties` interface defines several common properties, such as 'messageId', 'timestamp', 'contentType', and several more. +You can also extend those properties with user-defined 'headers' by calling the `setHeader(String key, Object value)` method. + +| |Starting with versions `1.5.7`, `1.6.11`, `1.7.4`, and `2.0.0`, if a message body is a serialized `Serializable` java object, it is no longer deserialized (by default) when performing `toString()` operations (such as in log messages).
This is to prevent unsafe deserialization.
By default, only `java.util` and `java.lang` classes are deserialized.
To revert to the previous behavior, you can add allowable class/package patterns by invoking `Message.addAllowedListPatterns(…​)`.
A simple `*` wildcard is supported, for example `com.something.*, *.MyClass`.
Bodies that cannot be deserialized are represented by `byte[]` in log messages.| +|---|---| + +##### Exchange + +The `Exchange` interface represents an AMQP Exchange, which is what a Message Producer sends to. +Each Exchange within a virtual host of a broker has a unique name as well as a few other properties. +The following example shows the `Exchange` interface: + +``` +public interface Exchange { + +    String getName(); + +    String getExchangeType(); + +    boolean isDurable(); + +    boolean isAutoDelete(); + +    Map<String, Object> getArguments(); + +} +``` + +As you can see, an `Exchange` also has a 'type' represented by constants defined in `ExchangeTypes`. +The basic types are: `direct`, `topic`, `fanout`, and `headers`. +In the core package, you can find implementations of the `Exchange` interface for each of those types. +The behavior varies across these `Exchange` types in terms of how they handle bindings to queues. +For example, a `Direct` exchange lets a queue be bound by a fixed routing key (often the queue’s name). +A `Topic` exchange supports bindings with routing patterns that may include the '\*' and '#' wildcards for 'exactly-one' and 'zero-or-more', respectively. +The `Fanout` exchange publishes to all queues that are bound to it without taking any routing key into consideration. +For much more information about these and the other Exchange types, see [Other Resources](#resources). + +| |The AMQP specification also requires that any broker provide a “default” direct exchange that has no name.
All queues that are declared are bound to that default `Exchange` with their names as routing keys.
You can learn more about the default Exchange’s usage within Spring AMQP in [`AmqpTemplate`](#amqp-template).| +|---|---| + +##### Queue + +The `Queue` class represents the component from which a message consumer receives messages. +Like the various `Exchange` classes, our implementation is intended to be an abstract representation of this core AMQP type. +The following listing shows the `Queue` class: + +``` +public class Queue { + +    private final String name; + +    private volatile boolean durable; + +    private volatile boolean exclusive; + +    private volatile boolean autoDelete; + +    private volatile Map<String, Object> arguments; + +    /** +     * The queue is durable, non-exclusive and non auto-delete. +     * +     * @param name the name of the queue. +     */ +    public Queue(String name) { +        this(name, true, false, false); +    } + +    // Getters and Setters omitted for brevity + +} +``` + +Notice that the constructor takes the queue name. +Depending on the implementation, the admin template may provide methods for generating a uniquely named queue. +Such queues can be useful as a “reply-to” address or in other **temporary** situations. +For that reason, the 'exclusive' and 'autoDelete' properties of an auto-generated queue would both be set to 'true'. + +| |See the section on queues in [Configuring the Broker](#broker-configuration) for information about declaring queues by using namespace support, including queue arguments.| +|---|---| + +##### Binding + +Given that a producer sends to an exchange and a consumer receives from a queue, the bindings that connect queues to exchanges are critical for connecting those producers and consumers via messaging. +In Spring AMQP, we define a `Binding` class to represent those connections. +This section reviews the basic options for binding queues to exchanges. + +You can bind a queue to a `DirectExchange` with a fixed routing key, as the following example shows: + +``` +new Binding(someQueue, someDirectExchange, "foo.bar"); +``` + +You can bind a queue to a `TopicExchange` with a routing pattern, as the following example shows: + +``` +new Binding(someQueue, someTopicExchange, "foo.*"); +``` + +You can bind a queue to a `FanoutExchange` with no routing key, as the following example shows: + +``` +new Binding(someQueue, someFanoutExchange); +``` + +We also provide a `BindingBuilder` to facilitate a “fluent API” style, as the following example shows: + +``` +Binding b = BindingBuilder.bind(someQueue).to(someTopicExchange).with("foo.*"); +``` + +| |For clarity, the preceding example shows the `BindingBuilder` class, but this style works well when using a static import for the 'bind()' method.| +|---|---| + +By itself, an instance of the `Binding` class only holds the data about a connection. +In other words, it is not an “active” component.
+However, as you will see later in [Configuring the Broker](#broker-configuration), the `AmqpAdmin` class can use `Binding` instances to actually trigger the binding actions on the broker. +Also, as you can see in that same section, you can define the `Binding` instances by using Spring’s `@Bean` annotations within `@Configuration` classes. +There is also a convenient base class that further simplifies that approach for generating AMQP-related bean definitions and recognizes the queues, exchanges, and bindings so that they are all declared on the AMQP broker upon application startup. + +The `AmqpTemplate` is also defined within the core package. +As one of the main components involved in actual AMQP messaging, it is discussed in detail in its own section (see [`AmqpTemplate`](#amqp-template)). + +#### 4.1.2. Connection and Resource Management + +Whereas the AMQP model we described in the previous section is generic and applicable to all implementations, when we get into the management of resources, the details are specific to the broker implementation. +Therefore, in this section, we focus on code that exists only within our “spring-rabbit” module since, at this point, RabbitMQ is the only supported implementation. + +The central component for managing a connection to the RabbitMQ broker is the `ConnectionFactory` interface. +The responsibility of a `ConnectionFactory` implementation is to provide an instance of `org.springframework.amqp.rabbit.connection.Connection`, which is a wrapper for `com.rabbitmq.client.Connection`. + +##### Choosing a Connection Factory + +There are three connection factories to choose from: + +* `PooledChannelConnectionFactory` + +* `ThreadChannelConnectionFactory` + +* `CachingConnectionFactory` + +The first two were added in version 2.3. + +For most use cases, the `PooledChannelConnectionFactory` should be used. +The `ThreadChannelConnectionFactory` can be used if you want to ensure strict message ordering without the need to use [Scoped Operations](#scoped-operations). +The `CachingConnectionFactory` should be used if you want to use correlated publisher confirmations or if you wish to open multiple connections, via its `CacheMode`. + +Simple publisher confirmations are supported by all three factories. + +When configuring a `RabbitTemplate` to use a [separate connection](#separate-connection), you can now, starting with version 2.3.2, configure the publishing connection factory to be a different type. +By default, the publishing factory is the same type and any properties set on the main factory are also propagated to the publishing factory. + +###### `PooledChannelConnectionFactory` + +This factory manages a single connection and two pools of channels, based on the Apache Pool2. +One pool is for transactional channels, the other is for non-transactional channels. +The pools are `GenericObjectPool` s with default configuration; a callback is provided to configure the pools; refer to the Apache documentation for more information. + +The Apache `commons-pool2` jar must be on the class path to use this factory.
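+If the jar is not already present, it can be added with a dependency along the following lines (the version shown is only an example; use a current `commons-pool2` release): + +``` +<dependency> +  <groupId>org.apache.commons</groupId> +  <artifactId>commons-pool2</artifactId> +  <version>2.11.1</version> +</dependency> +``` + +The following example configures the factory and its pool configurer callback: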
+ +``` +@Bean +PooledChannelConnectionFactory pcf() throws Exception { + ConnectionFactory rabbitConnectionFactory = new ConnectionFactory(); + rabbitConnectionFactory.setHost("localhost"); + PooledChannelConnectionFactory pcf = new PooledChannelConnectionFactory(rabbitConnectionFactory); + pcf.setPoolConfigurer((pool, tx) -> { + if (tx) { + // configure the transactional pool + } + else { + // configure the non-transactional pool + } + }); + return pcf; +} +``` + +###### `ThreadChannelConnectionFactory` + +This factory manages a single connection and two `ThreadLocal` s, one for transactional channels, the other for non-transactional channels. +This factory ensures that all operations on the same thread use the same channel (as long as it remains open). +This facilitates strict message ordering without the need for [Scoped Operations](#scoped-operations). +To avoid memory leaks, if your application uses many short-lived threads, you must call the factory’s `closeThreadChannel()` to release the channel resource. +Starting with version 2.3.7, a thread can transfer its channel(s) to another thread. +See [Strict Message Ordering in a Multi-Threaded Environment](#multi-strict) for more information. + +###### `CachingConnectionFactory` + +The third implementation provided is the `CachingConnectionFactory`, which, by default, establishes a single connection proxy that can be shared by the application. +Sharing of the connection is possible since the “unit of work” for messaging with AMQP is actually a “channel” (in some ways, this is similar to the relationship between a connection and a session in JMS). +The connection instance provides a `createChannel` method. +The `CachingConnectionFactory` implementation supports caching of those channels, and it maintains separate caches for channels based on whether they are transactional. +When creating an instance of `CachingConnectionFactory`, you can provide the 'hostname' through the constructor. +You should also provide the 'username' and 'password' properties. +To configure the size of the channel cache (the default is 25), you can call the`setChannelCacheSize()` method. + +Starting with version 1.3, you can configure the `CachingConnectionFactory` to cache connections as well as only channels. +In this case, each call to `createConnection()` creates a new connection (or retrieves an idle one from the cache). +Closing a connection returns it to the cache (if the cache size has not been reached). +Channels created on such connections are also cached. +The use of separate connections might be useful in some environments, such as consuming from an HA cluster, in +conjunction with a load balancer, to connect to different cluster members, and others. +To cache connections, set the `cacheMode` to `CacheMode.CONNECTION`. + +| |This does not limit the number of connections.
Rather, it specifies how many idle open connections are allowed.| +|---|-------------------------------------------------------------------------------------------------------------------| + +Starting with version 1.5.5, a new property called `connectionLimit` is provided. +When this property is set, it limits the total number of connections allowed. +When set, if the limit is reached, the `channelCheckoutTimeLimit` is used to wait for a connection to become idle. +If the time is exceeded, an `AmqpTimeoutException` is thrown. + +| |When the cache mode is `CONNECTION`, automatic declaration of queues and others
(See [Automatic Declaration of Exchanges, Queues, and Bindings](#automatic-declaration)) is NOT supported.

Also, at the time of this writing, the `amqp-client` library by default creates a fixed thread pool for each connection (default size: `Runtime.getRuntime().availableProcessors() * 2` threads).
When using a large number of connections, you should consider setting a custom `executor` on the `CachingConnectionFactory`.
Then, the same executor can be used by all connections and its threads can be shared.
The executor’s thread pool should be unbounded or set appropriately for the expected use (usually, at least one thread per connection).
If multiple channels are created on each connection, the pool size affects the concurrency, so a variable (or simple cached) thread pool executor would be most suitable.| +|---|---| + +It is important to understand that the cache size is (by default) not a limit but is merely the number of channels that can be cached. +With a cache size of, say, 10, any number of channels can actually be in use. +If more than 10 channels are being used and they are all returned to the cache, 10 go in the cache. +The remainder are physically closed. + +Starting with version 1.6, the default channel cache size has been increased from 1 to 25. +In high volume, multi-threaded environments, a small cache means that channels are created and closed at a high rate. +Increasing the default cache size can avoid this overhead. +You should monitor the channels in use through the RabbitMQ Admin UI and consider increasing the cache size further if you +see many channels being created and closed. +The cache grows only on-demand (to suit the concurrency requirements of the application), so this change does not +impact existing low-volume applications. + +Starting with version 1.4.2, the `CachingConnectionFactory` has a property called `channelCheckoutTimeout`. +When this property is greater than zero, the `channelCacheSize` becomes a limit on the number of channels that can be created on a connection. +If the limit is reached, calling threads block until a channel is available or this timeout is reached, in which case an `AmqpTimeoutException` is thrown. + +| |Channels used within the framework (for example, `RabbitTemplate`) are reliably returned to the cache.
If you create channels outside of the framework (for example,
by accessing the connections directly and invoking `createChannel()`), you must return them (by closing) reliably, perhaps in a `finally` block, to avoid running out of channels.| +|---|---| + +The following example shows how to create a new `connection`: + +``` +CachingConnectionFactory connectionFactory = new CachingConnectionFactory("somehost"); +connectionFactory.setUsername("guest"); +connectionFactory.setPassword("guest"); + +Connection connection = connectionFactory.createConnection(); +``` + +When using XML, the configuration might look like the following example: + +``` +<bean id="connectionFactory" +      class="org.springframework.amqp.rabbit.connection.CachingConnectionFactory"> +    <constructor-arg value="somehost"/> +    <property name="username" value="guest"/> +    <property name="password" value="guest"/> +</bean> +``` + +| |There is also a `SingleConnectionFactory` implementation that is available only in the unit test code of the framework.
It is simpler than `CachingConnectionFactory`, since it does not cache channels, but it is not intended for practical usage outside of simple tests due to its lack of performance and resilience.
If you need to implement your own `ConnectionFactory` for some reason, the `AbstractConnectionFactory` base class may provide a nice starting point.| +|---|---| + +A `ConnectionFactory` can be created quickly and conveniently by using the rabbit namespace, as follows: + +``` +<rabbit:connection-factory id="connectionFactory"/> +``` + +In most cases, this approach is preferable, since the framework can choose the best defaults for you. +The created instance is a `CachingConnectionFactory`. +Keep in mind that the default cache size for channels is 25. +If you want more channels to be cached, set a larger value by setting the 'channelCacheSize' property. +In XML, it would look like the following: + +``` +<bean id="connectionFactory" +      class="org.springframework.amqp.rabbit.connection.CachingConnectionFactory"> +    <constructor-arg value="somehost"/> +    <property name="username" value="guest"/> +    <property name="password" value="guest"/> +    <property name="channelCacheSize" value="50"/> +</bean> +``` + +Also, with the namespace, you can add the 'channel-cache-size' attribute, as follows: + +``` +<rabbit:connection-factory id="connectionFactory" channel-cache-size="50"/> +``` + +The default cache mode is `CHANNEL`, but you can configure it to cache connections instead. +In the following example, we use `connection-cache-size`: + +``` +<rabbit:connection-factory id="connectionFactory" cache-mode="CONNECTION" connection-cache-size="25"/> +``` + +You can provide host and port attributes by using the namespace, as follows: + +``` +<rabbit:connection-factory id="connectionFactory" host="somehost" port="5672"/> +``` + +Alternatively, if running in a clustered environment, you can use the addresses attribute, as follows: + +``` +<rabbit:connection-factory id="connectionFactory" addresses="host1:5672,host2:5672" address-shuffle-mode="RANDOM"/> +``` + +See [Connecting to a Cluster](#cluster) for information about `address-shuffle-mode`. + +The following example creates a connection factory with a custom thread factory that prefixes thread names with `rabbitmq-`: + +``` +<rabbit:connection-factory id="multiHost" addresses="host1:5672,host2:5672" +    thread-factory="tf" +    channel-cache-size="10" username="user" password="password"/> + +<bean id="tf" class="org.springframework.scheduling.concurrent.CustomizableThreadFactory"> +    <constructor-arg value="rabbitmq-"/> +</bean> +``` + +##### AddressResolver + +Starting with version 2.1.15, you can now use an `AddressResolver` to resolve the connection address(es). +This will override any settings of the `addresses` and `host/port` properties. + +##### Naming Connections + +Starting with version 1.7, a `ConnectionNameStrategy` is provided for the injection into the `AbstractConnectionFactory`. +The generated name is used for the application-specific identification of the target RabbitMQ connection. +The connection name is displayed in the management UI if the RabbitMQ server supports it. +This value does not have to be unique and cannot be used as a connection identifier — for example, in HTTP API requests. +This value is supposed to be human-readable and is a part of `ClientProperties` under the `connection_name` key. +You can use a simple Lambda, as follows: + +``` +connectionFactory.setConnectionNameStrategy(connectionFactory -> "MY_CONNECTION"); +``` + +The `ConnectionFactory` argument can be used to distinguish target connection names by some logic. +By default, the `beanName` of the `AbstractConnectionFactory`, a hex string representing the object, and an internal counter are used to generate the `connection_name`. +The `<rabbit:connection-factory>` namespace component is also supplied with the `connection-name-strategy` attribute. + +An implementation of `SimplePropertyValueConnectionNameStrategy` sets the connection name to an application property.
+You can declare it as a `@Bean` and inject it into the connection factory, as the following example shows: + +``` +@Bean +public SimplePropertyValueConnectionNameStrategy cns() { + return new SimplePropertyValueConnectionNameStrategy("spring.application.name"); +} + +@Bean +public ConnectionFactory rabbitConnectionFactory(ConnectionNameStrategy cns) { + CachingConnectionFactory connectionFactory = new CachingConnectionFactory(); + ... + connectionFactory.setConnectionNameStrategy(cns); + return connectionFactory; +} +``` + +The property must exist in the application context’s `Environment`. + +| |When using Spring Boot and its autoconfigured connection factory, you need only declare the `ConnectionNameStrategy` `@Bean`.
Boot auto-detects the bean and wires it into the factory.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Blocked Connections and Resource Constraints + +The connection might be blocked for interaction from the broker that corresponds to the [Memory Alarm](https://www.rabbitmq.com/memory.html). +Starting with version 2.0, the `org.springframework.amqp.rabbit.connection.Connection` can be supplied with `com.rabbitmq.client.BlockedListener` instances to be notified for connection blocked and unblocked events. +In addition, the `AbstractConnectionFactory` emits a `ConnectionBlockedEvent` and `ConnectionUnblockedEvent`, respectively, through its internal `BlockedListener` implementation. +These let you provide application logic to react appropriately to problems on the broker and (for example) take some corrective actions. + +| |When the application is configured with a single `CachingConnectionFactory`, as it is by default with Spring Boot auto-configuration, the application stops working when the connection is blocked by the Broker.
When the connection is blocked by the Broker, all of its clients stop working.
If we have producers and consumers in the same application, we may end up with a deadlock when producers are blocking the connection (because there are no resources on the Broker any more) and consumers cannot free them (because the connection is blocked).
To mitigate the problem, we suggest having one more, separate `CachingConnectionFactory` instance with the same options: one for producers and one for consumers.
A separate `CachingConnectionFactory` is not possible for transactional producers that execute on a consumer thread, since they should reuse the `Channel` associated with the consumer transactions.| +|---|---| + +Starting with version 2.0.2, the `RabbitTemplate` has a configuration option to automatically use a second connection factory, unless transactions are being used. +See [Using a Separate Connection](#separate-connection) for more information. +The `ConnectionNameStrategy` for the publisher connection is the same as the primary strategy with `.publisher` appended to the result of calling the method. + +Starting with version 1.7.7, an `AmqpResourceNotAvailableException` is provided, which is thrown when `SimpleConnection.createChannel()` cannot create a `Channel` (for example, because the `channelMax` limit is reached and there are no available channels in the cache). +You can use this exception in the `RetryPolicy` to recover the operation after some back-off. + +##### Configuring the Underlying Client Connection Factory + +The `CachingConnectionFactory` uses an instance of the Rabbit client `ConnectionFactory`. +A number of configuration properties are passed through (`host, port, userName, password, requestedHeartBeat, and connectionTimeout` for example) when setting the equivalent property on the `CachingConnectionFactory`. +To set other properties (`clientProperties`, for example), you can define an instance of the Rabbit factory and provide a reference to it by using the appropriate constructor of the `CachingConnectionFactory`. +When using the namespace ([as described earlier](#connections)), you need to provide a reference to the configured factory in the `connection-factory` attribute. +For convenience, a factory bean is provided to assist in configuring the connection factory in a Spring application context, as discussed in [the next section](#rabbitconnectionfactorybean-configuring-ssl). + +``` +<rabbit:connection-factory id="connectionFactory" connection-factory="rabbitConnectionFactory"/> +``` + +| |The 4.0.x client enables automatic recovery by default.
While compatible with this feature, Spring AMQP has its own recovery mechanisms and the client recovery feature generally is not needed.
We recommend disabling `amqp-client` automatic recovery, to avoid getting `AutoRecoverConnectionNotCurrentlyOpenException` instances when the broker is available but the connection has not yet recovered.
You may notice this exception, for example, when a `RetryTemplate` is configured in a `RabbitTemplate`, even when failing over to another broker in a cluster.
Since the auto-recovering connection recovers on a timer, the connection may be recovered more quickly by using Spring AMQP’s recovery mechanisms.
Starting with version 1.7.1, Spring AMQP disables `amqp-client` automatic recovery unless you explicitly create your own RabbitMQ connection factory and provide it to the `CachingConnectionFactory`.
RabbitMQ `ConnectionFactory` instances created by the `RabbitConnectionFactoryBean` also have the option disabled by default.| +|---|---| + +##### `RabbitConnectionFactoryBean` and Configuring SSL + +Starting with version 1.4, a `RabbitConnectionFactoryBean` is provided to enable convenient configuration of SSL properties on the underlying client connection factory by using dependency injection. +Other setters delegate to the underlying factory. +Previously, you had to configure the SSL options programmatically. +The following example shows how to configure a `RabbitConnectionFactoryBean`: + +``` +<rabbit:connection-factory id="rabbitConnectionFactory" +    connection-factory="clientConnectionFactory" +    host="${host}" port="${port}" +    virtual-host="${vhost}" +    username="${username}" password="${password}"/> + +<bean id="clientConnectionFactory" +      class="org.springframework.amqp.rabbit.connection.RabbitConnectionFactoryBean"> +    <property name="useSSL" value="true"/> +    <property name="sslPropertiesLocation" value="file:/secrets/rabbitSSL.properties"/> +</bean> +``` + +See the [RabbitMQ Documentation](https://www.rabbitmq.com/ssl.html) for information about configuring SSL. +Omit the `keyStore` and `trustStore` configuration to connect over SSL without certificate validation. +The next example shows how you can provide key and trust store configuration. + +The `sslPropertiesLocation` property is a Spring `Resource` pointing to a properties file containing the following keys: + +``` +keyStore=file:/secret/keycert.p12 +trustStore=file:/secret/trustStore +keyStore.passPhrase=secret +trustStore.passPhrase=secret +``` + +The `keyStore` and `trustStore` are Spring `Resources` pointing to the stores. +Typically, this properties file is secured by the operating system, with the application having read access. + +Starting with Spring AMQP version 1.5, you can set these properties directly on the factory bean. +If both discrete properties and `sslPropertiesLocation` are provided, properties in the latter override the +discrete values. + +| |Starting with version 2.0, the server certificate is validated by default because it is more secure.
If you wish to skip this validation for some reason, set the factory bean’s `skipServerCertificateValidation` property to `true`.
Starting with version 2.1, the `RabbitConnectionFactoryBean` now calls `enableHostnameVerification()` by default.
To revert to the previous behavior, set the `enableHostnameVerification` property to `false`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Starting with version 2.2.5, the factory bean will always use TLS v1.2 by default; previously, it used v1.1 in some cases and v1.2 in others (depending on other properties).
If you need to use v1.1 for some reason, set the `sslAlgorithm` property: `setSslAlgorithm("TLSv1.1")`.| +|---|---| + +##### Connecting to a Cluster + +To connect to a cluster, configure the `addresses` property on the `CachingConnectionFactory`: + +``` +@Bean +public CachingConnectionFactory ccf() { +    CachingConnectionFactory ccf = new CachingConnectionFactory(); +    ccf.setAddresses("host1:5672,host2:5672,host3:5672"); +    return ccf; +} +``` + +The underlying connection factory will attempt to connect to each host, in order, whenever a new connection is established. +Starting with version 2.1.8, the connection order can be made random by setting the `addressShuffleMode` property to `RANDOM`; the shuffle will be applied before creating any new connection. +Starting with version 2.6, the `INORDER` shuffle mode was added, which means the first address is moved to the end after a connection is created. +You may wish to use this mode with the [RabbitMQ Sharding Plugin](https://github.com/rabbitmq/rabbitmq-sharding) with `CacheMode.CONNECTION` and suitable concurrency if you wish to consume from all shards on all nodes. + +``` +@Bean +public CachingConnectionFactory ccf() { +    CachingConnectionFactory ccf = new CachingConnectionFactory(); +    ccf.setAddresses("host1:5672,host2:5672,host3:5672"); +    ccf.setAddressShuffleMode(AddressShuffleMode.RANDOM); +    return ccf; +} +``` + +##### Routing Connection Factory + +Starting with version 1.3, the `AbstractRoutingConnectionFactory` has been introduced. +This factory provides a mechanism to configure mappings for several `ConnectionFactories` and determine a target `ConnectionFactory` by some `lookupKey` at runtime. +Typically, the implementation checks a thread-bound context. +For convenience, Spring AMQP provides the `SimpleRoutingConnectionFactory`, which gets the current thread-bound `lookupKey` from the `SimpleResourceHolder`. +The following examples show how to configure a `SimpleRoutingConnectionFactory` in both XML and Java: + +``` +<bean id="connectionFactory" +      class="org.springframework.amqp.rabbit.connection.SimpleRoutingConnectionFactory"> +    <property name="targetConnectionFactories"> +        <map> +            <entry key="#{connectionFactory1.virtualHost}" ref="connectionFactory1"/> +            <entry key="#{connectionFactory2.virtualHost}" ref="connectionFactory2"/> +        </map> +    </property> +</bean> + +<rabbit:template id="template" connection-factory="connectionFactory"/> +``` + +``` +public class MyService { + +    @Autowired +    private RabbitTemplate rabbitTemplate; + +    public void service(String vHost, String payload) { +        SimpleResourceHolder.bind(rabbitTemplate.getConnectionFactory(), vHost); +        rabbitTemplate.convertAndSend(payload); +        SimpleResourceHolder.unbind(rabbitTemplate.getConnectionFactory()); +    } + +} +``` + +It is important to unbind the resource after use. +For more information, see the [JavaDoc](https://docs.spring.io/spring-amqp/docs/latest-ga/api/org/springframework/amqp/rabbit/connection/AbstractRoutingConnectionFactory.html) for `AbstractRoutingConnectionFactory`. + +Starting with version 1.4, `RabbitTemplate` supports the SpEL `sendConnectionFactorySelectorExpression` and `receiveConnectionFactorySelectorExpression` properties, which are evaluated on each AMQP protocol interaction operation (`send`, `sendAndReceive`, `receive`, or `receiveAndReply`), resolving to a `lookupKey` value for the provided `AbstractRoutingConnectionFactory`. +You can use bean references, such as `@vHostResolver.getVHost(#root)` in the expression. +For `send` operations, the message to be sent is the root evaluation object. +For `receive` operations, the `queueName` is the root evaluation object.
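+As a simple illustration, the following sketch selects the target factory from a message header on each send; the `cfKey` header name is arbitrary, and it assumes that target factories were registered under the matching lookup keys: + +``` +SimpleRoutingConnectionFactory rcf = new SimpleRoutingConnectionFactory(); +rcf.setTargetConnectionFactories(Map.of("cf1", connectionFactory1, "cf2", connectionFactory2)); +RabbitTemplate template = new RabbitTemplate(rcf); +// for send operations, the message to be sent is the SpEL root object +template.setSendConnectionFactorySelectorExpression( +        new SpelExpressionParser().parseExpression("messageProperties.headers['cfKey']")); +``` +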
The routing algorithm is as follows: If the selector expression is `null` or is evaluated to `null` or the provided `ConnectionFactory` is not an instance of `AbstractRoutingConnectionFactory`, everything works as before, relying on the provided `ConnectionFactory` implementation.
The same occurs if the evaluation result is not `null`, but there is no target `ConnectionFactory` for that `lookupKey` and the `AbstractRoutingConnectionFactory` is configured with `lenientFallback = true`.
In that case, it falls back to its `routing` implementation based on `determineCurrentLookupKey()`.
However, if `lenientFallback = false`, an `IllegalStateException` is thrown.

The namespace support also provides the `send-connection-factory-selector-expression` and `receive-connection-factory-selector-expression` attributes on the `<rabbit:template>` component.

Also, starting with version 1.4, you can configure a routing connection factory in a listener container.
In that case, the list of queue names is used as the lookup key.
For example, if you configure the container with `setQueueNames("thing1", "thing2")`, the lookup key is `[thing1,thing2]` (note that there is no space in the key).

Starting with version 1.6.9, you can add a qualifier to the lookup key by using `setLookupKeyQualifier` on the listener container.
Doing so enables, for example, listening to queues with the same name but in a different virtual host (where you would have a connection factory for each).

For example, with lookup key qualifier `thing1` and a container listening to queue `thing2`, the lookup key you could register the target connection factory with could be `thing1[thing2]`.

| |The target (and default, if provided) connection factories must have the same settings for publisher confirms and returns.
See [Publisher Confirms and Returns](#cf-pub-conf-ret).|
|---|---|

##### Queue Affinity and the `LocalizedQueueConnectionFactory`

When using HA queues in a cluster, for the best performance, you may want to connect to the physical broker where the lead queue resides.
The `CachingConnectionFactory` can be configured with multiple broker addresses; this is for failover, and the client attempts to connect to them in order.
The `LocalizedQueueConnectionFactory` uses the REST API provided by the management plugin to determine which node is the lead for the queue.
It then creates (or retrieves from a cache) a `CachingConnectionFactory` that connects to just that node.
If the connection fails, the new lead node is determined and the consumer connects to it.
The `LocalizedQueueConnectionFactory` is configured with a default connection factory, in case the physical location of the queue cannot be determined, in which case it connects as normal to the cluster.

The `LocalizedQueueConnectionFactory` is a `RoutingConnectionFactory`, and the `SimpleMessageListenerContainer` uses the queue names as the lookup key, as discussed in [Routing Connection Factory](#routing-connection-factory) above.

| |For this reason (the use of the queue name for the lookup), the `LocalizedQueueConnectionFactory` can be used only if the container is configured to listen to a single queue.|
|---|---|

| |The RabbitMQ management plugin must be enabled on each node.|
|---|---|

| |This connection factory is intended for long-lived connections, such as those used by the `SimpleMessageListenerContainer`.
It is not intended for short connection use, such as with a `RabbitTemplate`, because of the overhead of invoking the REST API before making the connection.
Also, for publish operations, the queue is unknown, and the message is published to all cluster members anyway, so the logic of looking up the node has little value.|
|---|---|

The following example shows how to configure the factories:

```
@Autowired
private ConfigurationProperties props;

@Bean
public CachingConnectionFactory defaultConnectionFactory() {
    CachingConnectionFactory cf = new CachingConnectionFactory();
    cf.setAddresses(this.props.getAddresses());
    cf.setUsername(this.props.getUsername());
    cf.setPassword(this.props.getPassword());
    cf.setVirtualHost(this.props.getVirtualHost());
    return cf;
}

@Bean
public LocalizedQueueConnectionFactory queueAffinityCF(
        @Qualifier("defaultConnectionFactory") ConnectionFactory defaultCF) {
    return new LocalizedQueueConnectionFactory(defaultCF,
            StringUtils.commaDelimitedListToStringArray(this.props.getAddresses()),
            StringUtils.commaDelimitedListToStringArray(this.props.getAdminUris()),
            StringUtils.commaDelimitedListToStringArray(this.props.getNodes()),
            this.props.getVirtualHost(), this.props.getUsername(), this.props.getPassword(),
            false, null);
}
```

Notice that the first three parameters are arrays of `addresses`, `adminUris`, and `nodes`.
These are positional in that, when a container attempts to connect to a queue, it uses the admin API to determine which node is the lead for the queue and connects to the address in the same array position as that node.

##### Publisher Confirms and Returns

Confirmed (with correlation) and returned messages are supported by setting the `CachingConnectionFactory` property `publisherConfirmType` to `ConfirmType.CORRELATED` and the `publisherReturns` property to `true`.

When these options are set, `Channel` instances created by the factory are wrapped in a `PublisherCallbackChannel`, which is used to facilitate the callbacks.
When such a channel is obtained, the client can register a `PublisherCallbackChannel.Listener` with the `Channel`.
The `PublisherCallbackChannel` implementation contains logic to route a confirm or return to the appropriate listener.
These features are explained further in the following sections.

See also `simplePublisherConfirms` in [Scoped Operations](#scoped-operations).

| |For some more background information, see the blog post by the RabbitMQ team titled [Introducing Publisher Confirms](https://www.rabbitmq.com/blog/2011/02/10/introducing-publisher-confirms/).|
|---|---|

##### Connection and Channel Listeners

The connection factory supports registering `ConnectionListener` and `ChannelListener` implementations.
This allows you to receive notifications for connection and channel related events.
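Since the listener interfaces are functional interfaces (their definitions follow), a listener can be registered with a simple lambda; a minimal sketch, assuming a `connectionFactory` bean and an SLF4J `log`:

```
// log each new connection as it is created (onCreate is the single abstract method)
connectionFactory.addConnectionListener(
        connection -> log.info("Connection established: {}", connection));
```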
(A `ConnectionListener` is used by the `RabbitAdmin` to perform declarations when the connection is established; see [Automatic Declaration of Exchanges, Queues, and Bindings](#automatic-declaration) for more information.)
The following listing shows the `ConnectionListener` interface definition:

```
@FunctionalInterface
public interface ConnectionListener {

    void onCreate(Connection connection);

    default void onClose(Connection connection) {
    }

    default void onShutDown(ShutdownSignalException signal) {
    }

}
```

Starting with version 2.0, the `org.springframework.amqp.rabbit.connection.Connection` object can be supplied with `com.rabbitmq.client.BlockedListener` instances to be notified of connection blocked and unblocked events.
The following listing shows the `ChannelListener` interface definition:

```
@FunctionalInterface
public interface ChannelListener {

    void onCreate(Channel channel, boolean transactional);

    default void onShutDown(ShutdownSignalException signal) {
    }

}
```

See [Publishing is Asynchronous — How to Detect Successes and Failures](#publishing-is-async) for one scenario where you might want to register a `ChannelListener`.

##### Logging Channel Close Events

Version 1.5 introduced a mechanism to enable users to control logging levels.

The `CachingConnectionFactory` uses a default strategy to log channel closures as follows:

* Normal channel closes (200 OK) are not logged.

* If a channel is closed due to a failed passive queue declaration, it is logged at DEBUG level.

* If a channel is closed because the `basic.consume` is refused due to an exclusive consumer condition, it is logged at INFO level.

* All others are logged at ERROR level.

To modify this behavior, you can inject a custom `ConditionalExceptionLogger` into the `CachingConnectionFactory` in its `closeExceptionLogger` property.

See also [Consumer Events](#consumer-events).

##### Runtime Cache Properties

Starting with version 1.6, the `CachingConnectionFactory` provides cache statistics through the `getCacheProperties()` method.
These statistics can be used to tune the cache to optimize it in production.
For example, the high water marks can be used to determine whether the cache size should be increased.
If a high water mark equals the cache size, you might want to consider increasing the size further.
The following table describes the `CacheMode.CHANNEL` properties:

| Property | Meaning |
|---|---|
| `connectionName` | The name of the connection generated by the `ConnectionNameStrategy`. |
| `channelCacheSize` | The currently configured maximum channels that are allowed to be idle. |
| `localPort` | The local port for the connection (if available). This can be used to correlate with connections and channels on the RabbitMQ Admin UI. |
| `idleChannelsTx` | The number of transactional channels that are currently idle (cached). |
| `idleChannelsNotTx` | The number of non-transactional channels that are currently idle (cached). |
| `idleChannelsTxHighWater` | The maximum number of transactional channels that have been concurrently idle (cached). |
| `idleChannelsNotTxHighWater` | The maximum number of non-transactional channels that have been concurrently idle (cached). |

The following table describes the `CacheMode.CONNECTION` properties (a `localPort` suffix on the per-connection property names distinguishes the individual connections):

| Property | Meaning |
|---|---|
| `connectionName:<localPort>` | The name of the connection generated by the `ConnectionNameStrategy`. |
| `openConnections` | The number of connection objects representing connections to brokers. |
| `channelCacheSize` | The currently configured maximum channels that are allowed to be idle. |
| `connectionCacheSize` | The currently configured maximum connections that are allowed to be idle. |
| `idleConnections` | The number of connections that are currently idle. |
| `idleConnectionsHighWater` | The maximum number of connections that have been concurrently idle. |
| `idleChannelsTx:<localPort>` | The number of transactional channels that are currently idle (cached) for this connection. You can use the `localPort` part of the property name to correlate with connections and channels on the RabbitMQ Admin UI. |
| `idleChannelsNotTx:<localPort>` | The number of non-transactional channels that are currently idle (cached) for this connection. The `localPort` part of the property name can be used to correlate with connections and channels on the RabbitMQ Admin UI. |
| `idleChannelsTxHighWater:<localPort>` | The maximum number of transactional channels that have been concurrently idle (cached). The `localPort` part of the property name can be used to correlate with connections and channels on the RabbitMQ Admin UI. |
| `idleChannelsNotTxHighWater:<localPort>` | The maximum number of non-transactional channels that have been concurrently idle (cached). You can use the `localPort` part of the property name to correlate with connections and channels on the RabbitMQ Admin UI. |

The `cacheMode` property (`CHANNEL` or `CONNECTION`) is also included.

![cacheStats](https://docs.spring.io/spring-amqp/docs/current/reference/html/images/cacheStats.png)

Figure 1. JVisualVM Example

##### RabbitMQ Automatic Connection/Topology Recovery

Since the first version of Spring AMQP, the framework has provided its own connection and channel recovery in the event of a broker failure.
Also, as discussed in [Configuring the Broker](#broker-configuration), the `RabbitAdmin` re-declares any infrastructure beans (queues and others) when the connection is re-established.
It therefore does not rely on the [auto-recovery](https://www.rabbitmq.com/api-guide.html#recovery) that is now provided by the `amqp-client` library.
Spring AMQP now uses the `4.0.x` version of `amqp-client`, which has auto-recovery enabled by default.
Spring AMQP can still use its own recovery mechanisms if you wish, disabling it in the client (by setting the `automaticRecoveryEnabled` property on the underlying RabbitMQ `ConnectionFactory` to `false`).
However, the framework is completely compatible with auto-recovery being enabled.
This means any consumers you create within your code (perhaps via `RabbitTemplate.execute()`) can be recovered automatically.

| |Only elements (queues, exchanges, bindings) that are defined as beans will be re-declared after a connection failure.
Elements declared by invoking `RabbitAdmin.declare*()` methods directly from user code are unknown to the framework and therefore cannot be recovered.
If you have a need for a variable number of declarations, consider defining a bean, or beans, of type `Declarables`, as discussed in [Declaring Collections of Exchanges, Queues, and Bindings](#collection-declaration).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.1.3. Adding Custom Client Connection Properties + +The `CachingConnectionFactory` now lets you access the underlying connection factory to allow, for example, +setting custom client properties. +The following example shows how to do so: + +``` +connectionFactory.getRabbitConnectionFactory().getClientProperties().put("thing1", "thing2"); +``` + +These properties appear in the RabbitMQ Admin UI when viewing the connection. + +#### 4.1.4. `AmqpTemplate` + +As with many other high-level abstractions provided by the Spring Framework and related projects, Spring AMQP provides a “template” that plays a central role. +The interface that defines the main operations is called `AmqpTemplate`. +Those operations cover the general behavior for sending and receiving messages. +In other words, they are not unique to any implementation — hence the “AMQP” in the name. +On the other hand, there are implementations of that interface that are tied to implementations of the AMQP protocol. +Unlike JMS, which is an interface-level API itself, AMQP is a wire-level protocol. +The implementations of that protocol provide their own client libraries, so each implementation of the template interface depends on a particular client library. +Currently, there is only a single implementation: `RabbitTemplate`. +In the examples that follow, we often use an `AmqpTemplate`. +However, when you look at the configuration examples or any code excerpts where the template is instantiated or setters are invoked, you can see the implementation type (for example, `RabbitTemplate`). + +As mentioned earlier, the `AmqpTemplate` interface defines all of the basic operations for sending and receiving messages. +We will explore message sending and reception, respectively, in [Sending Messages](#sending-messages) and [Receiving Messages](#receiving-messages). + +See also [Async Rabbit Template](#async-template). + +##### Adding Retry Capabilities + +Starting with version 1.3, you can now configure the `RabbitTemplate` to use a `RetryTemplate` to help with handling problems with broker connectivity. +See the [spring-retry](https://github.com/spring-projects/spring-retry) project for complete information. +The following is only one example that uses an exponential back off policy and the default `SimpleRetryPolicy`, which makes three tries before throwing the exception to the caller. 
The following example uses the XML namespace:

```
<rabbit:template id="template" connection-factory="connectionFactory" retry-template="retryTemplate" />

<bean id="retryTemplate" class="org.springframework.retry.support.RetryTemplate">
    <property name="backOffPolicy">
        <bean class="org.springframework.retry.backoff.ExponentialBackOffPolicy">
            <property name="initialInterval" value="500" />
            <property name="multiplier" value="10.0" />
            <property name="maxInterval" value="10000" />
        </bean>
    </property>
</bean>
```

The following example uses the `@Configuration` annotation in Java:

```
@Bean
public RabbitTemplate rabbitTemplate() {
    RabbitTemplate template = new RabbitTemplate(connectionFactory());
    RetryTemplate retryTemplate = new RetryTemplate();
    ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
    backOffPolicy.setInitialInterval(500);
    backOffPolicy.setMultiplier(10.0);
    backOffPolicy.setMaxInterval(10000);
    retryTemplate.setBackOffPolicy(backOffPolicy);
    template.setRetryTemplate(retryTemplate);
    return template;
}
```

Starting with version 1.4, in addition to the `retryTemplate` property, the `recoveryCallback` option is supported on the `RabbitTemplate`.
It is used as the second argument for `RetryTemplate.execute(RetryCallback<T, E> retryCallback, RecoveryCallback<T> recoveryCallback)`.

| |The `RecoveryCallback` is somewhat limited, in that the retry context contains only the `lastThrowable` field.
For more sophisticated use cases, you should use an external `RetryTemplate` so that you can convey additional information to the `RecoveryCallback` through the context’s attributes.
The following example shows how to do so:|
|---|---|

```
retryTemplate.execute(
    new RetryCallback<Object, Exception>() {

        @Override
        public Object doWithRetry(RetryContext context) throws Exception {
            context.setAttribute("message", message);
            rabbitTemplate.convertAndSend(exchange, routingKey, message);
            return null;
        }

    }, new RecoveryCallback<Object>() {

        @Override
        public Object recover(RetryContext context) throws Exception {
            Object message = context.getAttribute("message");
            Throwable t = context.getLastThrowable();
            // Do something with message
            return null;
        }
    });
```

In this case, you would **not** inject a `RetryTemplate` into the `RabbitTemplate`.

##### Publishing is Asynchronous — How to Detect Successes and Failures

Publishing messages is an asynchronous mechanism and, by default, messages that cannot be routed are dropped by RabbitMQ.
For successful publishing, you can receive an asynchronous confirm, as described in [Correlated Publisher Confirms and Returns](#template-confirms).
Consider two failure scenarios:

* Publish to an exchange but there is no matching destination queue.

* Publish to a non-existent exchange.

The first case is covered by publisher returns, as described in [Correlated Publisher Confirms and Returns](#template-confirms).

For the second case, the message is dropped and no return is generated.
The underlying channel is closed with an exception.
By default, this exception is logged, but you can register a `ChannelListener` with the `CachingConnectionFactory` to obtain notifications of such events.
The following example shows how to add a `ConnectionListener`:

```
this.connectionFactory.addConnectionListener(new ConnectionListener() {

    @Override
    public void onCreate(Connection connection) {
    }

    @Override
    public void onShutDown(ShutdownSignalException signal) {
        ...
    }

});
```

You can examine the signal’s `reason` property to determine the problem that occurred.

To detect the exception on the sending thread, you can `setChannelTransacted(true)` on the `RabbitTemplate` and the exception is detected on the `txCommit()`.
However, **transactions significantly impede performance**, so consider this carefully before enabling transactions for just this one use case.

##### Correlated Publisher Confirms and Returns

The `RabbitTemplate` implementation of `AmqpTemplate` supports publisher confirms and returns.

For returned messages, the template’s `mandatory` property must be set to `true` or the `mandatory-expression` must evaluate to `true` for a particular message.
This feature requires a `CachingConnectionFactory` that has its `publisherReturns` property set to `true` (see [Publisher Confirms and Returns](#cf-pub-conf-ret)).
Returns are sent to the client by registering a `RabbitTemplate.ReturnsCallback` by calling `setReturnsCallback(ReturnsCallback callback)`.
The callback must implement the following method:

```
void returnedMessage(ReturnedMessage returned);
```

The `ReturnedMessage` has the following properties:

* `message` - the returned message itself

* `replyCode` - a code indicating the reason for the return

* `replyText` - a textual reason for the return - e.g. `NO_ROUTE`

* `exchange` - the exchange to which the message was sent

* `routingKey` - the routing key that was used

Only one `ReturnsCallback` is supported by each `RabbitTemplate`.
See also [Reply Timeout](#reply-timeout).

For publisher confirms (also known as publisher acknowledgements), the template requires a `CachingConnectionFactory` that has its `publisherConfirmType` property set to `ConfirmType.CORRELATED`.
Confirms are sent to the client by registering a `RabbitTemplate.ConfirmCallback` by calling `setConfirmCallback(ConfirmCallback callback)`.
The callback must implement this method:

```
void confirm(CorrelationData correlationData, boolean ack, String cause);
```

The `CorrelationData` is an object supplied by the client when sending the original message.
The `ack` is true for an `ack` and false for a `nack`.
For `nack` instances, the cause may contain a reason for the `nack`, if it is available when the `nack` is generated.
An example is when sending a message to a non-existent exchange.
In that case, the broker closes the channel.
The reason for the closure is included in the `cause`.
The `cause` was added in version 1.4.

Only one `ConfirmCallback` is supported by a `RabbitTemplate`.

| |When a rabbit template send operation completes, the channel is closed.
This precludes the reception of confirms or returns when the connection factory cache is full (when there is space in the cache, the channel is not physically closed and the returns and confirms proceed normally).
When the cache is full, the framework defers the close for up to five seconds, in order to allow time for the confirms and returns to be received.
When using confirms, the channel is closed when the last confirm is received.
When using only returns, the channel remains open for the full five seconds.
We generally recommend setting the connection factory’s `channelCacheSize` to a large enough value so that the channel on which a message is published is returned to the cache instead of being closed.
You can monitor channel usage by using the RabbitMQ management plugin.
If you see channels being opened and closed rapidly, you should consider increasing the cache size to reduce overhead on the server.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Before version 2.1, channels enabled for publisher confirms were returned to the cache before the confirms were received.
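A minimal sketch of that recommendation (the cache size of `50` is an arbitrary starting point; use the cache statistics described earlier to tune it):

```
@Bean
public CachingConnectionFactory connectionFactory() {
    CachingConnectionFactory ccf = new CachingConnectionFactory("localhost");
    // large enough that publishing channels are returned to the cache rather than closed
    ccf.setChannelCacheSize(50);
    return ccf;
}
```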
Some other process could check out the channel and perform some operation that causes the channel to close — such as publishing a message to a non-existent exchange.
This could cause the confirm to be lost.
Version 2.1 and later no longer return the channel to the cache while confirms are outstanding.
The `RabbitTemplate` performs a logical `close()` on the channel after each operation.
In general, this means that only one confirm is outstanding on a channel at a time.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Starting with version 2.2, the callbacks are invoked on one of the connection factory’s `executor` threads.
This is to avoid a potential deadlock if you perform Rabbit operations from within the callback.
With previous versions, the callbacks were invoked directly on the `amqp-client` connection I/O thread; this would deadlock if you perform some RPC operation (such as opening a new channel) since the I/O thread blocks waiting for the result, but the result needs to be processed by the I/O thread itself.
With those versions, it was necessary to hand off work (such as sending a message) to another thread within the callback.
This is no longer necessary since the framework now hands off the callback invocation to the executor.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The guarantee of receiving a returned message before the ack is still maintained as long as the return callback executes in 60 seconds or less.
The confirm is scheduled to be delivered after the return callback exits or after 60 seconds, whichever comes first.|
|---|---|

Starting with version 2.1, the `CorrelationData` object has a `ListenableFuture` that you can use to get the result, instead of using a `ConfirmCallback` on the template.
The following example shows how to configure a `CorrelationData` instance:

```
CorrelationData cd1 = new CorrelationData();
this.templateWithConfirmsEnabled.convertAndSend("exchange", queue.getName(), "foo", cd1);
assertTrue(cd1.getFuture().get(10, TimeUnit.SECONDS).isAck());
```

Since it is a `ListenableFuture`, you can either `get()` the result when ready or add listeners for an asynchronous callback.
The `Confirm` object is a simple bean with two properties: `ack` and `reason` (for `nack` instances).
The reason is not populated for broker-generated `nack` instances.
It is populated for `nack` instances generated by the framework (for example, closing the connection while `ack` instances are outstanding).

In addition, when both confirms and returns are enabled, the `CorrelationData` is populated with the returned message, as long as the `CorrelationData` has a unique `id`; this is always the case, by default, starting with version 2.3.
It is guaranteed that the returned message is set before the future is set with the `ack`.

See also [Scoped Operations](#scoped-operations) for a simpler mechanism for waiting for publisher confirms.

##### Scoped Operations

Normally, when using the template, a `Channel` is checked out of the cache (or created), used for the operation, and returned to the cache for reuse.
In a multi-threaded environment, there is no guarantee that the next operation uses the same channel.
There may be times, however, where you want to have more control over the use of a channel and ensure that a number of operations are all performed on the same channel.

Starting with version 2.0, a new method called `invoke` is provided, with an `OperationsCallback`.
Any operations performed within the scope of the callback and on the provided `RabbitOperations` argument use the same dedicated `Channel`, which will be closed at the end (not returned to a cache).
If the channel is a `PublisherCallbackChannel`, it is returned to the cache after all confirms have been received (see [Correlated Publisher Confirms and Returns](#template-confirms)).

```
@FunctionalInterface
public interface OperationsCallback<T> {

    T doInRabbit(RabbitOperations operations);

}
```

One example of why you might need this is if you wish to use the `waitForConfirms()` method on the underlying `Channel`.
This method was not previously exposed by the Spring API because the channel is, generally, cached and shared, as discussed earlier.
The `RabbitTemplate` now provides `waitForConfirms(long timeout)` and `waitForConfirmsOrDie(long timeout)`, which delegate to the dedicated channel used within the scope of the `OperationsCallback`.
The methods cannot be used outside of that scope, for obvious reasons.

Note that a higher-level abstraction that lets you correlate confirms to requests is provided elsewhere (see [Correlated Publisher Confirms and Returns](#template-confirms)).
If you want only to wait until the broker has confirmed delivery, you can use the technique shown in the following example:

```
Collection<String> messages = getMessagesToSend();
Boolean result = this.template.invoke(t -> {
    messages.forEach(m -> t.convertAndSend(ROUTE, m));
    t.waitForConfirmsOrDie(10_000);
    return true;
});
```

If you wish `RabbitAdmin` operations to be invoked on the same channel within the scope of the `OperationsCallback`, the admin must have been constructed by using the same `RabbitTemplate` that was used for the `invoke` operation.

| |The preceding discussion is moot if the template operations are already performed within the scope of an existing transaction — for example, when running on a transacted listener container thread and performing operations on a transacted template.
In that case, the operations are performed on that channel and committed when the thread returns to the container.
It is not necessary to use `invoke` in that scenario.|
|---|---|

When using confirms in this way, much of the infrastructure set up for correlating confirms to requests is not really needed (unless returns are also enabled).
Starting with version 2.2, the connection factory supports a new property called `publisherConfirmType`.
When this is set to `ConfirmType.SIMPLE`, the infrastructure is avoided and the confirm processing can be more efficient.

Furthermore, the `RabbitTemplate` sets the `publisherSequenceNumber` property in the sent message `MessageProperties`.
If you wish to check (or log or otherwise use) specific confirms, you can do so with an overloaded `invoke` method, as the following example shows:

```
public <T> T invoke(OperationsCallback<T> action, com.rabbitmq.client.ConfirmCallback acks,
        com.rabbitmq.client.ConfirmCallback nacks);
```

| |These `ConfirmCallback` objects (for `ack` and `nack` instances) are the Rabbit client callbacks, not the template callback.|
|---|---|

The following example logs `ack` and `nack` instances:

```
Collection<String> messages = getMessagesToSend();
Boolean result = this.template.invoke(t -> {
    messages.forEach(m -> t.convertAndSend(ROUTE, m));
    t.waitForConfirmsOrDie(10_000);
    return true;
}, (tag, multiple) -> {
    log.info("Ack: " + tag + ":" + multiple);
}, (tag, multiple) -> {
    log.info("Nack: " + tag + ":" + multiple);
});
```

| |Scoped operations are bound to a thread.
See [Strict Message Ordering in a Multi-Threaded Environment](#multi-strict) for a discussion about strict ordering in a multi-threaded environment.|
|---|---|

##### Strict Message Ordering in a Multi-Threaded Environment

The discussion in [Scoped Operations](#scoped-operations) applies only when the operations are performed on the same thread.

Consider the following situation:

* `thread-1` sends a message to a queue and hands off work to `thread-2`

* `thread-2` sends a message to the same queue

Because of the async nature of RabbitMQ and the use of cached channels, it is not certain that the same channel will be used, and therefore the order in which the messages arrive in the queue is not guaranteed.
(In most cases, they arrive in order, but the probability of out-of-order delivery is not zero.)
To solve this use case, you can use a bounded channel cache with size `1` (together with a `channelCheckoutTimeout`) to ensure the messages are always published on the same channel, so order is guaranteed.
To do this, if you have other uses for the connection factory, such as consumers, you should either use a dedicated connection factory for the template or configure the template to use the publisher connection factory embedded in the main connection factory (see [Using a Separate Connection](#separate-connection)).

This is best illustrated with a simple Spring Boot application:

```
@SpringBootApplication
public class Application {

    private static final Logger log = LoggerFactory.getLogger(Application.class);

    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }

    @Bean
    TaskExecutor exec() {
        ThreadPoolTaskExecutor exec = new ThreadPoolTaskExecutor();
        exec.setCorePoolSize(10);
        return exec;
    }

    @Bean
    CachingConnectionFactory ccf() {
        CachingConnectionFactory ccf = new CachingConnectionFactory("localhost");
        CachingConnectionFactory publisherCF = (CachingConnectionFactory) ccf.getPublisherConnectionFactory();
        publisherCF.setChannelCacheSize(1);
        publisherCF.setChannelCheckoutTimeout(1000L);
        return ccf;
    }

    @RabbitListener(queues = "queue")
    void listen(String in) {
        log.info(in);
    }

    @Bean
    Queue queue() {
        return new Queue("queue");
    }

    @Bean
    public ApplicationRunner runner(Service service, TaskExecutor exec) {
        return args -> {
            exec.execute(() -> service.mainService("test"));
        };
    }

}

@Component
class Service {

    private static final Logger LOG = LoggerFactory.getLogger(Service.class);

    private final RabbitTemplate template;

    private final TaskExecutor exec;

    Service(RabbitTemplate template, TaskExecutor exec) {
        template.setUsePublisherConnection(true);
        this.template = template;
        this.exec = exec;
    }

    void mainService(String toSend) {
        LOG.info("Publishing from main service");
        this.template.convertAndSend("queue", toSend);
        this.exec.execute(() -> secondaryService(toSend.toUpperCase()));
    }

    void secondaryService(String toSend) {
        LOG.info("Publishing from secondary service");
        this.template.convertAndSend("queue", toSend);
    }

}
```

Even though the publishing is performed on two different threads, they will both use the same channel because the cache is capped at a single channel.
Starting with version 2.3.7, the `ThreadChannelConnectionFactory` supports transferring a thread’s channel(s) to another thread, using the `prepareSwitchContext` and `switchContext` methods.
The first method returns a context, which is passed to the second thread, which calls the second method.
A thread can have either a non-transactional channel or a transactional channel (or one of each) bound to it; you cannot transfer them individually, unless you use two connection factories.
An example follows:

```
@SpringBootApplication
public class Application {

    private static final Logger log = LoggerFactory.getLogger(Application.class);

    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }

    @Bean
    TaskExecutor exec() {
        ThreadPoolTaskExecutor exec = new ThreadPoolTaskExecutor();
        exec.setCorePoolSize(10);
        return exec;
    }

    @Bean
    ThreadChannelConnectionFactory tccf() {
        ConnectionFactory rabbitConnectionFactory = new ConnectionFactory();
        rabbitConnectionFactory.setHost("localhost");
        return new ThreadChannelConnectionFactory(rabbitConnectionFactory);
    }

    @RabbitListener(queues = "queue")
    void listen(String in) {
        log.info(in);
    }

    @Bean
    Queue queue() {
        return new Queue("queue");
    }

    @Bean
    public ApplicationRunner runner(Service service, TaskExecutor exec) {
        return args -> {
            exec.execute(() -> service.mainService("test"));
        };
    }

}

@Component
class Service {

    private static final Logger LOG = LoggerFactory.getLogger(Service.class);

    private final RabbitTemplate template;

    private final TaskExecutor exec;

    private final ThreadChannelConnectionFactory connFactory;

    Service(RabbitTemplate template, TaskExecutor exec,
            ThreadChannelConnectionFactory tccf) {

        this.template = template;
        this.exec = exec;
        this.connFactory = tccf;
    }

    void mainService(String toSend) {
        LOG.info("Publishing from main service");
        this.template.convertAndSend("queue", toSend);
        Object context = this.connFactory.prepareSwitchContext();
        this.exec.execute(() -> secondaryService(toSend.toUpperCase(), context));
    }

    void secondaryService(String toSend, Object threadContext) {
        LOG.info("Publishing from secondary service");
        this.connFactory.switchContext(threadContext);
        this.template.convertAndSend("queue", toSend);
        this.connFactory.closeThreadChannel();
    }

}
```

| |Once `prepareSwitchContext` is called, if the current thread performs any more operations, they will be performed on a new channel.
It is important to close the thread-bound channel when it is no longer needed.|
|---|---|

##### Messaging Integration

Starting with version 1.4, `RabbitMessagingTemplate` (built on top of `RabbitTemplate`) provides an integration with the Spring Framework messaging abstraction — that is, `org.springframework.messaging.Message`.
This lets you send and receive messages by using the `spring-messaging` `Message` abstraction.
This abstraction is used by other Spring projects, such as Spring Integration and Spring’s STOMP support.
There are two message converters involved: one to convert between a spring-messaging `Message` and Spring AMQP’s `Message` abstraction and one to convert between Spring AMQP’s `Message` abstraction and the format required by the underlying RabbitMQ client library.
By default, the message payload is converted by the provided `RabbitTemplate` instance’s message converter.
Alternatively, you can inject a custom `MessagingMessageConverter` with some other payload converter, as the following example shows:

```
MessagingMessageConverter amqpMessageConverter = new MessagingMessageConverter();
amqpMessageConverter.setPayloadConverter(myPayloadConverter);
rabbitMessagingTemplate.setAmqpMessageConverter(amqpMessageConverter);
```

##### Validated User Id

Starting with version 1.6, the template now supports a `user-id-expression` (`userIdExpression` when using Java configuration).
If a message is sent, the user id property is set (if not already set) after evaluating this expression.
The root object for the evaluation is the message to be sent.

The following examples show how to use the `user-id-expression` attribute:

```
<rabbit:template ... user-id-expression="'guest'" />

<rabbit:template ... user-id-expression="@connectionFactory.username" />
```

The first example is a literal expression.
The second obtains the `username` property from a connection factory bean in the application context.

##### Using a Separate Connection

Starting with version 2.0.2, you can set the `usePublisherConnection` property to `true` to use a different connection from that used by listener containers, when possible.
This is to avoid consumers being blocked when a producer is blocked for any reason.
The connection factories maintain a second internal connection factory for this purpose; by default, it is the same type as the main factory, but it can be set explicitly if you wish to use a different factory type for publishing.
If the rabbit template is running in a transaction started by the listener container, the container’s channel is used, regardless of this setting.

| |In general, you should not use a `RabbitAdmin` with a template that has this set to `true`.
Use the `RabbitAdmin` constructor that takes a connection factory.
If you use the other constructor that takes a template, ensure the template’s property is `false`.
This is because, often, an admin is used to declare queues for listener containers.
Using a template that has the property set to `true` would mean that exclusive queues (such as `AnonymousQueue`) would be declared on a different connection to that used by listener containers.
In that case, the queues cannot be used by the containers.|
|---|---|

#### 4.1.5. Sending Messages

When sending a message, you can use any of the following methods:

```
void send(Message message) throws AmqpException;

void send(String routingKey, Message message) throws AmqpException;

void send(String exchange, String routingKey, Message message) throws AmqpException;
```

We can begin our discussion with the last method in the preceding listing, since it is actually the most explicit.
It lets an AMQP exchange name (along with a routing key) be provided at runtime.
The last parameter is the `Message` itself.
The following example shows how to use the `send` method to send a message:

```
amqpTemplate.send("marketData.topic", "quotes.nasdaq.THING1",
    new Message("12.34".getBytes(), someProperties));
```

You can set the `exchange` property on the template itself if you plan to use that template instance to send to the same exchange most or all of the time.
In such cases, you can use the second method in the preceding listing.
The following example is functionally equivalent to the previous example:

```
amqpTemplate.setExchange("marketData.topic");
amqpTemplate.send("quotes.nasdaq.FOO", new Message("12.34".getBytes(), someProperties));
```

If both the `exchange` and `routingKey` properties are set on the template, you can use the method that accepts only the `Message`.
The following example shows how to do so:

```
amqpTemplate.setExchange("marketData.topic");
amqpTemplate.setRoutingKey("quotes.nasdaq.FOO");
amqpTemplate.send(new Message("12.34".getBytes(), someProperties));
```

A better way of thinking about the exchange and routing key properties is that the explicit method parameters always override the template’s default values.
In fact, even if you do not explicitly set those properties on the template, there are always default values in place.
In both cases, the default is an empty `String`, but that is actually a sensible default.
As far as the routing key is concerned, it is not always necessary in the first place (for example, for a `Fanout` exchange).
Furthermore, a queue may be bound to an exchange with an empty `String`.
Those are both legitimate scenarios for reliance on the default empty `String` value for the routing key property of the template.
As far as the exchange name is concerned, the empty `String` is commonly used because the AMQP specification defines the “default exchange” as having no name.
Since all queues are automatically bound to that default exchange (which is a direct exchange), using their name as the binding value, the second method in the preceding listing can be used for simple point-to-point messaging to any queue through the default exchange.
You can provide the queue name as the `routingKey` by passing it as the method parameter at runtime.
The following example shows how to do so:

```
RabbitTemplate template = new RabbitTemplate(); // using default no-name Exchange
template.send("queue.helloWorld", new Message("Hello World".getBytes(), someProperties));
```

Alternatively, you can create a template that can be used for publishing primarily or exclusively to a single Queue.
The following example shows how to do so:

```
RabbitTemplate template = new RabbitTemplate(); // using default no-name Exchange
template.setRoutingKey("queue.helloWorld"); // but we'll always send to this Queue
template.send(new Message("Hello World".getBytes(), someProperties));
```

##### Message Builder API

Starting with version 1.3, a message builder API is provided by the `MessageBuilder` and `MessagePropertiesBuilder`.
These methods provide a convenient “fluent” means of creating a message or message properties.
The following examples show the fluent API in action:

```
Message message = MessageBuilder.withBody("foo".getBytes())
    .setContentType(MessageProperties.CONTENT_TYPE_TEXT_PLAIN)
    .setMessageId("123")
    .setHeader("bar", "baz")
    .build();
```

```
MessageProperties props = MessagePropertiesBuilder.newInstance()
    .setContentType(MessageProperties.CONTENT_TYPE_TEXT_PLAIN)
    .setMessageId("123")
    .setHeader("bar", "baz")
    .build();
Message message = MessageBuilder.withBody("foo".getBytes())
    .andProperties(props)
    .build();
```

Each of the properties defined on the [`MessageProperties`](https://docs.spring.io/spring-amqp/docs/latest-ga/api/org/springframework/amqp/core/MessageProperties.html) can be set.
Other methods include `setHeader(String key, String value)`, `removeHeader(String key)`, `removeHeaders()`, and `copyProperties(MessageProperties properties)`.
Each property setting method has a `set*IfAbsent()` variant.
In the cases where a default initial value exists, the method is named `set*IfAbsentOrDefault()`.

Five static methods are provided to create an initial message builder:

```
public static MessageBuilder withBody(byte[] body) (1)

public static MessageBuilder withClonedBody(byte[] body) (2)

public static MessageBuilder withBody(byte[] body, int from, int to) (3)

public static MessageBuilder fromMessage(Message message) (4)

public static MessageBuilder fromClonedMessage(Message message) (5)
```

|**1**| The message created by the builder has a body that is a direct reference to the argument. |
|-----|---|
|**2**| The message created by the builder has a body that is a new array containing a copy of bytes in the argument. |
|**3**| The message created by the builder has a body that is a new array containing the range of bytes from the argument.
See [`Arrays.copyOfRange()`](https://docs.oracle.com/javase/7/docs/api/java/util/Arrays.html) for more details.| +|**4**| The message created by the builder has a body that is a direct reference to the body of the argument.
The argument’s properties are copied to a new `MessageProperties` object. | +|**5**| The message created by the builder has a body that is a new array containing a copy of the argument’s body.
The argument’s properties are copied to a new `MessageProperties` object. |

Three static methods are provided to create a `MessagePropertiesBuilder` instance:

```
public static MessagePropertiesBuilder newInstance() (1)

public static MessagePropertiesBuilder fromProperties(MessageProperties properties) (2)

public static MessagePropertiesBuilder fromClonedProperties(MessageProperties properties) (3)
```

|**1**| A new message properties object is initialized with default values. |
|-----|---|
|**2**| The builder is initialized with, and `build()` will return, the provided properties object. |
|**3**| The argument’s properties are copied to a new `MessageProperties` object. |

With the `RabbitTemplate` implementation of `AmqpTemplate`, each of the `send()` methods has an overloaded version that takes an additional `CorrelationData` object.
When publisher confirms are enabled, this object is returned in the callback described in [`AmqpTemplate`](#amqp-template).
This lets the sender correlate a confirm (`ack` or `nack`) with the sent message.

Starting with version 1.6.7, the `CorrelationAwareMessagePostProcessor` interface was introduced, allowing the correlation data to be modified after the message has been converted.
The following listing shows its method definition:

```
Message postProcessMessage(Message message, Correlation correlation);
```

In version 2.0, this interface is deprecated.
The method has been moved to `MessagePostProcessor` with a default implementation that delegates to `postProcessMessage(Message message)`.

Also starting with version 1.6.7, a new callback interface called `CorrelationDataPostProcessor` is provided.
This is invoked after all `MessagePostProcessor` instances (both those provided in the `send()` method and those provided in `setBeforePublishPostProcessors()`).
Implementations can update or replace the correlation data supplied in the `send()` method (if any).
The `Message` and original `CorrelationData` (if any) are provided as arguments.
The following listing shows the `postProcess` method definition:

```
CorrelationData postProcess(Message message, CorrelationData correlationData);
```

##### Publisher Returns

When the template’s `mandatory` property is `true`, returned messages are provided by the callback described in [`AmqpTemplate`](#amqp-template).

Starting with version 1.4, the `RabbitTemplate` supports the SpEL `mandatoryExpression` property, which is evaluated against each request message as the root evaluation object, resolving to a `boolean` value.
Bean references, such as `@myBean.isMandatory(#root)`, can be used in the expression.

Publisher returns can also be used internally by the `RabbitTemplate` in send and receive operations.
See [Reply Timeout](#reply-timeout) for more information.

##### Batching

Version 1.4.2 introduced the `BatchingRabbitTemplate`.
This is a subclass of `RabbitTemplate` with an overridden `send` method that batches messages according to the `BatchingStrategy`.
Only when a batch is complete is the message sent to RabbitMQ.
The following listing shows the `BatchingStrategy` interface definition:

```
public interface BatchingStrategy {

    MessageBatch addToBatch(String exchange, String routingKey, Message message);

    Date nextRelease();

    Collection<MessageBatch> releaseBatches();

}
```

| |Batched data is held in memory.
Unsent messages can be lost in the event of a system failure.| +|---|-------------------------------------------------------------------------------------------------| + +A `SimpleBatchingStrategy` is provided. +It supports sending messages to a single exchange or routing key. +It has the following properties: + +* `batchSize`: The number of messages in a batch before it is sent. + +* `bufferLimit`: The maximum size of the batched message. + This preempts the `batchSize`, if exceeded, and causes a partial batch to be sent. + +* `timeout`: A time after which a partial batch is sent when there is no new activity adding messages to the batch. + +The `SimpleBatchingStrategy` formats the batch by preceding each embedded message with a four-byte binary length. +This is communicated to the receiving system by setting the `springBatchFormat` message property to `lengthHeader4`. + +| |Batched messages are automatically de-batched by listener containers by default (by using the `springBatchFormat` message header).
Rejecting any message from a batch causes the entire batch to be rejected.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +However, see [@RabbitListener with Batching](#receiving-batch) for more information. + +#### 4.1.6. Receiving Messages + +Message reception is always a little more complicated than sending. +There are two ways to receive a `Message`. +The simpler option is to poll for one `Message` at a time with a polling method call. +The more complicated yet more common approach is to register a listener that receives `Messages` on-demand, asynchronously. +We consider an example of each approach in the next two sub-sections. + +##### Polling Consumer + +The `AmqpTemplate` itself can be used for polled `Message` reception. +By default, if no message is available, `null` is returned immediately. +There is no blocking. +Starting with version 1.5, you can set a `receiveTimeout`, in milliseconds, and the receive methods block for up to that long, waiting for a message. +A value less than zero means block indefinitely (or at least until the connection to the broker is lost). +Version 1.6 introduced variants of the `receive` methods that let the timeout be passed in on each call. + +| |Since the receive operation creates a new `QueueingConsumer` for each message, this technique is not really appropriate for high-volume environments.
There are four simple `receive` methods available.
As with the `Exchange` on the sending side, there is a method that requires that a default queue property has been set directly on the template itself, and there is a method that accepts a queue parameter at runtime.
Version 1.6 introduced variants that accept a `timeoutMillis` argument to override the `receiveTimeout` on a per-request basis.
The following listing shows the definitions of the four methods:

```
Message receive() throws AmqpException;

Message receive(String queueName) throws AmqpException;

Message receive(long timeoutMillis) throws AmqpException;

Message receive(String queueName, long timeoutMillis) throws AmqpException;
```

As in the case of sending messages, the `AmqpTemplate` has some convenience methods for receiving POJOs instead of `Message` instances, and implementations provide a way to customize the `MessageConverter` used to create the `Object` returned.
The following listing shows those methods:

```
Object receiveAndConvert() throws AmqpException;

Object receiveAndConvert(String queueName) throws AmqpException;

Object receiveAndConvert(long timeoutMillis) throws AmqpException;

Object receiveAndConvert(String queueName, long timeoutMillis) throws AmqpException;
```

Starting with version 2.0, there are variants of these methods that take an additional `ParameterizedTypeReference` argument to convert complex types.
The template must be configured with a `SmartMessageConverter`.
See [Converting From a `Message` With `RabbitTemplate`](#json-complex) for more information.
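For illustration, a minimal polling call (the queue name, timeout, and handler below are arbitrary) might look like the following:

```
// poll with a five-second timeout; returns null if nothing arrives in time
Message message = template.receive("some.queue", 5_000);
if (message != null) {
    process(message); // a hypothetical handler
}

// or receive and convert the payload directly
Object payload = template.receiveAndConvert("some.queue", 5_000);
```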
Similar to the `sendAndReceive` methods, beginning with version 1.3, the `AmqpTemplate` has several convenience `receiveAndReply` methods for synchronously receiving, processing, and replying to messages.
The following listing shows those method definitions:

```
<R, S> boolean receiveAndReply(ReceiveAndReplyCallback<R, S> callback)
       throws AmqpException;

<R, S> boolean receiveAndReply(String queueName, ReceiveAndReplyCallback<R, S> callback)
       throws AmqpException;

<R, S> boolean receiveAndReply(ReceiveAndReplyCallback<R, S> callback,
       String replyExchange, String replyRoutingKey) throws AmqpException;

<R, S> boolean receiveAndReply(String queueName, ReceiveAndReplyCallback<R, S> callback,
       String replyExchange, String replyRoutingKey) throws AmqpException;

<R, S> boolean receiveAndReply(ReceiveAndReplyCallback<R, S> callback,
       ReplyToAddressCallback<S> replyToAddressCallback) throws AmqpException;

<R, S> boolean receiveAndReply(String queueName, ReceiveAndReplyCallback<R, S> callback,
       ReplyToAddressCallback<S> replyToAddressCallback) throws AmqpException;
```

The `AmqpTemplate` implementation takes care of the `receive` and `reply` phases.
In most cases, you should provide only an implementation of `ReceiveAndReplyCallback` to perform some business logic for the received message and build a reply object or message, if needed.
Note that a `ReceiveAndReplyCallback` may return `null`.
In that case, no reply is sent and `receiveAndReply` works like the `receive` method.
This lets the same queue be used for a mixture of messages, some of which may not need a reply.

Automatic message (request and reply) conversion is applied only if the provided callback is not an instance of `ReceiveAndReplyMessageCallback`, which provides a raw message exchange contract.

The `ReplyToAddressCallback` is useful for cases requiring custom logic to determine the `replyTo` address at runtime, based on the received message and the reply from the `ReceiveAndReplyCallback`.
By default, the `replyTo` information in the request message is used to route the reply.

The following listing shows an example of POJO-based receive and reply:

```
boolean received =
        this.template.receiveAndReply(ROUTE, new ReceiveAndReplyCallback<Order, Invoice>() {

                public Invoice handle(Order order) {
                        return processOrder(order);
                }
        });
if (received) {
    log.info("We received an order!");
}
```

##### Asynchronous Consumer

| |Spring AMQP also supports annotated listener endpoints through the use of the `@RabbitListener` annotation and provides an open infrastructure to register endpoints programmatically. This is by far the most convenient way to set up an asynchronous consumer. See [Annotation-driven Listener Endpoints](#async-annotation-driven) for more details.|
|---|---|
| |The prefetch default value used to be 1, which could lead to under-utilization of efficient consumers.
Starting with version 2.0, the default prefetch value is 250, which should keep consumers busy in most common scenarios and thus improve throughput.

There are, nevertheless, scenarios where the prefetch value should be low:

* For large messages, especially if the processing is slow (messages could add up to a large amount of memory in the client process)

* When strict message ordering is necessary (the prefetch value should be set back to 1 in this case)

* Other special cases

Also, with low-volume messaging and multiple consumers (including concurrency within a single listener container instance), you may wish to reduce the prefetch to get a more even distribution of messages across consumers.

See [Message Listener Container Configuration](#containerAttributes).

For more background about prefetch, see this post about [consumer utilization in RabbitMQ](https://www.rabbitmq.com/blog/2014/04/14/finding-bottlenecks-with-rabbitmq-3-3/) and this post about [queuing theory](https://www.rabbitmq.com/blog/2012/05/11/some-queuing-theory-throughput-latency-and-bandwidth/).|
|---|---|

###### Message Listener

For asynchronous `Message` reception, a dedicated component (not the `AmqpTemplate`) is involved.
That component is a container for a `Message`-consuming callback.
We consider the container and its properties later in this section.
First, though, we should look at the callback, since that is where your application code is integrated with the messaging system.
There are a few options for the callback, starting with an implementation of the `MessageListener` interface, which the following listing shows:

```
public interface MessageListener {
    void onMessage(Message message);
}
```

If your callback logic depends on the AMQP Channel instance for any reason, you may instead use the `ChannelAwareMessageListener`.
It looks similar but has an extra parameter.
The following listing shows the `ChannelAwareMessageListener` interface definition:

```
public interface ChannelAwareMessageListener {
    void onMessage(Message message, Channel channel) throws Exception;
}
```

| |In version 2.1, this interface moved from package `o.s.amqp.rabbit.core` to `o.s.amqp.rabbit.listener.api`.|
|---|---|
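As a brief sketch (assuming a 2.x version, where the interface can be implemented as a lambda, and assuming `AcknowledgeMode.MANUAL` is configured on the container), a `ChannelAwareMessageListener` can use the channel to acknowledge deliveries itself:

```
ChannelAwareMessageListener listener = (message, channel) -> {
    try {
        process(message); // a hypothetical handler
        channel.basicAck(message.getMessageProperties().getDeliveryTag(), false);
    }
    catch (Exception e) {
        // reject and requeue the delivery for another attempt
        channel.basicNack(message.getMessageProperties().getDeliveryTag(), false, true);
    }
};
```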
###### `MessageListenerAdapter`

If you prefer to maintain a stricter separation between your application logic and the messaging API, you can rely upon an adapter implementation that is provided by the framework.
This is often referred to as “Message-driven POJO” support.

| |Version 1.5 introduced a more flexible mechanism for POJO messaging, the `@RabbitListener` annotation. See [Annotation-driven Listener Endpoints](#async-annotation-driven) for more information.|
|---|---|

When using the adapter, you need to provide only a reference to the instance that the adapter itself should invoke.
The following example shows how to do so:

```
MessageListenerAdapter listener = new MessageListenerAdapter(somePojo);
listener.setDefaultListenerMethod("myMethod");
```

You can subclass the adapter and provide an implementation of `getListenerMethodName()` to dynamically select different methods based on the message.
This method has two parameters, `originalMessage` and `extractedMessage`, the latter being the result of any conversion.
By default, a `SimpleMessageConverter` is configured.
See [`SimpleMessageConverter`](#simple-message-converter) for more information, as well as information about other available converters.

Starting with version 1.4.2, the original message has `consumerQueue` and `consumerTag` properties, which can be used to determine the queue from which a message was received.

Starting with version 1.5, you can configure a map of consumer queue (or tag) to method name, to dynamically select the method to call.
If there is no entry in the map, the adapter falls back to the default listener method.
The default listener method (if not set) is `handleMessage`.

Starting with version 2.0, a convenient `FunctionalInterface` has been provided.
The following listing shows its definition:

```
@FunctionalInterface
public interface ReplyingMessageListener<T, R> {

    R handleMessage(T t);

}
```

This interface facilitates convenient configuration of the adapter by using Java 8 lambdas, as the following example shows:

```
new MessageListenerAdapter((ReplyingMessageListener<String, String>) data -> {
    ...
    return result;
});
```

Starting with version 2.2, the `buildListenerArguments(Object)` method has been deprecated and a new `buildListenerArguments(Object, Channel, Message)` method has been introduced instead.
The new method gives the listener access to the `Channel` and `Message` arguments, for example to call `channel.basicReject(long, boolean)` in manual acknowledgment mode.
The following listing shows the most basic example:

```
public class ExtendedListenerAdapter extends MessageListenerAdapter {

    @Override
    protected Object[] buildListenerArguments(Object extractedMessage, Channel channel, Message message) {
        return new Object[]{extractedMessage, channel, message};
    }

}
```

You can then configure `ExtendedListenerAdapter` the same way as `MessageListenerAdapter` if you need to receive the “channel” and “message”.
The parameters of the listener method should match what `buildListenerArguments(Object, Channel, Message)` returns, as the following example of a listener shows:

```
public void handleMessage(Object object, Channel channel, Message message) throws IOException {
    ...
}
```
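Putting the pieces together, the following is a rough sketch (the queue name, POJO reference, and acknowledge mode are illustrative) of wiring such an adapter into a container with manual acknowledgments:

```
ExtendedListenerAdapter adapter = new ExtendedListenerAdapter();
adapter.setDelegate(somePojo); // the POJO declaring handleMessage(Object, Channel, Message)

SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(rabbitConnectionFactory);
container.setQueueNames("some.queue");
container.setAcknowledgeMode(AcknowledgeMode.MANUAL); // so the listener can ack or reject itself
container.setMessageListener(adapter);
container.start();
```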
###### Container

Now that you have seen the various options for the `Message`-listening callback, we can turn our attention to the container.
Basically, the container handles the “active” responsibilities so that the listener callback can remain passive.
The container is an example of a “lifecycle” component.
It provides methods for starting and stopping.
When configuring the container, you essentially bridge the gap between an AMQP Queue and the `MessageListener` instance.
You must provide a reference to the `ConnectionFactory` and the queue names or Queue instances from which that listener should consume messages.

Prior to version 2.0, there was one listener container, the `SimpleMessageListenerContainer`.
There is now a second container, the `DirectMessageListenerContainer`.
The differences between the containers and the criteria you might apply when choosing between them are described in [Choosing a Container](#choose-container).

The following listing shows the most basic example, which works by using the `SimpleMessageListenerContainer`:

```
SimpleMessageListenerContainer container = new SimpleMessageListenerContainer();
container.setConnectionFactory(rabbitConnectionFactory);
container.setQueueNames("some.queue");
container.setMessageListener(new MessageListenerAdapter(somePojo));
```

As an “active” component, it is most common to create the listener container with a bean definition so that it can run in the background.
The following example shows one way to do so with XML:

```
<!-- representative configuration; the original listing was lost in conversion -->
<rabbit:listener-container connection-factory="rabbitConnectionFactory" type="direct">
    <rabbit:listener queues="some.queue" ref="somePojo" method="handle"/>
</rabbit:listener-container>
```

The following listing shows another way to do so with XML:

```
<!-- representative configuration; referencing a MessageListener bean directly -->
<rabbit:listener-container connection-factory="rabbitConnectionFactory" type="direct">
    <rabbit:listener queues="some.queue" ref="someListener"/>
</rabbit:listener-container>
```

Both of the preceding examples create a `DirectMessageListenerContainer` (notice the `type` attribute — it defaults to `simple`).

Alternately, you may prefer to use Java configuration, which looks similar to the preceding code snippet:

```
@Configuration
public class ExampleAmqpConfiguration {

    @Bean
    public SimpleMessageListenerContainer messageListenerContainer() {
        SimpleMessageListenerContainer container = new SimpleMessageListenerContainer();
        container.setConnectionFactory(rabbitConnectionFactory());
        container.setQueueName("some.queue");
        container.setMessageListener(exampleListener());
        return container;
    }

    @Bean
    public CachingConnectionFactory rabbitConnectionFactory() {
        CachingConnectionFactory connectionFactory =
            new CachingConnectionFactory("localhost");
        connectionFactory.setUsername("guest");
        connectionFactory.setPassword("guest");
        return connectionFactory;
    }

    @Bean
    public MessageListener exampleListener() {
        return new MessageListener() {
            public void onMessage(Message message) {
                System.out.println("received: " + message);
            }
        };
    }
}
```

###### Consumer Priority

Starting with RabbitMQ version 3.2, the broker supports consumer priority (see [Using Consumer Priorities with RabbitMQ](https://www.rabbitmq.com/blog/2013/12/16/using-consumer-priorities-with-rabbitmq/)).
This is enabled by setting the `x-priority` argument on the consumer.
The `SimpleMessageListenerContainer` now supports setting consumer arguments, as the following example shows:

```
container.setConsumerArguments(Collections.
        <String, Object>singletonMap("x-priority", Integer.valueOf(10)));
```

For convenience, the namespace provides the `priority` attribute on the `listener` element, as the following example shows:

```
<!-- representative configuration; the original listing was lost in conversion -->
<rabbit:listener-container connection-factory="rabbitConnectionFactory">
    <rabbit:listener queues="some.queue" ref="somePojo" method="handle" priority="10"/>
</rabbit:listener-container>
```

Starting with version 1.3, you can modify the queues on which the container listens at runtime.
See [Listener Container Queues](#listener-queues).

###### `auto-delete` Queues

When a container is configured to listen to `auto-delete` queues, when the queue has an `x-expires` option, or when the [Time-To-Live](https://www.rabbitmq.com/ttl.html) policy is configured on the broker, the queue is removed by the broker when the container is stopped (that is, when the last consumer is cancelled).
Before version 1.3, the container could not be restarted, because the queue was missing.
The `RabbitAdmin` only automatically redeclares queues and other elements when the connection is closed or when it opens, which does not happen when the container is stopped and started.

Starting with version 1.3, the container uses a `RabbitAdmin` to redeclare any missing queues during startup.

You can also use conditional declaration (see [Conditional Declaration](#conditional-declaration)) together with an `auto-startup="false"` admin to defer queue declaration until the container is started.
The following example shows how to do so:

```
<rabbit:queue id="otherAnon" declared-by="containerAdmin"/>

<rabbit:direct-exchange name="otherExchange" auto-delete="true" declared-by="containerAdmin">
    <rabbit:bindings>
        <rabbit:binding queue="otherAnon" key="otherAnon"/>
    </rabbit:bindings>
</rabbit:direct-exchange>

<rabbit:listener-container id="container2" auto-startup="false">
    <rabbit:listener id="listener2" ref="foo" queues="otherAnon" admin="containerAdmin"/>
</rabbit:listener-container>

<rabbit:admin id="containerAdmin" connection-factory="rabbitConnectionFactory"
    auto-startup="false"/>
```

In this case, the queue and exchange are declared by `containerAdmin`, which has `auto-startup="false"`, so that the elements are not declared during context initialization.
Also, the container is not started for the same reason.
When the container is later started, it uses its reference to `containerAdmin` to declare the elements.

##### Batched Messages

Batched messages (created by a producer) are automatically de-batched by listener containers (using the `springBatchFormat` message header).
Rejecting any message from a batch causes the entire batch to be rejected.
See [Batching](#template-batching) for more information about batching.

Starting with version 2.2, the `SimpleMessageListenerContainer` can be used to create batches on the consumer side (where the producer sent discrete messages).

Set the container property `consumerBatchEnabled` to enable this feature.
`deBatchingEnabled` must also be `true` so that the container is responsible for processing batches of both types.
Implement `BatchMessageListener` or `ChannelAwareBatchMessageListener` when `consumerBatchEnabled` is `true`.
Starting with version 2.2.7, both the `SimpleMessageListenerContainer` and the `DirectMessageListenerContainer` can debatch [producer-created batches](#template-batching) as `List<Message>`.
See [@RabbitListener with Batching](#receiving-batch) for information about using this feature with `@RabbitListener`.

##### Consumer Events

The containers publish application events whenever a listener (consumer) experiences a failure of some kind.
The event `ListenerContainerConsumerFailedEvent` has the following properties:

* `container`: The listener container where the consumer experienced the problem.

* `reason`: A textual reason for the failure.

* `fatal`: A boolean indicating whether the failure was fatal.
  With non-fatal exceptions, the container tries to restart the consumer, according to the `recoveryInterval` or `recoveryBackoff` (for the `SimpleMessageListenerContainer`) or the `monitorInterval` (for the `DirectMessageListenerContainer`).

* `throwable`: The `Throwable` that was caught.

These events can be consumed by implementing `ApplicationListener<ListenerContainerConsumerFailedEvent>`.

| |System-wide events (such as connection failures) are published by all consumers when `concurrentConsumers` is greater than 1.|
|---|---|

If a consumer fails because one of its queues is being used exclusively, by default, as well as publishing the event, a `WARN` log is issued.
To change this logging behavior, provide a custom `ConditionalExceptionLogger` in the `SimpleMessageListenerContainer` instance’s `exclusiveConsumerExceptionLogger` property.
See also [Logging Channel Close Events](#channel-close-logging).

Fatal errors are always logged at the `ERROR` level.
This is not modifiable.
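For example, a minimal sketch of an event listener bean that reacts to consumer failures (the bean name and logging are illustrative) might look like this:

```
@Component
public class ConsumerFailureLogger implements ApplicationListener<ListenerContainerConsumerFailedEvent> {

    @Override
    public void onApplicationEvent(ListenerContainerConsumerFailedEvent event) {
        if (event.isFatal()) {
            // a fatal failure; the container does not restart the consumer
            System.err.println("Fatal consumer failure: " + event.getReason()
                    + " cause: " + event.getThrowable());
        }
    }

}
```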
Several other events are published at various stages of the container lifecycle:

* `AsyncConsumerStartedEvent`: When the consumer is started.

* `AsyncConsumerRestartedEvent`: When the consumer is restarted after a failure (`SimpleMessageListenerContainer` only).

* `AsyncConsumerTerminatedEvent`: When a consumer is stopped normally.

* `AsyncConsumerStoppedEvent`: When the consumer is stopped (`SimpleMessageListenerContainer` only).

* `ConsumeOkEvent`: When a `consumeOk` is received from the broker; contains the queue name and `consumerTag`.

* `ListenerContainerIdleEvent`: See [Detecting Idle Asynchronous Consumers](#idle-containers).

* `MissingQueueEvent`: When a missing queue is detected.

##### Consumer Tags

You can provide a strategy to generate consumer tags.
By default, the consumer tag is generated by the broker.
The following listing shows the `ConsumerTagStrategy` interface definition:

```
public interface ConsumerTagStrategy {

    String createConsumerTag(String queue);

}
```

The queue name is made available so that it can (optionally) be used in the tag.
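Since the interface has a single method, it can be supplied as a lambda; the tag format below is purely illustrative:

```
SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(connectionFactory);
container.setQueueNames("some.queue");

// embed the queue name and a counter in each consumer tag
AtomicInteger counter = new AtomicInteger();
container.setConsumerTagStrategy(queue -> queue + ".consumer." + counter.incrementAndGet());
```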
See [Message Listener Container Configuration](#containerAttributes).

##### Annotation-driven Listener Endpoints

The easiest way to receive a message asynchronously is to use the annotated listener endpoint infrastructure.
In a nutshell, it lets you expose a method of a managed bean as a Rabbit listener endpoint.
The following example shows how to use the `@RabbitListener` annotation:

```
@Component
public class MyService {

    @RabbitListener(queues = "myQueue")
    public void processOrder(String data) {
        ...
    }

}
```

The idea of the preceding example is that, whenever a message is available on the queue named `myQueue`, the `processOrder` method is invoked accordingly (in this case, with the payload of the message).

The annotated endpoint infrastructure creates a message listener container behind the scenes for each annotated method, by using a `RabbitListenerContainerFactory`.

In the preceding example, `myQueue` must already exist and be bound to some exchange.
The queue can be declared and bound automatically, as long as a `RabbitAdmin` exists in the application context.

| |Property placeholders (`${some.property}`) or SpEL expressions (`#{someExpression}`) can be specified for the annotation properties (`queues` and so on). See [Listening to Multiple Queues](#annotation-multiple-queues) for an example of why you might use SpEL instead of a property placeholder. The following listing shows three examples of how to declare a Rabbit listener:|
|---|---|

```
@Component
public class MyService {

  @RabbitListener(bindings = @QueueBinding(
        value = @Queue(value = "myQueue", durable = "true"),
        exchange = @Exchange(value = "auto.exch", ignoreDeclarationExceptions = "true"),
        key = "orderRoutingKey")
  )
  public void processOrder(Order order) {
    ...
  }

  @RabbitListener(bindings = @QueueBinding(
        value = @Queue,
        exchange = @Exchange(value = "auto.exch"),
        key = "invoiceRoutingKey")
  )
  public void processInvoice(Invoice invoice) {
    ...
  }

  @RabbitListener(queuesToDeclare = @Queue(name = "${my.queue}", durable = "true"))
  public String handleWithSimpleDeclare(String data) {
      ...
  }

}
```

In the first example, a queue named `myQueue` is declared automatically (durable), together with the exchange, if needed, and bound to the exchange with the routing key.
In the second example, an anonymous (exclusive, auto-delete) queue is declared and bound.
Multiple `QueueBinding` entries can be provided, letting the listener listen to multiple queues.
In the third example, a queue with the name retrieved from the property `my.queue` is declared, if necessary, with the default binding to the default exchange, using the queue name as the routing key.

Since version 2.0, the `@Exchange` annotation supports any exchange type, including custom ones.
For more information, see [AMQP Concepts](https://www.rabbitmq.com/tutorials/amqp-concepts.html).

You can use normal `@Bean` definitions when you need more advanced configuration.

Notice the `ignoreDeclarationExceptions` attribute on the exchange in the first example.
This allows, for example, binding to an existing exchange that might have different settings (such as `internal`).
By default, the properties of an existing exchange must match.

Starting with version 2.0, you can now bind a queue to an exchange with multiple routing keys, as the following example shows:

```
...
    key = { "red", "yellow" }
...
```

You can also specify arguments within `@QueueBinding` annotations for queues, exchanges, and bindings, as the following example shows:

```
@RabbitListener(bindings = @QueueBinding(
        value = @Queue(value = "auto.headers", autoDelete = "true",
                        arguments = @Argument(name = "x-message-ttl", value = "10000",
                                                type = "java.lang.Integer")),
        exchange = @Exchange(value = "auto.headers", type = ExchangeTypes.HEADERS, autoDelete = "true"),
        arguments = {
                @Argument(name = "x-match", value = "all"),
                @Argument(name = "thing1", value = "somevalue"),
                @Argument(name = "thing2")
        })
)
public String handleWithHeadersExchange(String foo) {
    ...
}
```

Notice that the `x-message-ttl` argument is set to 10 seconds for the queue.
Since the argument type is not `String`, we have to specify its type — in this case, `Integer`.
As with all such declarations, if the queue already exists, the arguments must match those on the queue.
For the headers exchange, we set the binding arguments to match messages that have the `thing1` header set to `somevalue`, and the `thing2` header must be present with any value.
The `x-match` argument means both conditions must be satisfied.

The argument name, value, and type can be property placeholders (`${…​}`) or SpEL expressions (`#{…​}`).
The `name` must resolve to a `String`.
The expression for `type` must resolve to a `Class` or the fully-qualified name of a class.
The `value` must resolve to something that can be converted by the `DefaultConversionService` to the type (such as the `x-message-ttl` in the preceding example).

If a name resolves to `null` or an empty `String`, that `@Argument` is ignored.

###### Meta-annotations

Sometimes you may want to use the same configuration for multiple listeners.
To reduce the boilerplate configuration, you can use meta-annotations to create your own listener annotation.
The following example shows how to do so:

```
@Target({ElementType.TYPE, ElementType.METHOD, ElementType.ANNOTATION_TYPE})
@Retention(RetentionPolicy.RUNTIME)
@RabbitListener(bindings = @QueueBinding(
        value = @Queue,
        exchange = @Exchange(value = "metaFanout", type = ExchangeTypes.FANOUT)))
public @interface MyAnonFanoutListener {
}

public class MetaListener {

    @MyAnonFanoutListener
    public void handle1(String foo) {
        ...
    }

    @MyAnonFanoutListener
    public void handle2(String foo) {
        ...
    }

}
```

In the preceding example, each listener created by the `@MyAnonFanoutListener` annotation binds an anonymous, auto-delete queue to the fanout exchange, `metaFanout`.
Starting with version 2.2.3, `@AliasFor` is supported to allow overriding properties on the meta-annotated annotation.
Also, user annotations can now be `@Repeatable`, allowing multiple containers to be created for a method.

```
@Component
static class MetaAnnotationTestBean {

    @MyListener("queue1")
    @MyListener("queue2")
    public void handleIt(String body) {
    }

}

@RabbitListener
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@Repeatable(MyListeners.class)
static @interface MyListener {

    @AliasFor(annotation = RabbitListener.class, attribute = "queues")
    String[] value() default {};

}

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
static @interface MyListeners {

    MyListener[] value();

}
```

###### Enable Listener Endpoint Annotations

To enable support for `@RabbitListener` annotations, you can add `@EnableRabbit` to one of your `@Configuration` classes.
The following example shows how to do so:

```
@Configuration
@EnableRabbit
public class AppConfig {

    @Bean
    public SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory() {
        SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
        factory.setConnectionFactory(connectionFactory());
        factory.setConcurrentConsumers(3);
        factory.setMaxConcurrentConsumers(10);
        factory.setContainerCustomizer(container -> /* customize the container */);
        return factory;
    }
}
```

Since version 2.0, a `DirectRabbitListenerContainerFactory` is also available.
It creates `DirectMessageListenerContainer` instances.

| |For information to help you choose between `SimpleRabbitListenerContainerFactory` and `DirectRabbitListenerContainerFactory`, see [Choosing a Container](#choose-container).|
|---|---|

Starting with version 2.2.2, you can provide a `ContainerCustomizer` implementation (as shown above).
This customizer can be used to further configure the container after it has been created and configured; you can use it, for example, to set properties that are not exposed by the container factory.

By default, the infrastructure looks for a bean named `rabbitListenerContainerFactory` as the source for the factory to use to create message listener containers.
In this case, and ignoring the RabbitMQ infrastructure setup, the `processOrder` method can be invoked with a core pool size of three threads and a maximum pool size of ten threads.

You can customize the listener container factory to use for each annotation, or you can configure an explicit default by implementing the `RabbitListenerConfigurer` interface.
The default is required only if at least one endpoint is registered without a specific container factory.
See the [Javadoc](https://docs.spring.io/spring-amqp/docs/latest-ga/api/org/springframework/amqp/rabbit/annotation/RabbitListenerConfigurer.html) for full details and examples.

The container factories provide methods for adding `MessagePostProcessor` instances that are applied after receiving messages (before invoking the listener) and before sending replies.

See [Reply Management](#async-annotation-driven-reply) for information about replies.

Starting with version 2.0.6, you can add a `RetryTemplate` and `RecoveryCallback` to the listener container factory.
It is used when sending replies.
The `RecoveryCallback` is invoked when retries are exhausted.
You can use a `SendRetryContextAccessor` to get information from the context.
The following example shows how to do so:

```
factory.setRetryTemplate(retryTemplate);
factory.setReplyRecoveryCallback(ctx -> {
    Message failed = SendRetryContextAccessor.getMessage(ctx);
    Address replyTo = SendRetryContextAccessor.getAddress(ctx);
    Throwable t = ctx.getLastThrowable();
    ...
    return null;
});
```

If you prefer XML configuration, you can use the `<rabbit:annotation-driven>` element.
Any beans annotated with `@RabbitListener` are detected.

For `SimpleRabbitListenerContainer` instances, you can use XML similar to the following:

```
<rabbit:annotation-driven container-factory="rabbitListenerContainerFactory"/>

<bean id="rabbitListenerContainerFactory"
      class="org.springframework.amqp.rabbit.config.SimpleRabbitListenerContainerFactory">
    <property name="connectionFactory" ref="connectionFactory"/>
    <property name="concurrentConsumers" value="3"/>
    <property name="maxConcurrentConsumers" value="10"/>
</bean>
```

For `DirectMessageListenerContainer` instances, you can use XML similar to the following:

```
<rabbit:annotation-driven container-factory="rabbitListenerContainerFactory"/>

<bean id="rabbitListenerContainerFactory"
      class="org.springframework.amqp.rabbit.config.DirectRabbitListenerContainerFactory">
    <property name="connectionFactory" ref="connectionFactory"/>
    <property name="consumersPerQueue" value="3"/>
</bean>
```

Starting with version 2.0, the `@RabbitListener` annotation has a `concurrency` property.
It supports SpEL expressions (`#{…​}`) and property placeholders (`${…​}`).
Its meaning and allowed values depend on the container type, as follows:

* For the `DirectMessageListenerContainer`, the value must be a single integer value, which sets the `consumersPerQueue` property on the container.

* For the `SimpleMessageListenerContainer`, the value can be a single integer value, which sets the `concurrentConsumers` property on the container, or it can have the form `m-n`, where `m` is the `concurrentConsumers` property and `n` is the `maxConcurrentConsumers` property.

In either case, this setting overrides the settings on the factory.
Previously, you had to define different container factories if you had listeners that required different concurrency settings.

The annotation also allows overriding the factory `autoStartup` and `taskExecutor` properties via the `autoStartup` and `executor` (since 2.2) annotation properties.
Using a different executor for each listener might help with identifying the threads associated with each listener in logs and thread dumps.
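For example (the queue name, concurrency range, placeholder, and executor bean name below are illustrative), these per-listener overrides can be combined as follows:

```
@RabbitListener(queues = "some.queue", concurrency = "3-10",
        autoStartup = "${listeners.enabled:true}", executor = "myListenerExecutor")
public void listen(String in) {
    // handle the message
}
```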
Version 2.2 also added the `ackMode` property, which allows you to override the container factory’s `acknowledgeMode` property.

```
@RabbitListener(id = "manual.acks.1", queues = "manual.acks.1", ackMode = "MANUAL")
public void manual1(String in, Channel channel,
    @Header(AmqpHeaders.DELIVERY_TAG) long tag) throws IOException {

    ...
    channel.basicAck(tag, false);
}
```

###### Message Conversion for Annotated Methods

There are two conversion steps in the pipeline before invoking the listener.
The first step uses a `MessageConverter` to convert the incoming Spring AMQP `Message` to a Spring-messaging `Message`.
When the target method is invoked, the message payload is converted, if necessary, to the method parameter type.

The default `MessageConverter` for the first step is a Spring AMQP `SimpleMessageConverter` that handles conversion to `String` and `java.io.Serializable` objects.
All others remain as a `byte[]`.
In the following discussion, we call this the “message converter”.

The default converter for the second step is a `GenericMessageConverter`, which delegates to a conversion service (an instance of `DefaultFormattingConversionService`).
In the following discussion, we call this the “method argument converter”.

To change the message converter, you can add it as a property to the container factory bean.
The following example shows how to do so:

```
@Bean
public SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory() {
    SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
    ...
    factory.setMessageConverter(new Jackson2JsonMessageConverter());
    ...
    return factory;
}
```

This configures a Jackson2 converter that expects header information to be present to guide the conversion.

You can also use a `ContentTypeDelegatingMessageConverter`, which can handle conversion for different content types.

Starting with version 2.3, you can override the factory converter by specifying a bean name in the `messageConverter` property.

```
@Bean
public Jackson2JsonMessageConverter jsonConverter() {
    return new Jackson2JsonMessageConverter();
}

@RabbitListener(..., messageConverter = "jsonConverter")
public void listen(String in) {
    ...
}
```

This avoids having to declare a different container factory just to change the converter.

In most cases, it is not necessary to customize the method argument converter unless, for example, you want to use a custom `ConversionService`.

In versions prior to 1.6, the type information to convert the JSON had to be provided in message headers, or a custom `ClassMapper` was required.
Starting with version 1.6, if there are no type information headers, the type can be inferred from the target method arguments.

| |This type inference works only for `@RabbitListener` at the method level.|
|---|---|

See [Jackson2JsonMessageConverter](#json-message-converter) for more information.

If you wish to customize the method argument converter, you can do so as follows:
```
@Configuration
@EnableRabbit
public class AppConfig implements RabbitListenerConfigurer {

    ...

    @Bean
    public DefaultMessageHandlerMethodFactory myHandlerMethodFactory() {
        DefaultMessageHandlerMethodFactory factory = new DefaultMessageHandlerMethodFactory();
        factory.setMessageConverter(new GenericMessageConverter(myConversionService()));
        return factory;
    }

    @Bean
    public DefaultConversionService myConversionService() {
        DefaultConversionService conv = new DefaultConversionService();
        conv.addConverter(mySpecialConverter());
        return conv;
    }

    @Override
    public void configureRabbitListeners(RabbitListenerEndpointRegistrar registrar) {
        registrar.setMessageHandlerMethodFactory(myHandlerMethodFactory());
    }

    ...

}
```

| |For multi-method listeners (see [Multi-method Listeners](#annotation-method-selection)), the method selection is based on the payload of the message **after the message conversion**. The method argument converter is called only after the method has been selected.|
|---|---|

###### Adding a Custom `HandlerMethodArgumentResolver` to @RabbitListener

Starting with version 2.3.7, you can add your own `HandlerMethodArgumentResolver` and resolve custom method parameters.
All you need is to implement `RabbitListenerConfigurer` and use the `setCustomMethodArgumentResolvers()` method from the `RabbitListenerEndpointRegistrar` class.

```
@Configuration
class CustomRabbitConfig implements RabbitListenerConfigurer {

    @Override
    public void configureRabbitListeners(RabbitListenerEndpointRegistrar registrar) {
        registrar.setCustomMethodArgumentResolvers(
                new HandlerMethodArgumentResolver() {

                    @Override
                    public boolean supportsParameter(MethodParameter parameter) {
                        return CustomMethodArgument.class.isAssignableFrom(parameter.getParameterType());
                    }

                    @Override
                    public Object resolveArgument(MethodParameter parameter, org.springframework.messaging.Message<?> message) {
                        return new CustomMethodArgument(
                                (String) message.getPayload(),
                                message.getHeaders().get("customHeader", String.class)
                        );
                    }

                }
        );
    }

}
```

###### Programmatic Endpoint Registration

`RabbitListenerEndpoint` provides a model of a Rabbit endpoint and is responsible for configuring the container for that model.
The infrastructure lets you configure endpoints programmatically in addition to the ones that are detected by the `RabbitListener` annotation.
The following example shows how to do so:

```
@Configuration
@EnableRabbit
public class AppConfig implements RabbitListenerConfigurer {

    @Override
    public void configureRabbitListeners(RabbitListenerEndpointRegistrar registrar) {
        SimpleRabbitListenerEndpoint endpoint = new SimpleRabbitListenerEndpoint();
        endpoint.setQueueNames("anotherQueue");
        endpoint.setMessageListener(message -> {
            // processing
        });
        registrar.registerEndpoint(endpoint);
    }
}
```

In the preceding example, we used `SimpleRabbitListenerEndpoint`, which provides the actual `MessageListener` to invoke, but you could just as well build your own endpoint variant to describe a custom invocation mechanism.

It should be noted that you could just as well skip the use of `@RabbitListener` altogether and register your endpoints programmatically through `RabbitListenerConfigurer`.
###### Annotated Endpoint Method Signature

So far, we have been injecting a simple `String` in our endpoint, but it can actually have a very flexible method signature.
The following example rewrites it to inject the `Order` with a custom header:

```
@Component
public class MyService {

    @RabbitListener(queues = "myQueue")
    public void processOrder(Order order, @Header("order_type") String orderType) {
        ...
    }
}
```

The following list shows the arguments that are available to be matched with parameters in listener endpoints:

* The raw `org.springframework.amqp.core.Message`.

* The `MessageProperties` from the raw `Message`.

* The `com.rabbitmq.client.Channel` on which the message was received.

* The `org.springframework.messaging.Message` converted from the incoming AMQP message.

* `@Header`-annotated method arguments to extract a specific header value, including standard AMQP headers.

* `@Headers`-annotated arguments that must also be assignable to `java.util.Map` for getting access to all headers.

* The converted payload.

A non-annotated element that is not one of the supported types (that is, `Message`, `MessageProperties`, `Message<?>`, and `Channel`) is matched with the payload.
You can make that explicit by annotating the parameter with `@Payload`.
You can also turn on validation by adding an extra `@Valid`.

The ability to inject Spring’s message abstraction is particularly useful to benefit from all the information stored in the transport-specific message without relying on the transport-specific API.
The following example shows how to do so:

```
@RabbitListener(queues = "myQueue")
public void processOrder(Message<Order> order) {
    ...
}
```

Handling of method arguments is provided by `DefaultMessageHandlerMethodFactory`, which you can further customize to support additional method arguments.
The conversion and validation support can be customized there as well.

For instance, if we want to make sure our `Order` is valid before processing it, we can annotate the payload with `@Valid` and configure the necessary validator, as follows:

```
@Configuration
@EnableRabbit
public class AppConfig implements RabbitListenerConfigurer {

    @Override
    public void configureRabbitListeners(RabbitListenerEndpointRegistrar registrar) {
        registrar.setMessageHandlerMethodFactory(myHandlerMethodFactory());
    }

    @Bean
    public DefaultMessageHandlerMethodFactory myHandlerMethodFactory() {
        DefaultMessageHandlerMethodFactory factory = new DefaultMessageHandlerMethodFactory();
        factory.setValidator(myValidator());
        return factory;
    }
}
```

###### @RabbitListener @Payload Validation

Starting with version 2.3.7, it is easier to add a `Validator` to validate `@RabbitListener` and `@RabbitHandler` `@Payload` arguments.
Now, you can simply add the validator to the registrar itself.

```
@Configuration
@EnableRabbit
public class Config implements RabbitListenerConfigurer {
    ...
    @Override
    public void configureRabbitListeners(RabbitListenerEndpointRegistrar registrar) {
        registrar.setValidator(new MyValidator());
    }
}
```

| |When using Spring Boot with the validation starter, a `LocalValidatorFactoryBean` is auto-configured:|
|---|---|

```
@Configuration
@EnableRabbit
public class Config implements RabbitListenerConfigurer {

    @Autowired
    private LocalValidatorFactoryBean validator;
    ...
    @Override
    public void configureRabbitListeners(RabbitListenerEndpointRegistrar registrar) {
        registrar.setValidator(this.validator);
    }
}
```

To validate:

```
public static class ValidatedClass {

    @Max(10)
    private int bar;

    public int getBar() {
        return this.bar;
    }

    public void setBar(int bar) {
        this.bar = bar;
    }

}
```

and

```
@RabbitListener(id = "validated", queues = "queue1", errorHandler = "validationErrorHandler",
        containerFactory = "jsonListenerContainerFactory")
public void validatedListener(@Payload @Valid ValidatedClass val) {
    ...
}

@Bean
public RabbitListenerErrorHandler validationErrorHandler() {
    return (m, e) -> {
        ...
    };
}
```

###### Listening to Multiple Queues

When you use the `queues` attribute, you can specify that the associated container can listen to multiple queues.
You can use a `@Header` annotation to make the name of the queue from which a message was received available to the POJO method.
The following example shows how to do so:

```
@Component
public class MyService {

    @RabbitListener(queues = { "queue1", "queue2" } )
    public void processOrder(String data, @Header(AmqpHeaders.CONSUMER_QUEUE) String queue) {
        ...
    }

}
```

Starting with version 1.5, you can externalize the queue names by using property placeholders and SpEL.
The following example shows how to do so:

```
@Component
public class MyService {

    @RabbitListener(queues = "#{'${property.with.comma.delimited.queue.names}'.split(',')}" )
    public void processOrder(String data, @Header(AmqpHeaders.CONSUMER_QUEUE) String queue) {
        ...
    }

}
```

Prior to version 1.5, only a single queue could be specified this way.
Each queue needed a separate property.

###### Reply Management

The existing support in `MessageListenerAdapter` already lets your method have a non-void return type.
When that is the case, the result of the invocation is encapsulated in a message sent either to the address specified in the `ReplyToAddress` header of the original message or to the default address configured on the listener.
You can set that default address by using the `@SendTo` annotation of the messaging abstraction.

Assuming our `processOrder` method should now return an `OrderStatus`, we can write it as follows to automatically send a reply:

```
@RabbitListener(queues = "myQueue")
@SendTo("status")
public OrderStatus processOrder(Order order) {
    // order processing
    return status;
}
```

If you need to set additional headers in a transport-independent manner, you could return a `Message<OrderStatus>` instead, something like the following:

```
@RabbitListener(queues = "myQueue")
@SendTo("status")
public Message<OrderStatus> processOrder(Order order) {
    // order processing
    return MessageBuilder
        .withPayload(status)
        .setHeader("code", 1234)
        .build();
}
```

Alternatively, you can use a `MessagePostProcessor` in the `beforeSendReplyMessagePostProcessors` container factory property to add more headers.
Starting with version 2.2.3, the called bean and method are made available in the reply message, which can be used in a message post processor to communicate the information back to the caller:

```
factory.setBeforeSendReplyPostProcessors(msg -> {
    msg.getMessageProperties().setHeader("calledBean",
            msg.getMessageProperties().getTargetBean().getClass().getSimpleName());
    msg.getMessageProperties().setHeader("calledMethod",
            msg.getMessageProperties().getTargetMethod().getName());
    return msg;
});
```

Starting with version 2.2.5, you can configure a `ReplyPostProcessor` to modify the reply message before it is sent; it is called after the `correlationId` header has been set up to match the request.

```
@RabbitListener(queues = "test.header", group = "testGroup", replyPostProcessor = "echoCustomHeader")
public String capitalizeWithHeader(String in) {
    return in.toUpperCase();
}

@Bean
public ReplyPostProcessor echoCustomHeader() {
    return (req, resp) -> {
        resp.getMessageProperties().setHeader("myHeader", req.getMessageProperties().getHeader("myHeader"));
        return resp;
    };
}
```

The `@SendTo` value is assumed to be a reply `exchange` and `routingKey` pair that follows the `exchange/routingKey` pattern, where one of those parts can be omitted.
The valid values are as follows:

* `thing1/thing2`: The `replyTo` exchange and the `routingKey`.

* `thing1/`: The `replyTo` exchange and the default (empty) `routingKey`.

* `thing2` or `/thing2`: The `replyTo` `routingKey` and the default (empty) exchange.

* `/` or empty: The `replyTo` default exchange and the default `routingKey`.

Also, you can use `@SendTo` without a `value` attribute.
This case is equal to an empty `sendTo` pattern.
`@SendTo` is used only if the inbound message does not have a `replyToAddress` property.

Starting with version 1.5, the `@SendTo` value can be a bean initialization SpEL expression, as shown in the following example:

```
@RabbitListener(queues = "test.sendTo.spel")
@SendTo("#{spelReplyTo}")
public String capitalizeWithSendToSpel(String foo) {
    return foo.toUpperCase();
}
...
@Bean
public String spelReplyTo() {
    return "test.sendTo.reply.spel";
}
```

The expression must evaluate to a `String`, which can be a simple queue name (sent to the default exchange) or take the form `exchange/routingKey`, as discussed prior to the preceding example.

| |The `#{…​}` expression is evaluated once, during initialization.|
|---|---|

For dynamic reply routing, the message sender should include a `reply_to` message property or use the alternate runtime SpEL expression (described after the next example).

Starting with version 1.6, the `@SendTo` can be a SpEL expression that is evaluated at runtime against the request and reply, as the following example shows:

```
@RabbitListener(queues = "test.sendTo.spel")
@SendTo("!{'some.reply.queue.with.' + result.queueName}")
public Bar capitalizeWithSendToSpel(Foo foo) {
    return processTheFooAndReturnABar(foo);
}
```

The runtime nature of the SpEL expression is indicated with `!{…​}` delimiters.
The evaluation context `#root` object for the expression has three properties:

* `request`: The `o.s.amqp.core.Message` request object.

* `source`: The `o.s.messaging.Message` after conversion.

* `result`: The method result.

The context has a map property accessor, a standard type converter, and a bean resolver, which lets other beans in the context be referenced (for example, `@someBeanName.determineReplyQ(request, result)`).

In summary, `#{…​}` is evaluated once during initialization, with the `#root` object being the application context.
Beans are referenced by their names.
`!{…​}` is evaluated at runtime for each message, with the root object having the properties listed earlier.
Beans are referenced with their names, prefixed by `@`.

Starting with version 2.1, simple property placeholders are also supported (for example, `${some.reply.to}`).
With earlier versions, the following can be used as a workaround, as the following example shows:

```
@RabbitListener(queues = "foo")
@SendTo("#{environment['my.send.to']}")
public String listen(Message in) {
    ...
    return ...
}
```

###### Reply ContentType

If you are using a sophisticated message converter, such as the `ContentTypeDelegatingMessageConverter`, you can control the content type of the reply by setting the `replyContentType` property on the listener.
This allows the converter to select the appropriate delegate converter for the reply.
```
@RabbitListener(queues = "q1", messageConverter = "delegating",
        replyContentType = "application/json")
public Thing2 listen(Thing1 in) {
    ...
}
```

By default, for backwards compatibility, any content type property set by the converter is overwritten by this value after conversion.
Converters such as the `SimpleMessageConverter` use the reply type rather than the content type to determine the conversion needed, and they set the content type in the reply message accordingly.
This may not be the desired action, and it can be overridden by setting the `converterWinsContentType` property to `false`.
For example, if you return a `String` containing JSON, the `SimpleMessageConverter` sets the content type in the reply to `text/plain`.
The following configuration ensures the content type is set properly, even if the `SimpleMessageConverter` is used:

```
@RabbitListener(queues = "q1", replyContentType = "application/json",
        converterWinsContentType = "false")
public String listen(Thing in) {
    ...
    return someJsonString;
}
```

These properties (`replyContentType` and `converterWinsContentType`) do not apply when the return type is a Spring AMQP `Message` or a Spring Messaging `Message<?>`.
In the first case, there is no conversion involved; simply set the `contentType` message property.
In the second case, the behavior is controlled by message headers:

```
@RabbitListener(queues = "q1", messageConverter = "delegating")
@SendTo("q2")
public Message<String> listen(String in) {
    ...
    return MessageBuilder.withPayload(in.toUpperCase())
            .setHeader(MessageHeaders.CONTENT_TYPE, "application/xml")
            .build();
}
```

This content type is passed in the `MessageProperties` to the converter.
By default, for backwards compatibility, any content type property set by the converter is overwritten by this value after conversion.
If you wish to override that behavior, also set the `AmqpHeaders.CONTENT_TYPE_CONVERTER_WINS` header to `true`, and any value set by the converter will be retained.

###### Multi-method Listeners

Starting with version 1.5.0, you can specify the `@RabbitListener` annotation at the class level.
Together with the `@RabbitHandler` annotation, this lets a single listener invoke different methods, based on the payload type of the incoming message.
This is best described with an example:

```
@RabbitListener(id = "multi", queues = "someQueue")
@SendTo("my.reply.queue")
public class MultiListenerBean {

    @RabbitHandler
    public String thing2(Thing2 thing2) {
        ...
    }

    @RabbitHandler
    public String cat(Cat cat) {
        ...
    }

    @RabbitHandler
    public String hat(@Header("amqp_receivedRoutingKey") String rk, @Payload Hat hat) {
        ...
    }

    @RabbitHandler(isDefault = true)
    public String defaultMethod(Object object) {
        ...
    }

}
```

In this case, the individual `@RabbitHandler` methods are invoked if the converted payload is a `Thing2`, a `Cat`, or a `Hat`.
You should understand that the system must be able to identify a unique method based on the payload type.
The type is checked for assignability to a single parameter that has no annotations or that is annotated with the `@Payload` annotation.
Notice that the same method signatures apply as discussed for the method-level `@RabbitListener` ([described earlier](#message-listener-adapter)).

Starting with version 2.0.3, a `@RabbitHandler` method can be designated as the default method, which is invoked if there is no match on other methods.
At most one method can be so designated.
| |`@RabbitHandler` is intended only for processing message payloads after conversion. If you wish to receive the unconverted raw `Message` object, you must use `@RabbitListener` on the method, not the class.|
|---|---|

###### `@Repeatable` `@RabbitListener`

Starting with version 1.6, the `@RabbitListener` annotation is marked with `@Repeatable`.
This means that the annotation can appear on the same annotated element (method or class) multiple times.
In this case, a separate listener container is created for each annotation, each of which invokes the same listener `@Bean`.
Repeatable annotations can be used with Java 8 or above.

###### Proxy `@RabbitListener` and Generics

If your service is intended to be proxied (for example, in the case of `@Transactional`), you should keep some considerations in mind when the interface has generic parameters.
Consider the following example:

```
interface TxService<P> {

    String handle(P payload, String header);

}

static class TxServiceImpl implements TxService<Thing> {

    @Override
    @RabbitListener(...)
    public String handle(Thing thing, String rk) {
        ...
    }

}
```

With a generic interface and a particular implementation, you are forced to switch to the CGLIB target class proxy, because the actual implementation of the interface `handle` method is a bridge method.
In the case of transaction management, the use of CGLIB is configured by using an annotation option: `@EnableTransactionManagement(proxyTargetClass = true)`.
In this case, all annotations have to be declared on the target method in the implementation, as the following example shows:

```
static class TxServiceImpl implements TxService<Thing> {

    @Override
    @Transactional
    @RabbitListener(...)
    public String handle(@Payload Thing thing, @Header("amqp_receivedRoutingKey") String rk) {
        ...
    }

}
```

###### Handling Exceptions

By default, if an annotated listener method throws an exception, it is thrown to the container, and the message is requeued and redelivered, discarded, or routed to a dead letter exchange, depending on the container and broker configuration.
Nothing is returned to the sender.

Starting with version 2.0, the `@RabbitListener` annotation has two new attributes: `errorHandler` and `returnExceptions`.

These are not configured by default.

You can use the `errorHandler` to provide the bean name of a `RabbitListenerErrorHandler` implementation.
This functional interface has one method, as follows:

```
@FunctionalInterface
public interface RabbitListenerErrorHandler {

    Object handleError(Message amqpMessage, org.springframework.messaging.Message<?> message,
              ListenerExecutionFailedException exception) throws Exception;

}
```

As you can see, you have access to the raw message received from the container, the spring-messaging `Message<?>` object produced by the message converter, and the exception that was thrown by the listener (wrapped in a `ListenerExecutionFailedException`).
The error handler can either return some result (which is sent as the reply) or throw the original or a new exception (which is thrown to the container or returned to the sender, depending on the `returnExceptions` setting).

The `returnExceptions` attribute, when `true`, causes exceptions to be returned to the sender.
The exception is wrapped in a `RemoteInvocationResult` object.
On the sender side, there is an available `RemoteInvocationAwareMessageConverterAdapter` which, if configured into the `RabbitTemplate`, re-throws the server-side exception, wrapped in an `AmqpRemoteException`.
The stack trace of the server exception is synthesized by merging the server and client stack traces.
+
+| |This mechanism generally works only with the default `SimpleMessageConverter`, which uses Java serialization.
Exceptions are generally not “Jackson-friendly” and cannot be serialized to JSON.
If you use JSON, consider using an `errorHandler` to return some other Jackson-friendly `Error` object when an exception is thrown.|
+|---|---|
+
+| |In version 2.1, this interface moved from package `o.s.amqp.rabbit.listener` to `o.s.amqp.rabbit.listener.api`.|
+|---|---|
+
+Starting with version 2.1.7, the `Channel` is available in a messaging message header; this allows you to ack or nack the failed message when using `AcknowledgeMode.MANUAL`:
+
+```
+public Object handleError(Message amqpMessage, org.springframework.messaging.Message<?> message,
+        ListenerExecutionFailedException exception) throws Exception {
+    ...
+    message.getHeaders().get(AmqpHeaders.CHANNEL, Channel.class)
+        .basicReject(message.getHeaders().get(AmqpHeaders.DELIVERY_TAG, Long.class),
+                true);
+}
+```
+
+Starting with version 2.2.18, if a message conversion exception is thrown, the error handler will be called, with `null` in the `message` argument.
+This allows the application to send some result to the caller, indicating that a badly-formed message was received.
+Previously, such errors were thrown and handled by the container.
+
+###### Container Management
+
+Containers created for annotations are not registered with the application context.
+You can obtain a collection of all containers by invoking `getListenerContainers()` on the `RabbitListenerEndpointRegistry` bean.
+You can then iterate over this collection, for example, to stop or start all containers or invoke the `Lifecycle` methods
+on the registry itself, which will invoke the operations on each container.
+
+You can also get a reference to an individual container by using its `id`, using `getListenerContainer(String id)` — for
+example, `registry.getListenerContainer("multi")` for the container created by the snippet above.
+
+Starting with version 1.5.2, you can obtain the `id` values of the registered containers with `getListenerContainerIds()`.
+
+Starting with version 1.5, you can now assign a `group` to the container on the `RabbitListener` endpoint.
+This provides a mechanism to get a reference to a subset of containers.
+Adding a `group` attribute causes a bean of type `Collection<MessageListenerContainer>` to be registered with the context with the group name.
+
+##### @RabbitListener with Batching
+
+When receiving [a batch](#template-batching) of messages, the de-batching is normally performed by the container, and the listener is invoked with one message at a time.
+Starting with version 2.2, you can configure the listener container factory and listener to receive the entire batch in one call: set the factory’s `batchListener` property, and make the method payload parameter a `List`:
+
+```
+@Bean
+public SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory() {
+    SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
+    factory.setConnectionFactory(connectionFactory());
+    factory.setBatchListener(true);
+    return factory;
+}
+
+@RabbitListener(queues = "batch.1")
+public void listen1(List<Thing> in) {
+    ...
+}
+
+// or
+
+@RabbitListener(queues = "batch.2")
+public void listen2(List<Message<Thing>> in) {
+    ...
+}
+```
+
+Setting the `batchListener` property to `true` automatically turns off the `deBatchingEnabled` container property in containers that the factory creates (unless `consumerBatchEnabled` is `true` - see below).
+Effectively, the de-batching is moved from the container to the listener adapter, and the adapter creates the list that is passed to the listener.
+
+A batch-enabled factory cannot be used with a [multi-method listener](#annotation-method-selection).
+
+Also starting with version 2.2, when receiving batched messages one-at-a-time, the last message contains a boolean header set to `true`.
+This header can be obtained by adding a `@Header(AmqpHeaders.LAST_IN_BATCH) boolean last` parameter to your listener method.
+The header is mapped from `MessageProperties.isLastInBatch()`.
+In addition, `AmqpHeaders.BATCH_SIZE` is populated with the size of the batch in every message fragment.
+
+In addition, a new property `consumerBatchEnabled` has been added to the `SimpleMessageListenerContainer`.
+When this is `true`, the container will create a batch of messages, up to `batchSize`; a partial batch is delivered if `receiveTimeout` elapses with no new messages arriving.
+If a producer-created batch is received, it is de-batched and added to the consumer-side batch; therefore, the actual number of messages delivered may exceed `batchSize`, which represents the number of messages received from the broker.
+`deBatchingEnabled` must be `true` when `consumerBatchEnabled` is `true`; the container factory will enforce this requirement.
+
+```
+@Bean
+public SimpleRabbitListenerContainerFactory consumerBatchContainerFactory() {
+    SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
+    factory.setConnectionFactory(rabbitConnectionFactory());
+    factory.setConsumerTagStrategy(consumerTagStrategy());
+    factory.setBatchListener(true); // configures a BatchMessageListenerAdapter
+    factory.setBatchSize(2);
+    factory.setConsumerBatchEnabled(true);
+    return factory;
+}
+```
+
+When using `consumerBatchEnabled` with `@RabbitListener`:
+
+```
+@RabbitListener(queues = "batch.1", containerFactory = "consumerBatchContainerFactory")
+public void consumerBatch1(List<Message> amqpMessages) {
+    this.amqpMessagesReceived = amqpMessages;
+    this.batch1Latch.countDown();
+}
+
+@RabbitListener(queues = "batch.2", containerFactory = "consumerBatchContainerFactory")
+public void consumerBatch2(List<org.springframework.messaging.Message<?>> messages) {
+    this.messagingMessagesReceived = messages;
+    this.batch2Latch.countDown();
+}
+
+@RabbitListener(queues = "batch.3", containerFactory = "consumerBatchContainerFactory")
+public void consumerBatch3(List<String> strings) {
+    this.batch3Strings = strings;
+    this.batch3Latch.countDown();
+}
+```
+
+* The first is called with the raw, unconverted `org.springframework.amqp.core.Message`s received.
+
+* The second is called with the `org.springframework.messaging.Message`s with converted payloads and mapped headers/properties.
+
+* The third is called with the converted payloads, with no access to headers/properties.
+
+You can also add a `Channel` parameter, often used when using `MANUAL` ack mode.
+This is not very useful with the third example because you don’t have access to the `delivery_tag` property.
+
+##### Using Container Factories
+
+Listener container factories were introduced to support `@RabbitListener` and to register containers with the `RabbitListenerEndpointRegistry`, as discussed in [Programmatic Endpoint Registration](#async-annotation-driven-registration).
+
+Starting with version 2.1, they can be used to create any listener container — even a container without a listener (such as for use in Spring Integration).
+Of course, a listener must be added before the container is started.
+
+There are two ways to create such containers:
+
+* Use a `SimpleRabbitListenerEndpoint`
+
+* Add the listener after creation
+
+The following example shows how to use a `SimpleRabbitListenerEndpoint` to create a listener container:
+
+```
+@Bean
+public SimpleMessageListenerContainer factoryCreatedContainerSimpleListener(
+        SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory) {
+    SimpleRabbitListenerEndpoint endpoint = new SimpleRabbitListenerEndpoint();
+    endpoint.setQueueNames("queue.1");
+    endpoint.setMessageListener(message -> {
+        ...
+    });
+    return rabbitListenerContainerFactory.createListenerContainer(endpoint);
+}
+```
+
+The following example shows how to add the listener after creation:
+
+```
+@Bean
+public SimpleMessageListenerContainer factoryCreatedContainerNoListener(
+        SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory) {
+    SimpleMessageListenerContainer container = rabbitListenerContainerFactory.createListenerContainer();
+    container.setMessageListener(message -> {
+        ...
+    });
+    container.setQueueNames("test.no.listener.yet");
+    return container;
+}
+```
+
+In either case, the listener can also be a `ChannelAwareMessageListener`, since it is now a sub-interface of `MessageListener`.
+
+These techniques are useful if you wish to create several containers with similar properties or use a pre-configured container factory, such as the one provided by Spring Boot auto-configuration, or both.
+
+| |Containers created this way are normal `@Bean` instances and are not registered in the `RabbitListenerEndpointRegistry`.|
+|---|---|
+
+##### Asynchronous `@RabbitListener` Return Types
+
+Starting with version 2.1, `@RabbitListener` (and `@RabbitHandler`) methods can be specified with asynchronous return types `ListenableFuture<?>` and `Mono<?>`, letting the reply be sent asynchronously.
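+
+For example, a minimal sketch of a listener that replies through a `Mono` (the queue name is illustrative, and Project Reactor is assumed to be on the class path):
+
+```
+@RabbitListener(queues = "async.request")
+public Mono<String> handleAsync(String in) {
+    // the reply is sent when the Mono completes
+    return Mono.fromSupplier(in::toUpperCase);
+}
+```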
+
+| |The listener container factory must be configured with `AcknowledgeMode.MANUAL` so that the consumer thread will not ack the message; instead, the asynchronous completion will ack or nack the message when the async operation completes.
When the async result is completed with an error, whether the message is requeued or not depends on the exception type thrown, the container configuration, and the container error handler.
By default, the message will be requeued, unless the container’s `defaultRequeueRejected` property is set to `false` (it is `true` by default).
If the async result is completed with an `AmqpRejectAndDontRequeueException`, the message will not be requeued.
If the container’s `defaultRequeueRejected` property is `false`, you can override that by setting the future’s exception to an `ImmediateRequeueException` and the message will be requeued.
If some exception occurs within the listener method that prevents creation of the async result object, you MUST catch that exception and return an appropriate return object that will cause the message to be acknowledged or requeued.|
+|---|---|
+
+Starting with versions 2.2.21, 2.3.13, and 2.4.1, the `AcknowledgeMode` is automatically set to `MANUAL` when async return types are detected.
+In addition, incoming messages with fatal exceptions are negatively acknowledged individually; previously, any prior unacknowledged messages were also negatively acknowledged.
+
+##### Threading and Asynchronous Consumers
+
+A number of different threads are involved with asynchronous consumers.
+
+Threads from the `TaskExecutor` configured in the `SimpleMessageListenerContainer` are used to invoke the `MessageListener` when a new message is delivered by the `RabbitMQ Client`.
+If not configured, a `SimpleAsyncTaskExecutor` is used.
+If you use a pooled executor, you need to ensure the pool size is sufficient to handle the configured concurrency.
+With the `DirectMessageListenerContainer`, the `MessageListener` is invoked directly on a `RabbitMQ Client` thread.
+In this case, the `taskExecutor` is used for the task that monitors the consumers.
+
+| |When using the default `SimpleAsyncTaskExecutor`, for the threads the listener is invoked on, the listener container `beanName` is used in the `threadNamePrefix`.
This is useful for log analysis.
We generally recommend always including the thread name in the logging appender configuration.
When a `TaskExecutor` is specifically provided through the `taskExecutor` property on the container, it is used as is, without modification.
It is recommended that you use a similar technique to name the threads created by a custom `TaskExecutor` bean definition, to aid with thread identification in log messages.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `Executor` configured in the `CachingConnectionFactory` is passed into the `RabbitMQ Client` when creating the connection, and its threads are used to deliver new messages to the listener container. +If this is not configured, the client uses an internal thread pool executor with (at the time of writing) a pool size of `Runtime.getRuntime().availableProcessors() * 2` for each connection. + +If you have a large number of factories or are using `CacheMode.CONNECTION`, you may wish to consider using a shared `ThreadPoolTaskExecutor` with enough threads to satisfy your workload. + +| |With the `DirectMessageListenerContainer`, you need to ensure that the connection factory is configured with a task executor that has sufficient threads to support your desired concurrency across all listener containers that use that factory.
The default pool size (at the time of writing) is `Runtime.getRuntime().availableProcessors() * 2`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `RabbitMQ client` uses a `ThreadFactory` to create threads for low-level I/O (socket) operations. +To modify this factory, you need to configure the underlying RabbitMQ `ConnectionFactory`, as discussed in [Configuring the Underlying Client Connection Factory](#connection-factory). + +##### Choosing a Container + +Version 2.0 introduced the `DirectMessageListenerContainer` (DMLC). +Previously, only the `SimpleMessageListenerContainer` (SMLC) was available. +The SMLC uses an internal queue and a dedicated thread for each consumer. +If a container is configured to listen to multiple queues, the same consumer thread is used to process all the queues. +Concurrency is controlled by `concurrentConsumers` and other properties. +As messages arrive from the RabbitMQ client, the client thread hands them off to the consumer thread through the queue. +This architecture was required because, in early versions of the RabbitMQ client, multiple concurrent deliveries were not possible. +Newer versions of the client have a revised threading model and can now support concurrency. +This has allowed the introduction of the DMLC where the listener is now invoked directly on the RabbitMQ Client thread. +Its architecture is, therefore, actually “simpler” than the SMLC. +However, there are some limitations with this approach, and certain features of the SMLC are not available with the DMLC. +Also, concurrency is controlled by `consumersPerQueue` (and the client library’s thread pool). +The `concurrentConsumers` and associated properties are not available with this container. + +The following features are available with the SMLC but not the DMLC: + +* `batchSize`: With the SMLC, you can set this to control how many messages are delivered in a transaction or to reduce the number of acks, but it may cause the number of duplicate deliveries to increase after a failure. + (The DMLC does have `messagesPerAck`, which you can use to reduce the acks, the same as with `batchSize` and the SMLC, but it cannot be used with transactions — each message is delivered and ack’d in a separate transaction). + +* `consumerBatchEnabled`: enables batching of discrete messages in the consumer; see [Message Listener Container Configuration](#containerAttributes) for more information. + +* `maxConcurrentConsumers` and consumer scaling intervals or triggers — there is no auto-scaling in the DMLC. + It does, however, let you programmatically change the `consumersPerQueue` property and the consumers are adjusted accordingly. + +However, the DMLC has the following benefits over the SMLC: + +* Adding and removing queues at runtime is more efficient. + With the SMLC, the entire consumer thread is restarted (all consumers canceled and re-created). + With the DMLC, unaffected consumers are not canceled. + +* The context switch between the RabbitMQ Client thread and the consumer thread is avoided. + +* Threads are shared across consumers rather than having a dedicated thread for each consumer in the SMLC. 
+  However, see the IMPORTANT note about the connection factory configuration in [Threading and Asynchronous Consumers](#threading).
+
+See [Message Listener Container Configuration](#containerAttributes) for information about which configuration properties apply to each container.
+
+##### Detecting Idle Asynchronous Consumers
+
+While efficient, one problem with asynchronous consumers is detecting when they are idle — users might want to take
+some action if no messages arrive for some period of time.
+
+Starting with version 1.6, it is now possible to configure the listener container to publish a `ListenerContainerIdleEvent` when some time passes with no message delivery.
+While the container is idle, an event is published every `idleEventInterval` milliseconds.
+
+To configure this feature, set `idleEventInterval` on the container.
+The following example shows how to do so in XML and in Java (for both a `SimpleMessageListenerContainer` and a `SimpleRabbitListenerContainerFactory`):
+
+```
+<rabbit:listener-container connection-factory="connectionFactory"
+        ...
+        idle-event-interval="60000"
+        ...
+        >
+    <rabbit:listener id="container1" queue-names="foo" ref="myListener" method="handle" />
+</rabbit:listener-container>
+```
+
+```
+@Bean
+public SimpleMessageListenerContainer simpleMessageListenerContainer(ConnectionFactory connectionFactory) {
+    SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(connectionFactory);
+    ...
+    container.setIdleEventInterval(60000L);
+    ...
+    return container;
+}
+```
+
+```
+@Bean
+public SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory() {
+    SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
+    factory.setConnectionFactory(rabbitConnectionFactory());
+    factory.setIdleEventInterval(60000L);
+    ...
+    return factory;
+}
+```
+
+In each of these cases, an event is published once per minute while the container is idle.
+
+###### Event Consumption
+
+You can capture idle events by implementing `ApplicationListener` — either a general listener, or one narrowed to only
+receive this specific event.
+You can also use `@EventListener`, introduced in Spring Framework 4.2.
+
+The following example combines the `@RabbitListener` and `@EventListener` into a single class.
+You need to understand that the application listener gets events for all containers, so you may need to
+check the listener ID if you want to take specific action based on which container is idle.
+You can also use the `@EventListener` `condition` for this purpose.
+
+The events have four properties:
+
+* `source`: The listener container instance
+
+* `id`: The listener ID (or container bean name)
+
+* `idleTime`: The time the container had been idle when the event was published
+
+* `queueNames`: The names of the queue(s) that the container listens to
+
+The following example shows how to create listeners by using both the `@RabbitListener` and the `@EventListener` annotations:
+
+```
+public class Listener {
+
+    @RabbitListener(id="someId", queues="#{queue.name}")
+    public String listen(String foo) {
+        return foo.toUpperCase();
+    }
+
+    @EventListener(condition = "event.listenerId == 'someId'")
+    public void onApplicationEvent(ListenerContainerIdleEvent event) {
+        ...
+    }
+
+}
+```
+
+| |Event listeners see events for all containers.
Consequently, in the preceding example, we narrow the events received based on the listener ID.|
+|---|---|
+
+| |If you wish to use the idle event to stop the listener container, you should not call `container.stop()` on the thread that calls the listener.
Doing so always causes delays and unnecessary log messages.
Instead, you should hand off the event to a different thread that can then stop the container.|
+|---|---|
+
+##### Monitoring Listener Performance
+
+Starting with version 2.2, the listener containers will automatically create and update Micrometer `Timer`s for the listener, if `Micrometer` is detected on the class path and a `MeterRegistry` is present in the application context.
+The timers can be disabled by setting the container property `micrometerEnabled` to `false`.
+
+Two timers are maintained - one for successful calls to the listener and one for failures.
+With a simple `MessageListener`, there is a pair of timers for each configured queue.
+
+The timers are named `spring.rabbitmq.listener` and have the following tags:
+
+* `listenerId`: the listener id (or container bean name)
+
+* `queue`: the queue name for a simple listener, or the list of configured queue names when `consumerBatchEnabled` is `true` (because a batch may contain messages from multiple queues)
+
+* `result`: `success` or `failure`
+
+* `exception`: `none` or `ListenerExecutionFailedException`
+
+You can add additional tags using the `micrometerTags` container property.
+
+#### 4.1.7. Containers and Broker-Named Queues
+
+While it is preferable to use `AnonymousQueue` instances as auto-delete queues, starting with version 2.1, you can use broker-named queues with listener containers.
+The following example shows how to do so:
+
+```
+@Bean
+public Queue queue() {
+    return new Queue("", false, true, true);
+}
+
+@Bean
+public SimpleMessageListenerContainer container() {
+    SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(cf());
+    container.setQueues(queue());
+    container.setMessageListener(m -> {
+        ...
+    });
+    container.setMissingQueuesFatal(false);
+    return container;
+}
+```
+
+Notice the empty `String` for the name.
+When the `RabbitAdmin` declares queues, it updates the `Queue.actualName` property with the name returned by the broker.
+You must use `setQueues()` when you configure the container for this to work, so that the container can access the declared name at runtime.
+Just setting the names is insufficient.
+
+| |You cannot add broker-named queues to the containers while they are running.|
+|---|---|
+
+| |When a connection is reset and a new one is established, the new queue gets a new name.
Since there is a race condition between the container restarting and the queue being re-declared, it is important to set the container’s `missingQueuesFatal` property to `false`, since the container is likely to initially try to reconnect to the old queue.|
+|---|---|
+
+#### 4.1.8. Message Converters
+
+The `AmqpTemplate` also defines several methods for sending and receiving messages that delegate to a `MessageConverter`.
+The `MessageConverter` provides a single method for each direction: one for converting **to** a `Message` and another for converting **from** a `Message`.
+Notice that, when converting to a `Message`, you can also provide properties in addition to the object.
+The `object` parameter typically corresponds to the Message body.
+The following listing shows the `MessageConverter` interface definition:
+
+```
+public interface MessageConverter {
+
+    Message toMessage(Object object, MessageProperties messageProperties)
+        throws MessageConversionException;
+
+    Object fromMessage(Message message) throws MessageConversionException;
+
+}
+```
+
+The relevant `Message`-sending methods on the `AmqpTemplate` are simpler than the methods we discussed previously, because they do not require the `Message` instance.
+Instead, the `MessageConverter` is responsible for “creating” each `Message` by converting the provided object to the byte array for the `Message` body and then adding any provided `MessageProperties`.
+The following listing shows the definitions of the various methods:
+
+```
+void convertAndSend(Object message) throws AmqpException;
+
+void convertAndSend(String routingKey, Object message) throws AmqpException;
+
+void convertAndSend(String exchange, String routingKey, Object message)
+    throws AmqpException;
+
+void convertAndSend(Object message, MessagePostProcessor messagePostProcessor)
+    throws AmqpException;
+
+void convertAndSend(String routingKey, Object message,
+    MessagePostProcessor messagePostProcessor) throws AmqpException;
+
+void convertAndSend(String exchange, String routingKey, Object message,
+    MessagePostProcessor messagePostProcessor) throws AmqpException;
+```
+
+On the receiving side, there are only two methods: one that accepts the queue name and one that relies on the template’s “queue” property having been set.
+The following listing shows the definitions of the two methods:
+
+```
+Object receiveAndConvert() throws AmqpException;
+
+Object receiveAndConvert(String queueName) throws AmqpException;
+```
+
+| |The `MessageListenerAdapter` mentioned in [Asynchronous Consumer](#async-consumer) also uses a `MessageConverter`.|
+|---|---|
+
+##### `SimpleMessageConverter`
+
+The default implementation of the `MessageConverter` strategy is called `SimpleMessageConverter`.
+This is the converter that is used by an instance of `RabbitTemplate` if you do not explicitly configure an alternative.
+It handles text-based content, serialized Java objects, and byte arrays.
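+
+For example, a minimal sketch of these converter-backed operations (assuming a configured `RabbitTemplate` named `rabbitTemplate`; the queue name is illustrative):
+
+```
+// uses the default SimpleMessageConverter: a String becomes a text/plain body
+rabbitTemplate.convertAndSend("some.queue", "Hello, world!");
+
+// converts the text/plain body of the received message back to a String (null if the queue is empty)
+String received = (String) rabbitTemplate.receiveAndConvert("some.queue");
+```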
+
+###### Converting From a `Message`
+
+If the content type of the input `Message` begins with "text" (for example,
+"text/plain"), it also checks for the content-encoding property to determine the charset to be used when converting the `Message` body byte array to a Java `String`.
+If no content-encoding property has been set on the input `Message`, it uses the UTF-8 charset by default.
+If you need to override that default setting, you can configure an instance of `SimpleMessageConverter`, set its `defaultCharset` property, and inject that into a `RabbitTemplate` instance.
+
+If the content-type property value of the input `Message` is set to "application/x-java-serialized-object", the `SimpleMessageConverter` tries to deserialize (rehydrate) the byte array into a Java object.
+While that might be useful for simple prototyping, we do not recommend relying on Java serialization, since it leads to tight coupling between the producer and the consumer.
+Of course, it also rules out usage of non-Java systems on either side.
+With AMQP being a wire-level protocol, it would be unfortunate to lose much of that advantage with such restrictions.
+In the next two sections, we explore some alternatives for passing rich domain object content without relying on Java serialization.
+
+For all other content-types, the `SimpleMessageConverter` returns the `Message` body content directly as a byte array.
+
+See [Java Deserialization](#java-deserialization) for important information.
+
+###### Converting To a `Message`
+
+When converting to a `Message` from an arbitrary Java Object, the `SimpleMessageConverter` likewise deals with byte arrays, strings, and serializable instances.
+It converts each of these to bytes (in the case of byte arrays, there is nothing to convert), and it sets the content-type property accordingly.
+If the `Object` to be converted does not match one of those types, the `Message` body is null.
+
+##### `SerializerMessageConverter`
+
+This converter is similar to the `SimpleMessageConverter` except that it can be configured with other Spring Framework `Serializer` and `Deserializer` implementations for `application/x-java-serialized-object` conversions.
+
+See [Java Deserialization](#java-deserialization) for important information.
+
+##### Jackson2JsonMessageConverter
+
+This section covers using the `Jackson2JsonMessageConverter` to convert to and from a `Message`.
+It has the following sections:
+
+* [Converting to a `Message`](#Jackson2JsonMessageConverter-to-message)
+
+* [Converting from a `Message`](#Jackson2JsonMessageConverter-from-message)
+
+###### Converting to a `Message`
+
+As mentioned in the previous section, relying on Java serialization is generally not recommended.
+One rather common alternative that is more flexible and portable across different languages and platforms is JSON
+(JavaScript Object Notation).
+The converter can be configured on any `RabbitTemplate` instance to override its usage of the `SimpleMessageConverter` default.
+The `Jackson2JsonMessageConverter` uses the `com.fasterxml.jackson` 2.x library.
+The following example configures a `Jackson2JsonMessageConverter`:
+
+```
+<rabbit:template id="jsonRabbitTemplate"
+    connection-factory="connectionFactory"
+    message-converter="jsonMessageConverter" />
+
+<bean id="jsonMessageConverter"
+    class="org.springframework.amqp.support.converter.Jackson2JsonMessageConverter" />
+```
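+
+A minimal Java-configuration sketch of the same wiring (bean names are illustrative):
+
+```
+@Bean
+public RabbitTemplate jsonRabbitTemplate(ConnectionFactory connectionFactory) {
+    RabbitTemplate template = new RabbitTemplate(connectionFactory);
+    // replace the default SimpleMessageConverter with the JSON converter
+    template.setMessageConverter(new Jackson2JsonMessageConverter());
+    return template;
+}
+```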
+
+As shown above, `Jackson2JsonMessageConverter` uses a `DefaultClassMapper` by default.
+Type information is added to (and retrieved from) `MessageProperties`.
+If an inbound message does not contain type information in `MessageProperties`, but you know the expected type, you
+can configure a static type by using the `defaultType` property, as the following example shows:
+
+```
+<bean id="jsonConverterWithDefaultType"
+        class="org.springframework.amqp.support.converter.Jackson2JsonMessageConverter">
+    <property name="classMapper">
+        <bean class="org.springframework.amqp.support.converter.DefaultClassMapper">
+            <property name="defaultType" value="thing1.PurchaseOrder" />
+        </bean>
+    </property>
+</bean>
+```
+
+In addition, you can provide custom mappings from the value in the `*TypeId*` header.
+The following example shows how to do so:
+
+```
+@Bean
+public Jackson2JsonMessageConverter jsonMessageConverter() {
+    Jackson2JsonMessageConverter jsonConverter = new Jackson2JsonMessageConverter();
+    jsonConverter.setClassMapper(classMapper());
+    return jsonConverter;
+}
+
+@Bean
+public DefaultClassMapper classMapper() {
+    DefaultClassMapper classMapper = new DefaultClassMapper();
+    Map<String, Class<?>> idClassMapping = new HashMap<>();
+    idClassMapping.put("thing1", Thing1.class);
+    idClassMapping.put("thing2", Thing2.class);
+    classMapper.setIdClassMapping(idClassMapping);
+    return classMapper;
+}
+```
+
+Now, if the sending system sets the header to `thing1`, the converter creates a `Thing1` object, and so on.
+See the [Receiving JSON from Non-Spring Applications](#spring-rabbit-json) sample application for a complete discussion about converting messages from non-Spring applications.
+
+###### Converting from a `Message`
+
+Inbound messages are converted to objects according to the type information added to headers by the sending system.
+
+In versions prior to 1.6, if type information was not present, conversion would fail.
+Starting with version 1.6, if type information is missing, the converter converts the JSON by using Jackson defaults (usually a map).
+
+Also, starting with version 1.6, when you use `@RabbitListener` annotations (on methods), the inferred type information is added to the `MessageProperties`.
+This lets the converter convert to the argument type of the target method.
+This only applies if there is one parameter with no annotations or a single parameter with the `@Payload` annotation.
+Parameters of type `Message` are ignored during the analysis.
+
+| |By default, the inferred type information will override the inbound `*TypeId*` and related headers created
by the sending system.
This lets the receiving system automatically convert to a different domain object.
This applies only if the parameter type is concrete (not abstract or an interface) or it is from the `java.util` package.
In all other cases, the `*TypeId*` and related headers are used.
There are cases where you might wish to override the default behavior and always use the `*TypeId*` information.
For example, suppose you have a `@RabbitListener` that takes a `Thing1` argument but the message contains a `Thing2` that
is a subclass of `Thing1` (which is concrete).
The inferred type would be incorrect.
To handle this situation, set the `TypePrecedence` property on the `Jackson2JsonMessageConverter` to `TYPE_ID` instead
of the default `INFERRED`.
(The property is actually on the converter’s `DefaultJackson2JavaTypeMapper`, but a setter is provided on the converter
for convenience.)
If you inject a custom type mapper, you should set the property on the mapper instead.|
+|---|---|
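+
+For example, a minimal sketch of the convenience setter described above:
+
+```
+@Bean
+public Jackson2JsonMessageConverter jsonConverter() {
+    Jackson2JsonMessageConverter converter = new Jackson2JsonMessageConverter();
+    // always trust the *TypeId* header over the inferred method argument type
+    converter.setTypePrecedence(Jackson2JavaTypeMapper.TypePrecedence.TYPE_ID);
+    return converter;
+}
+```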
+
+| |When converting from the `Message`, an incoming `MessageProperties.getContentType()` must be JSON-compliant (`contentType.contains("json")` is used to check).
Starting with version 2.2, `application/json` is assumed if there is no `contentType` property, or it has the default value `application/octet-stream`.
To revert to the previous behavior (return an unconverted `byte[]`), set the converter’s `assumeSupportedContentType` property to `false`.
If the content type is not supported, a `WARN` log message `Could not convert incoming message with content-type […]` is emitted and `message.getBody()` is returned as is — as a `byte[]`.
So, to meet the `Jackson2JsonMessageConverter` requirements on the consumer side, the producer must add the `contentType` message property — for example, as `application/json` or `text/x-json` or by using the `Jackson2JsonMessageConverter`, which sets the header automatically.
The following listing shows a number of converter calls:|
+|---|---|
+
+```
+@RabbitListener
+public void thing1(Thing1 thing1) {...}
+
+@RabbitListener
+public void thing1(@Payload Thing1 thing1, @Header("amqp_consumerQueue") String queue) {...}
+
+@RabbitListener
+public void thing1(Thing1 thing1, o.s.amqp.core.Message message) {...}
+
+@RabbitListener
+public void thing1(Thing1 thing1, o.s.messaging.Message<byte[]> message) {...}
+
+@RabbitListener
+public void thing1(Thing1 thing1, String bar) {...}
+
+@RabbitListener
+public void thing1(Thing1 thing1, o.s.messaging.Message<?> message) {...}
+```
+
+In the first four cases in the preceding listing, the converter tries to convert to the `Thing1` type.
+The fifth example is invalid because we cannot determine which argument should receive the message payload.
+With the sixth example, the Jackson defaults apply due to the generic type being a `WildcardType`.
+
+You can, however, create a custom converter and use the `targetMethod` message property to decide which type to convert
+the JSON to.
+
+| |This type inference can only be achieved when the `@RabbitListener` annotation is declared at the method level.
With class-level `@RabbitListener`, the converted type is used to select which `@RabbitHandler` method to invoke.
For this reason, the infrastructure provides the `targetObject` message property, which you can use in a custom
converter to determine the type.|
+|---|---|
+
+| |Starting with version 1.6.11, `Jackson2JsonMessageConverter` and, therefore, `DefaultJackson2JavaTypeMapper` (`DefaultClassMapper`) provide the `trustedPackages` option to overcome the [Serialization Gadgets](https://pivotal.io/security/cve-2017-4995) vulnerability.
By default and for backward compatibility, the `Jackson2JsonMessageConverter` trusts all packages — that is, it uses `*` for the option.|
+|---|---|
+
+###### Deserializing Abstract Classes
+
+Prior to version 2.2.8, if the inferred type of a `@RabbitListener` was an abstract class (including interfaces), the converter would fall back to looking for type information in the headers and, if present, used that information; if it was not present, it would try to create the abstract class.
+This caused a problem when a custom `ObjectMapper` configured with a custom deserializer to handle the abstract class was used but the incoming message had invalid type headers.
+
+Starting with version 2.2.8, the previous behavior is retained by default.
+If you have such a custom `ObjectMapper` and you want to ignore type headers and always use the inferred type for conversion, set the `alwaysConvertToInferredType` property to `true`.
+This is needed for backwards compatibility and to avoid the overhead of an attempted conversion when it would fail (with a standard `ObjectMapper`).
+
+###### Using Spring Data Projection Interfaces
+
+Starting with version 2.2, you can convert JSON to a Spring Data Projection interface instead of a concrete type.
+This allows very selective and low-coupled bindings to data, including the lookup of values from multiple places inside the JSON document.
+For example, the following interface can be defined as a message payload type:
+
+```
+interface SomeSample {
+
+    @JsonPath({ "$.username", "$.user.name" })
+    String getUsername();
+
+}
+```
+
+```
+@RabbitListener(queues = "projection")
+public void projection(SomeSample in) {
+    String username = in.getUsername();
+    ...
+}
+```
+
+By default, accessor methods are used to look up the property name as a field in the received JSON document.
+The `@JsonPath` expression allows customization of the value lookup, and even lets you define multiple JSON path expressions, to look up values from multiple places until an expression returns an actual value.
+
+To enable this feature, set the `useProjectionForInterfaces` property to `true` on the message converter.
+You must also add `spring-data:spring-data-commons` and `com.jayway.jsonpath:json-path` to the class path.
+
+When used as the parameter to a `@RabbitListener` method, the interface type is automatically passed to the converter as normal.
+
+###### Converting From a `Message` With `RabbitTemplate`
+
+As mentioned earlier, type information is conveyed in message headers to assist the converter when converting from a message.
+This works fine in most cases.
+However, when using generic types, it can only convert simple objects and known “container” objects (lists, arrays, and maps).
+Starting with version 2.0, the `Jackson2JsonMessageConverter` implements `SmartMessageConverter`, which lets it be used with the new `RabbitTemplate` methods that take a `ParameterizedTypeReference` argument.
+This allows conversion of complex generic types, as shown in the following example:
+
+```
+Thing1<Thing2> thing1 =
+    rabbitTemplate.receiveAndConvert(new ParameterizedTypeReference<Thing1<Thing2>>() { });
+```
+
+| |Starting with version 2.1, the `AbstractJsonMessageConverter` class has been removed.
It is no longer the base class for `Jackson2JsonMessageConverter`.
It has been replaced by `AbstractJackson2MessageConverter`.|
+|---|---|
+
+##### `MarshallingMessageConverter`
+
+Yet another option is the `MarshallingMessageConverter`.
+It delegates to the Spring OXM library’s implementations of the `Marshaller` and `Unmarshaller` strategy interfaces.
+You can read more about that library [here](https://docs.spring.io/spring/docs/current/spring-framework-reference/html/oxm.html).
+In terms of configuration, it is most common to provide only the constructor argument, since most implementations of `Marshaller` also implement `Unmarshaller`.
+The following example shows how to configure a `MarshallingMessageConverter`:
+
+```
+<rabbit:template id="marshallingTemplate"
+    connection-factory="connectionFactory"
+    message-converter="marshallingMessageConverter" />
+
+<bean id="marshallingMessageConverter"
+    class="org.springframework.amqp.support.converter.MarshallingMessageConverter">
+    <constructor-arg ref="someImplementationOfMarshallerAndUnmarshaller" />
+</bean>
+```
+
+##### `Jackson2XmlMessageConverter`
+
+This class was introduced in version 2.1 and can be used to convert messages from and to XML.
+
+Both `Jackson2XmlMessageConverter` and `Jackson2JsonMessageConverter` have the same base class: `AbstractJackson2MessageConverter`.
+
+| |The `AbstractJackson2MessageConverter` class is introduced to replace a removed class: `AbstractJsonMessageConverter`.|
+|---|---|
+
+The `Jackson2XmlMessageConverter` uses the `com.fasterxml.jackson` 2.x library.
+
+You can use it the same way as `Jackson2JsonMessageConverter`, except it supports XML instead of JSON.
+The following example configures a `Jackson2XmlMessageConverter`:
+
+```
+<bean id="xmlConverterWithDefaultType"
+        class="org.springframework.amqp.support.converter.Jackson2XmlMessageConverter">
+    <property name="classMapper">
+        <bean class="org.springframework.amqp.support.converter.DefaultClassMapper">
+            <property name="defaultType" value="foo.PurchaseOrder" />
+        </bean>
+    </property>
+</bean>
+```
+
+See [Jackson2JsonMessageConverter](#json-message-converter) for more information.
+
+| |Starting with version 2.2, `application/xml` is assumed if there is no `contentType` property, or it has the default value `application/octet-stream`.
To revert to the previous behavior (return an unconverted `byte[]`), set the converter’s `assumeSupportedContentType` property to `false`.|
+|---|---|
+
+##### `ContentTypeDelegatingMessageConverter`
+
+This class was introduced in version 1.4.2 and allows delegation to a specific `MessageConverter` based on the content type property in the `MessageProperties`.
+By default, it delegates to a `SimpleMessageConverter` if there is no `contentType` property or there is a value that matches none of the configured converters.
+The following example configures a `ContentTypeDelegatingMessageConverter`:
+
+```
+<bean id="contentTypeConverter" class="ContentTypeDelegatingMessageConverter">
+    <property name="delegates">
+        <map>
+            <entry key="application/json" value-ref="jsonMessageConverter" />
+            <entry key="application/xml" value-ref="xmlMessageConverter" />
+        </map>
+    </property>
+</bean>
+```
+
+##### Java Deserialization
+
+This section covers how to deserialize Java objects.
+
+| |There is a possible vulnerability when deserializing Java objects from untrusted sources.

If you accept messages from untrusted sources with a `content-type` of `application/x-java-serialized-object`, you should
consider configuring which packages and classes are allowed to be deserialized.
This applies to both the `SimpleMessageConverter` and `SerializerMessageConverter`, when they are configured to use a `DefaultDeserializer`, either implicitly or via configuration.

By default, the allowed list is empty, meaning all classes are deserialized.

You can set a list of patterns, such as `thing1.*`, `thing1.thing2.Cat`, or `*.MySafeClass`.

The patterns are checked in order until a match is found.
If there is no match, a `SecurityException` is thrown.

You can set the patterns using the `allowedListPatterns` property on these converters.|
+|---|---|
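+
+For example, a minimal sketch (the patterns are illustrative, and the `setAllowedListPatterns` setter is assumed to back the property named above):
+
+```
+SimpleMessageConverter converter = new SimpleMessageConverter();
+// only classes matching these patterns may be deserialized
+converter.setAllowedListPatterns(List.of("thing1.*", "thing1.thing2.Cat"));
+```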
+
+##### Message Properties Converters
+
+The `MessagePropertiesConverter` strategy interface is used to convert between the Rabbit Client `BasicProperties` and Spring AMQP `MessageProperties`.
+The default implementation (`DefaultMessagePropertiesConverter`) is usually sufficient for most purposes, but you can implement your own if needed.
+The default properties converter converts `BasicProperties` elements of type `LongString` to `String` instances when the size is not greater than `1024` bytes.
+Larger `LongString` instances are not converted (see the next paragraph).
+This limit can be overridden with a constructor argument.
+
+Starting with version 1.6, headers longer than the long string limit (default: 1024) are now left as `LongString` instances by default by the `DefaultMessagePropertiesConverter`.
+You can access the contents through the `getBytes()`, `toString()`, or `getStream()` methods.
+
+Previously, the `DefaultMessagePropertiesConverter` “converted” such headers to a `DataInputStream` (actually it just referenced the `LongString` instance’s `DataInputStream`).
+On output, this header was not converted (except to a `String`, by calling `toString()` on the stream).
+
+Large incoming `LongString` headers are now correctly “converted” on output, too (by default).
+
+A new constructor is provided to let you configure the converter to work as before.
+The following listing shows the Javadoc comment and declaration of the method:
+
+```
+/**
+ * Construct an instance where LongStrings will be returned
+ * unconverted or as a java.io.DataInputStream when longer than this limit.
+ * Use this constructor with 'true' to restore pre-1.6 behavior.
+ * @param longStringLimit the limit.
+ * @param convertLongLongStrings LongString when false,
+ * DataInputStream when true.
+ * @since 1.6
+ */
+public DefaultMessagePropertiesConverter(int longStringLimit, boolean convertLongLongStrings) { ... }
+```
+
+Also starting with version 1.6, a new property called `correlationIdString` has been added to `MessageProperties`.
+Previously, when converting to and from `BasicProperties` used by the RabbitMQ client, an unnecessary `byte[] <-> String` conversion was performed, because `MessageProperties.correlationId` is a `byte[]`, but `BasicProperties` uses a `String`.
+(Ultimately, the RabbitMQ client uses UTF-8 to convert the `String` to bytes to put in the protocol message.)
+
+To provide maximum backwards compatibility, a new property called `correlationIdPolicy` has been added to the `DefaultMessagePropertiesConverter`.
+This takes a `DefaultMessagePropertiesConverter.CorrelationIdPolicy` enum argument.
+By default, it is set to `BYTES`, which replicates the previous behavior.
+
+For inbound messages:
+
+* `STRING`: Only the `correlationIdString` property is mapped
+
+* `BYTES`: Only the `correlationId` property is mapped
+
+* `BOTH`: Both properties are mapped
+
+For outbound messages:
+
+* `STRING`: Only the `correlationIdString` property is mapped
+
+* `BYTES`: Only the `correlationId` property is mapped
+
+* `BOTH`: Both properties are considered, with the `String` property taking precedence
+
+Also starting with version 1.6, the inbound `deliveryMode` property is no longer mapped to `MessageProperties.deliveryMode`.
+It is mapped to `MessageProperties.receivedDeliveryMode` instead.
+Also, the inbound `userId` property is no longer mapped to `MessageProperties.userId`.
+It is mapped to `MessageProperties.receivedUserId` instead.
+These changes are to avoid unexpected propagation of these properties if the same `MessageProperties` object is used for an outbound message.
+
+Starting with version 2.2, the `DefaultMessagePropertiesConverter` converts any custom headers with values of type `Class` using `getName()` instead of `toString()`; this avoids the consuming application having to parse the class name out of the `toString()` representation.
+For rolling upgrades, you may need to change your consumers to understand both formats until all producers are upgraded.
+
+#### 4.1.9. Modifying Messages - Compression and More
+
+A number of extension points exist.
+They let you perform some processing on a message, either before it is sent to RabbitMQ or immediately after it is received.
+
+As can be seen in [Message Converters](#message-converters), one such extension point is in the `AmqpTemplate` `convertAndSend` operations, where you can provide a `MessagePostProcessor`.
+For example, after your POJO has been converted, the `MessagePostProcessor` lets you set custom headers or properties on the `Message`.
+
+Starting with version 1.4.2, additional extension points have been added to the `RabbitTemplate` - `setBeforePublishPostProcessors()` and `setAfterReceivePostProcessors()`.
+The first enables a post processor to run immediately before sending to RabbitMQ.
+When using batching (see [Batching](#template-batching)), this is invoked after the batch is assembled and before the batch is sent.
+The second is invoked immediately after a message is received.
+
+These extension points are used for such features as compression and, for this purpose, several `MessagePostProcessor` implementations are provided: `GZipPostProcessor`, `ZipPostProcessor`, and `DeflaterPostProcessor` compress messages before sending, and `GUnzipPostProcessor`, `UnzipPostProcessor`, and `InflaterPostProcessor` decompress received messages.
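+
+For example, a minimal sketch of pairing a compressor with the matching decompressor (the template wiring is illustrative):
+
+```
+RabbitTemplate template = new RabbitTemplate(connectionFactory);
+template.setBeforePublishPostProcessors(new GZipPostProcessor());  // compress on send
+template.setAfterReceivePostProcessors(new GUnzipPostProcessor()); // decompress on receive
+```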
+
+| |Starting with version 2.1.5, the `GZipPostProcessor` can be configured with the `copyProperties = true` option to make a copy of the original message properties.
By default, these properties are reused for performance reasons, and modified with compression content encoding and the optional `MessageProperties.SPRING_AUTO_DECOMPRESS` header.
If you retain a reference to the original outbound message, its properties will change as well.
So, if your application retains a copy of an outbound message with these message post processors, consider turning the `copyProperties` option on.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Starting with version 2.2.12, you can configure the delimiter that the compressing post processors use between content encoding elements.
With versions 2.2.11 and before, this was hard-coded as `:`; it is now set to `, ` by default.
The decompressors will work with both delimiters.
However, if you publish messages with 2.3 or later and consume with 2.2.11 or earlier, you MUST set the `encodingDelimiter` property on the compressor(s) to `:`.
When your consumers are upgraded to 2.2.12 or later, you can revert to the default of `, `.|
+|---|---|
+
+Similarly, the `SimpleMessageListenerContainer` also has a `setAfterReceivePostProcessors()` method, letting the decompression be performed after messages are received by the container.
+
+Starting with version 2.1.4, `addBeforePublishPostProcessors()` and `addAfterReceivePostProcessors()` have been added to the `RabbitTemplate` to allow appending new post processors to the list of before-publish and after-receive post processors, respectively.
+There are also methods provided to remove the post processors.
+Similarly, `AbstractMessageListenerContainer` also has `addAfterReceivePostProcessors()` and `removeAfterReceivePostProcessor()` methods added.
+See the Javadoc of `RabbitTemplate` and `AbstractMessageListenerContainer` for more detail.
+
+#### 4.1.10. Request/Reply Messaging
+
+The `AmqpTemplate` also provides a variety of `sendAndReceive` methods that accept the same argument options that were described earlier for the one-way send operations (`exchange`, `routingKey`, and `Message`).
+Those methods are quite useful for request-reply scenarios, since they handle the configuration of the necessary `reply-to` property before sending and can listen for the reply message on an exclusive queue that is created internally for that purpose.
+
+Similar request-reply methods are also available where the `MessageConverter` is applied to both the request and reply.
+Those methods are named `convertSendAndReceive`.
+See the [Javadoc of `AmqpTemplate`](https://docs.spring.io/spring-amqp/docs/latest-ga/api/org/springframework/amqp/core/AmqpTemplate.html) for more detail.
+
+Starting with version 1.5.0, each of the `sendAndReceive` method variants has an overloaded version that takes `CorrelationData`.
+Together with a properly configured connection factory, this enables the receipt of publisher confirms for the send side of the operation.
+See [Correlated Publisher Confirms and Returns](#template-confirms) and the [Javadoc for `RabbitOperations`](https://docs.spring.io/spring-amqp/docs/latest-ga/api/org/springframework/amqp/rabbit/core/RabbitOperations.html) for more information.
+
+Starting with version 2.0, there are variants of these methods (`convertSendAndReceiveAsType`) that take an additional `ParameterizedTypeReference` argument to convert complex returned types.
+The template must be configured with a `SmartMessageConverter`.
+See [Converting From a `Message` With `RabbitTemplate`](#json-complex) for more information.
+
+Starting with version 2.1, you can configure the `RabbitTemplate` with the `noLocalReplyConsumer` option to control a `noLocal` flag for reply consumers.
+This is `false` by default.
+
+##### Reply Timeout
+
+By default, the send and receive methods time out after five seconds and return `null`.
+You can modify this behavior by setting the `replyTimeout` property.
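+
+For example, a minimal sketch (the timeout value is illustrative):
+
+```
+// wait up to 10 seconds for a reply instead of the 5-second default
+rabbitTemplate.setReplyTimeout(10_000);
+```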
Starting with version 1.5, if you set the `mandatory` property to `true` (or the `mandatory-expression` evaluates to `true` for a particular message) and the message cannot be delivered to a queue, an `AmqpMessageReturnedException` is thrown.
This exception has `returnedMessage`, `replyCode`, and `replyText` properties, as well as the `exchange` and `routingKey` used for the send.

| |This feature uses publisher returns.
You can enable it by setting `publisherReturns` to `true` on the `CachingConnectionFactory` (see [Publisher Confirms and Returns](#cf-pub-conf-ret)).
Also, you must not have registered your own `ReturnCallback` with the `RabbitTemplate`.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.1.2, a `replyTimedOut` method has been added, letting subclasses be informed of the timeout so that they can clean up any retained state. + +Starting with versions 2.0.11 and 2.1.3, when you use the default `DirectReplyToMessageListenerContainer`, you can add an error handler by setting the template’s `replyErrorHandler` property. +This error handler is invoked for any failed deliveries, such as late replies and messages received without a correlation header. +The exception passed in is a `ListenerExecutionFailedException`, which has a `failedMessage` property. + +##### RabbitMQ Direct reply-to + +| |Starting with version 3.4.0, the RabbitMQ server supports [direct reply-to](https://www.rabbitmq.com/direct-reply-to.html).
This eliminates the main reason for a fixed reply queue (to avoid the need to create a temporary queue for each request).
Starting with Spring AMQP version 1.4.1, direct reply-to is used by default (if supported by the server) instead of creating temporary reply queues.
When no `replyQueue` is provided (or it is set with a name of `amq.rabbitmq.reply-to`), the `RabbitTemplate` automatically detects whether direct reply-to is supported and either uses it or falls back to using a temporary reply queue.
When using direct reply-to, a `reply-listener` is not required and should not be configured.|
|---|---|

Reply listeners are still supported with named queues (other than `amq.rabbitmq.reply-to`), allowing control of reply concurrency and so on.

Starting with version 1.6, if you wish to use a temporary, exclusive, auto-delete queue for each reply, set the `useTemporaryReplyQueues` property to `true`.
This property is ignored if you set a `replyAddress`.

You can change the criteria that dictate whether to use direct reply-to by subclassing `RabbitTemplate` and overriding `useDirectReplyTo()` to check different criteria.
The method is called once only, when the first request is sent.

Prior to version 2.0, the `RabbitTemplate` created a new consumer for each request and canceled the consumer when the reply was received (or timed out).
Now the template uses a `DirectReplyToMessageListenerContainer` instead, letting the consumers be reused.
The template still takes care of correlating the replies, so there is no danger of a late reply going to a different sender.
If you want to revert to the previous behavior, set the `useDirectReplyToContainer` (`direct-reply-to-container` when using XML configuration) property to `false`.

The `AsyncRabbitTemplate` has no such option.
It always uses a `DirectReplyToContainer` for replies when direct reply-to is in use.

Starting with version 2.3.7, the template has a new property, `useChannelForCorrelation`.
When this is `true`, the server does not have to copy the correlation ID from the request message headers to the reply message.
Instead, the channel used to send the request is used to correlate the reply to the request.

##### Message Correlation With A Reply Queue

When using a fixed reply queue (other than `amq.rabbitmq.reply-to`), you must provide correlation data so that replies can be correlated to requests.
See [RabbitMQ Remote Procedure Call (RPC)](https://www.rabbitmq.com/tutorials/tutorial-six-java.html).
By default, the standard `correlationId` property is used to hold the correlation data.
However, if you wish to use a custom property to hold correlation data, you can set the `correlation-key` attribute on the `<rabbit:template/>`.
Explicitly setting the attribute to `correlationId` is the same as omitting the attribute.
The client and server must use the same header for correlation data.

| |Spring AMQP version 1.1 used a custom property called `spring_reply_correlation` for this data.
If you wish to revert to this behavior with the current version (perhaps to maintain compatibility with another application using 1.1), you must set the attribute to `spring_reply_correlation`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +By default, the template generates its own correlation ID (ignoring any user-supplied value). +If you wish to use your own correlation ID, set the `RabbitTemplate` instance’s `userCorrelationId` property to `true`. + +| |The correlation ID must be unique to avoid the possibility of a wrong reply being returned for a request.| +|---|---------------------------------------------------------------------------------------------------------| + +##### Reply Listener Container + +When using RabbitMQ versions prior to 3.4.0, a new temporary queue is used for each reply. +However, a single reply queue can be configured on the template, which can be more efficient and also lets you set arguments on that queue. +In this case, however, you must also provide a \ sub element. +This element provides a listener container for the reply queue, with the template being the listener. +All of the [Message Listener Container Configuration](#containerAttributes) attributes allowed on a \ are allowed on the element, except for `connection-factory` and `message-converter`, which are inherited from the template’s configuration. + +| |If you run multiple instances of your application or use multiple `RabbitTemplate` instances, you **MUST** use a unique reply queue for each.
RabbitMQ has no ability to select messages from a queue, so, if they all use the same queue, each instance would compete for replies and not necessarily receive its own.|
|---|---|

The following example defines a rabbit template with a connection factory:

```
<rabbit:template id="amqpTemplate"
        connection-factory="connectionFactory"
        reply-queue="replies">
    <rabbit:reply-listener/>
</rabbit:template>
```

While the container and template share a connection factory, they do not share a channel.
Therefore, requests and replies are not performed within the same transaction (if transactional).

| |Prior to version 1.5.0, the `reply-address` attribute was not available.
Replies were always routed by using the default exchange and the `reply-queue` name as the routing key.
This is still the default, but you can now specify the new `reply-address` attribute.
The `reply-address` can contain an address with the form `<exchange>/<routingKey>`, in which case the reply is routed to the specified exchange and then on to a queue bound with the routing key.
The `reply-address` has precedence over `reply-queue`.
When only `reply-address` is in use, the `<reply-listener/>` must be configured as a separate `<listener-container>` component.
The `reply-address` and `reply-queue` (or the `queues` attribute on the `<listener-container/>`) must refer to the same queue logically.|
|---|---|

With this configuration, a `SimpleMessageListenerContainer` is used to receive the replies, with the `RabbitTemplate` being the `MessageListener`.
When defining a template with the `<rabbit:template>` namespace element, as shown in the preceding example, the parser defines the container and wires in the template as the listener.

| |When the template does not use a fixed `replyQueue` (or is using direct reply-to — see [RabbitMQ Direct reply-to](#direct-reply-to)), a listener container is not needed.
Direct `reply-to` is the preferred mechanism when using RabbitMQ 3.4.0 or later.|
|---|---|

If you define your `RabbitTemplate` as a `<bean/>`, define it as an `@Bean` in an `@Configuration` class, or create the template programmatically, you need to define and wire up the reply listener container yourself.
If you fail to do this, the template never receives the replies and eventually times out and returns null as the reply to a call to a `sendAndReceive` method.

Starting with version 1.5, the `RabbitTemplate` detects whether it has been configured as a `MessageListener` to receive replies.
If not, attempts to send and receive messages with a reply address fail with an `IllegalStateException` (because the replies are never received).

Further, if a simple `replyAddress` (queue name) is used, the reply listener container verifies that it is listening to a queue with the same name.
This check cannot be performed if the reply address is an exchange and routing key; in that case, a debug log message is written.

| |When wiring the reply listener and template yourself, it is important to ensure that the template’s `replyAddress` and the container’s `queues` (or `queueNames`) properties refer to the same queue.
The template inserts the reply address into the outbound message `replyTo` property.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following listing shows examples of how to manually wire up the beans: + +``` + + + + + + + + + + + + + + + + +``` + +``` + @Bean + public RabbitTemplate amqpTemplate() { + RabbitTemplate rabbitTemplate = new RabbitTemplate(connectionFactory()); + rabbitTemplate.setMessageConverter(msgConv()); + rabbitTemplate.setReplyAddress(replyQueue().getName()); + rabbitTemplate.setReplyTimeout(60000); + rabbitTemplate.setUseDirectReplyToContainer(false); + return rabbitTemplate; + } + + @Bean + public SimpleMessageListenerContainer replyListenerContainer() { + SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(); + container.setConnectionFactory(connectionFactory()); + container.setQueues(replyQueue()); + container.setMessageListener(amqpTemplate()); + return container; + } + + @Bean + public Queue replyQueue() { + return new Queue("my.reply.queue"); + } +``` + +A complete example of a `RabbitTemplate` wired with a fixed reply queue, together with a “remote” listener container that handles the request and returns the reply is shown in [this test case](https://github.com/spring-projects/spring-amqp/tree/main/spring-rabbit/src/test/java/org/springframework/amqp/rabbit/listener/JavaConfigFixedReplyQueueTests.java). + +| |When the reply times out (`replyTimeout`), the `sendAndReceive()` methods return null.| +|---|--------------------------------------------------------------------------------------| + +Prior to version 1.3.6, late replies for timed out messages were only logged. +Now, if a late reply is received, it is rejected (the template throws an `AmqpRejectAndDontRequeueException`). +If the reply queue is configured to send rejected messages to a dead letter exchange, the reply can be retrieved for later analysis. +To do so, bind a queue to the configured dead letter exchange with a routing key equal to the reply queue’s name. + +See the [RabbitMQ Dead Letter Documentation](https://www.rabbitmq.com/dlx.html) for more information about configuring dead lettering. +You can also take a look at the `FixedReplyQueueDeadLetterTests` test case for an example. + +##### Async Rabbit Template + +Version 1.6 introduced the `AsyncRabbitTemplate`. +This has similar `sendAndReceive` (and `convertSendAndReceive`) methods to those on the [`AmqpTemplate`](#amqp-template). +However, instead of blocking, they return a `ListenableFuture`. + +The `sendAndReceive` methods return a `RabbitMessageFuture`. +The `convertSendAndReceive` methods return a `RabbitConverterFuture`. + +You can either synchronously retrieve the result later, by invoking `get()` on the future, or you can register a callback that is called asynchronously with the result. +The following listing shows both approaches: + +``` +@Autowired +private AsyncRabbitTemplate template; + +... + +public void doSomeWorkAndGetResultLater() { + + ... + + ListenableFuture future = this.template.convertSendAndReceive("foo"); + + // do some more work + + String reply = null; + try { + reply = future.get(); + } + catch (ExecutionException e) { + ... + } + + ... + +} + +public void doSomeWorkAndGetResultAsync() { + + ... 
    RabbitConverterFuture<String> future = this.template.convertSendAndReceive("foo");
    future.addCallback(new ListenableFutureCallback<String>() {

        @Override
        public void onSuccess(String result) {
            ...
        }

        @Override
        public void onFailure(Throwable ex) {
            ...
        }

    });

    ...

}
```

If `mandatory` is set and the message cannot be delivered, the future throws an `ExecutionException` with a cause of `AmqpMessageReturnedException`, which encapsulates the returned message and information about the return.

If `enableConfirms` is set, the future has a property called `confirm`, which is itself a `ListenableFuture<Boolean>`, with `true` indicating a successful publish.
If the confirm future is `false`, the `RabbitFuture` has a further property called `nackCause`, which contains the reason for the failure, if available.

| |The publisher confirm is discarded if it is received after the reply, since the reply implies a successful publish.|
|---|---|

You can set the `receiveTimeout` property on the template to time out replies (it defaults to `30000` - 30 seconds).
If a timeout occurs, the future is completed with an `AmqpReplyTimeoutException`.

The template implements `SmartLifecycle`.
Stopping the template while there are pending replies causes the pending `Future` instances to be canceled.

Starting with version 2.0, the asynchronous template now supports [direct reply-to](https://www.rabbitmq.com/direct-reply-to.html) instead of a configured reply queue.
To enable this feature, use one of the following constructors:

```
public AsyncRabbitTemplate(ConnectionFactory connectionFactory, String exchange, String routingKey)

public AsyncRabbitTemplate(RabbitTemplate template)
```

See [RabbitMQ Direct reply-to](#direct-reply-to) to use direct reply-to with the synchronous `RabbitTemplate`.

Version 2.0 introduced variants of these methods (`convertSendAndReceiveAsType`) that take an additional `ParameterizedTypeReference` argument to convert complex returned types.
You must configure the underlying `RabbitTemplate` with a `SmartMessageConverter`.
See [Converting From a `Message` With `RabbitTemplate`](#json-complex) for more information.

##### Spring Remoting with AMQP

| |This feature is deprecated and will be removed in 3.0.
It has long been superseded by [Handling Exceptions](#annotation-error-handling) with the `returnExceptions` property set to `true` and a `RemoteInvocationAwareMessageConverterAdapter` configured on the sending side.
See [Handling Exceptions](#annotation-error-handling) for more information.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The Spring Framework has a general remoting capability, allowing [Remote Procedure Calls (RPC)](https://docs.spring.io/spring/docs/current/spring-framework-reference/html/remoting.html) that use various transports. +Spring-AMQP supports a similar mechanism with a `AmqpProxyFactoryBean` on the client and a `AmqpInvokerServiceExporter` on the server. +This provides RPC over AMQP. +On the client side, a `RabbitTemplate` is used as described [earlier](#reply-listener). +On the server side, the invoker (configured as a `MessageListener`) receives the message, invokes the configured service, and returns the reply by using the inbound message’s `replyTo` information. + +You can inject the client factory bean into any bean (by using its `serviceInterface`). +The client can then invoke methods on the proxy, resulting in remote execution over AMQP. + +| |With the default `MessageConverter` instances, the method parameters and returned value must be instances of `Serializable`.| +|---|----------------------------------------------------------------------------------------------------------------------------| + +On the server side, the `AmqpInvokerServiceExporter` has both `AmqpTemplate` and `MessageConverter` properties. +Currently, the template’s `MessageConverter` is not used. +If you need to supply a custom message converter, you should provide it by setting the `messageConverter` property. +On the client side, you can add a custom message converter to the `AmqpTemplate`, which is provided to the `AmqpProxyFactoryBean` by using its `amqpTemplate` property. + +The following listing shows sample client and server configurations: + +``` + + + + + + + + + + + + + + + + + + +``` + +``` + + + + + + + + + + + + + + + + + +``` + +| |The `AmqpInvokerServiceExporter` can process only properly formed messages, such as those sent from the `AmqpProxyFactoryBean`.
If it receives a message that it cannot interpret, a serialized `RuntimeException` is sent as a reply.
If the message has no `replyToAddress` property, the message is rejected and permanently lost if no dead letter exchange has been configured.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |By default, if the request message cannot be delivered, the calling thread eventually times out and a `RemoteProxyFailureException` is thrown.
By default, the timeout is five seconds.
You can modify that duration by setting the `replyTimeout` property on the `RabbitTemplate`.
Starting with version 1.5, by setting the `mandatory` property to `true` and enabling returns on the connection factory (see [Publisher Confirms and Returns](#cf-pub-conf-ret)), the calling thread throws an `AmqpMessageReturnedException`.
See [Reply Timeout](#reply-timeout) for more information.|
|---|---|

#### 4.1.11. Configuring the Broker

The AMQP specification describes how the protocol can be used to configure queues, exchanges, and bindings on the broker.
These operations (which are portable from the 0.8 specification and higher) are present in the `AmqpAdmin` interface in the `org.springframework.amqp.core` package.
The RabbitMQ implementation of that class is `RabbitAdmin`, located in the `org.springframework.amqp.rabbit.core` package.

The `AmqpAdmin` interface is based on using the Spring AMQP domain abstractions and is shown in the following listing:

```
public interface AmqpAdmin {

    // Exchange Operations

    void declareExchange(Exchange exchange);

    void deleteExchange(String exchangeName);

    // Queue Operations

    Queue declareQueue();

    String declareQueue(Queue queue);

    void deleteQueue(String queueName);

    void deleteQueue(String queueName, boolean unused, boolean empty);

    void purgeQueue(String queueName, boolean noWait);

    // Binding Operations

    void declareBinding(Binding binding);

    void removeBinding(Binding binding);

    Properties getQueueProperties(String queueName);

}
```

See also [Scoped Operations](#scoped-operations).

The `getQueueProperties()` method returns some limited information about the queue (message count and consumer count).
The keys for the properties returned are available as constants in the `RabbitTemplate` (`QUEUE_NAME`, `QUEUE_MESSAGE_COUNT`, and `QUEUE_CONSUMER_COUNT`).
The [RabbitMQ REST API](#management-rest-api) provides much more information in the `QueueInfo` object.

The no-arg `declareQueue()` method defines a queue on the broker with a name that is automatically generated.
The additional properties of this auto-generated queue are `exclusive=true`, `autoDelete=true`, and `durable=false`.

The `declareQueue(Queue queue)` method takes a `Queue` object and returns the name of the declared queue.
If the `name` property of the provided `Queue` is an empty `String`, the broker declares the queue with a generated name.
That name is returned to the caller.
That name is also added to the `actualName` property of the `Queue`.
You can use this functionality programmatically only by invoking the `RabbitAdmin` directly.
When using auto-declaration by the admin when defining a queue declaratively in the application context, you can set the name property to `""` (the empty string).
The broker then creates the name.
Starting with version 2.1, listener containers can use queues of this type.
See [Containers and Broker-Named queues](#containers-and-broker-named-queues) for more information.

This is in contrast to an `AnonymousQueue`, where the framework generates a unique (`UUID`) name and sets `durable` to `false`, and `exclusive` and `autoDelete` to `true`.
A `<rabbit:queue/>` with an empty (or missing) `name` attribute always creates an `AnonymousQueue`.
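As a quick, hedged illustration of the broker-named queue behavior described above (assuming a `RabbitAdmin` named `admin` is in scope):

```
Queue queue = new Queue("");                      // empty name: the broker generates one
String generatedName = admin.declareQueue(queue); // for example, amq.gen-Ab1xYz...
// The generated name is also stored in the queue's actualName property
assert generatedName.equals(queue.getActualName());
```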
+ +See [`AnonymousQueue`](#anonymous-queue) to understand why `AnonymousQueue` is preferred over broker-generated queue names as well as +how to control the format of the name. +Starting with version 2.1, anonymous queues are declared with argument `Queue.X_QUEUE_LEADER_LOCATOR` set to `client-local` by default. +This ensures that the queue is declared on the node to which the application is connected. +Declarative queues must have fixed names because they might be referenced elsewhere in the context — such as in the +listener shown in the following example: + +``` + + + +``` + +See [Automatic Declaration of Exchanges, Queues, and Bindings](#automatic-declaration). + +The RabbitMQ implementation of this interface is `RabbitAdmin`, which, when configured by using Spring XML, resembles the following example: + +``` + + + +``` + +When the `CachingConnectionFactory` cache mode is `CHANNEL` (the default), the `RabbitAdmin` implementation does automatic lazy declaration of queues, exchanges, and bindings declared in the same `ApplicationContext`. +These components are declared as soon as a `Connection` is opened to the broker. +There are some namespace features that make this very convenient — for example, +in the Stocks sample application, we have the following: + +``` + + + + + + + + + + + + + + + +``` + +In the preceding example, we use anonymous queues (actually, internally, just queues with names generated by the framework, not by the broker) and refer to them by ID. +We can also declare queues with explicit names, which also serve as identifiers for their bean definitions in the context. +The following example configures a queue with an explicit name: + +``` + +``` + +| |You can provide both `id` and `name` attributes.
This lets you refer to the queue (for example, in a binding) by an ID that is independent of the queue name.
It also allows standard Spring features (such as property placeholders and SpEL expressions for the queue name).
These features are not available when you use the name as the bean identifier.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Queues can be configured with additional arguments — for example, `x-message-ttl`. +When you use the namespace support, they are provided in the form of a `Map` of argument-name/argument-value pairs, which are defined by using the `` element. +The following example shows how to do so: + +``` + + + + + + +``` + +By default, the arguments are assumed to be strings. +For arguments of other types, you must provide the type. +The following example shows how to specify the type: + +``` + + + + + +``` + +When providing arguments of mixed types, you must provide the type for each entry element. +The following example shows how to do so: + +``` + + + + 100 + + + + + +``` + +With Spring Framework 3.2 and later, this can be declared a little more succinctly, as follows: + +``` + + + + + + +``` + +When you use Java configuration, the `Queue.X_QUEUE_LEADER_LOCATOR` argument is supported as a first class property through the `setLeaderLocator()` method on the `Queue` class. +Starting with version 2.1, anonymous queues are declared with this property set to `client-local` by default. +This ensures that the queue is declared on the node the application is connected to. + +| |The RabbitMQ broker does not allow declaration of a queue with mismatched arguments.
For example, if a `queue` already exists with no `time to live` argument, and you attempt to declare it with (for example) `key="x-message-ttl" value="100"`, an exception is thrown.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +By default, the `RabbitAdmin` immediately stops processing all declarations when any exception occurs. +This could cause downstream issues, such as a listener container failing to initialize because another queue (defined after the one in error) is not declared. + +This behavior can be modified by setting the `ignore-declaration-exceptions` attribute to `true` on the `RabbitAdmin` instance. +This option instructs the `RabbitAdmin` to log the exception and continue declaring other elements. +When configuring the `RabbitAdmin` using Java, this property is called `ignoreDeclarationExceptions`. +This is a global setting that applies to all elements. +Queues, exchanges, and bindings have a similar property that applies to just those elements. + +Prior to version 1.6, this property took effect only if an `IOException` occurred on the channel, such as when there is a mismatch between current and desired properties. +Now, this property takes effect on any exception, including `TimeoutException` and others. + +In addition, any declaration exceptions result in the publishing of a `DeclarationExceptionEvent`, which is an `ApplicationEvent` that can be consumed by any `ApplicationListener` in the context. +The event contains a reference to the admin, the element that was being declared, and the `Throwable`. + +##### Headers Exchange + +Starting with version 1.3, you can configure the `HeadersExchange` to match on multiple headers. +You can also specify whether any or all headers must match. +The following example shows how to do so: + +``` + + + + + + + + + + + +``` + +Starting with version 1.6, you can configure `Exchanges` with an `internal` flag (defaults to `false`) and such an`Exchange` is properly configured on the Broker through a `RabbitAdmin` (if one is present in the application context). +If the `internal` flag is `true` for an exchange, RabbitMQ does not let clients use the exchange. +This is useful for a dead letter exchange or exchange-to-exchange binding, where you do not wish the exchange to be used +directly by publishers. + +To see how to use Java to configure the AMQP infrastructure, look at the Stock sample application, +where there is the `@Configuration` class `AbstractStockRabbitConfiguration`, which ,in turn has`RabbitClientConfiguration` and `RabbitServerConfiguration` subclasses. 
+The following listing shows the code for `AbstractStockRabbitConfiguration`: + +``` +@Configuration +public abstract class AbstractStockAppRabbitConfiguration { + + @Bean + public CachingConnectionFactory connectionFactory() { + CachingConnectionFactory connectionFactory = + new CachingConnectionFactory("localhost"); + connectionFactory.setUsername("guest"); + connectionFactory.setPassword("guest"); + return connectionFactory; + } + + @Bean + public RabbitTemplate rabbitTemplate() { + RabbitTemplate template = new RabbitTemplate(connectionFactory()); + template.setMessageConverter(jsonMessageConverter()); + configureRabbitTemplate(template); + return template; + } + + @Bean + public Jackson2JsonMessageConverter jsonMessageConverter() { + return new Jackson2JsonMessageConverter(); + } + + @Bean + public TopicExchange marketDataExchange() { + return new TopicExchange("app.stock.marketdata"); + } + + // additional code omitted for brevity + +} +``` + +In the Stock application, the server is configured by using the following `@Configuration` class: + +``` +@Configuration +public class RabbitServerConfiguration extends AbstractStockAppRabbitConfiguration { + + @Bean + public Queue stockRequestQueue() { + return new Queue("app.stock.request"); + } +} +``` + +This is the end of the whole inheritance chain of `@Configuration` classes. +The end result is that `TopicExchange` and `Queue` are declared to the broker upon application startup. +There is no binding of `TopicExchange` to a queue in the server configuration, as that is done in the client application. +The stock request queue, however, is automatically bound to the AMQP default exchange. +This behavior is defined by the specification. + +The client `@Configuration` class is a little more interesting. +Its declaration follows: + +``` +@Configuration +public class RabbitClientConfiguration extends AbstractStockAppRabbitConfiguration { + + @Value("${stocks.quote.pattern}") + private String marketDataRoutingKey; + + @Bean + public Queue marketDataQueue() { + return amqpAdmin().declareQueue(); + } + + /** + * Binds to the market data exchange. + * Interested in any stock quotes + * that match its routing key. + */ + @Bean + public Binding marketDataBinding() { + return BindingBuilder.bind( + marketDataQueue()).to(marketDataExchange()).with(marketDataRoutingKey); + } + + // additional code omitted for brevity + +} +``` + +The client declares another queue through the `declareQueue()` method on the `AmqpAdmin`. +It binds that queue to the market data exchange with a routing pattern that is externalized in a properties file. + +##### Builder API for Queues and Exchanges + +Version 1.6 introduces a convenient fluent API for configuring `Queue` and `Exchange` objects when using Java configuration. +The following example shows how to use it: + +``` +@Bean +public Queue queue() { + return QueueBuilder.nonDurable("foo") + .autoDelete() + .exclusive() + .withArgument("foo", "bar") + .build(); +} + +@Bean +public Exchange exchange() { + return ExchangeBuilder.directExchange("foo") + .autoDelete() + .internal() + .withArgument("foo", "bar") + .build(); +} +``` + +See the Javadoc for [`org.springframework.amqp.core.QueueBuilder`](https://docs.spring.io/spring-amqp/docs/latest-ga/api/org/springframework/amqp/core/QueueBuilder.html) and [`org.springframework.amqp.core.ExchangeBuilder`](https://docs.spring.io/spring-amqp/docs/latest-ga/api/org/springframework/amqp/core/ExchangeBuilder.html) for more information. 
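For comparison with the non-durable builders shown above, the durable variants work the same way; this is a sketch with illustrative names and an optional broker argument:

```
@Bean
public Queue durableQueue() {
    return QueueBuilder.durable("my.durable.queue")
            .withArgument("x-message-ttl", 60_000) // messages expire after one minute
            .build();
}
```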
+ +Starting with version 2.0, the `ExchangeBuilder` now creates durable exchanges by default, to be consistent with the simple constructors on the individual `AbstractExchange` classes. +To make a non-durable exchange with the builder, use `.durable(false)` before invoking `.build()`. +The `durable()` method with no parameter is no longer provided. + +Version 2.2 introduced fluent APIs to add "well known" exchange and queue arguments…​ + +``` +@Bean +public Queue allArgs1() { + return QueueBuilder.nonDurable("all.args.1") + .ttl(1000) + .expires(200_000) + .maxLength(42) + .maxLengthBytes(10_000) + .overflow(Overflow.rejectPublish) + .deadLetterExchange("dlx") + .deadLetterRoutingKey("dlrk") + .maxPriority(4) + .lazy() + .leaderLocator(LeaderLocator.minLeaders) + .singleActiveConsumer() + .build(); +} + +@Bean +public DirectExchange ex() { + return ExchangeBuilder.directExchange("ex.with.alternate") + .durable(true) + .alternate("alternate") + .build(); +} +``` + +##### Declaring Collections of Exchanges, Queues, and Bindings + +You can wrap collections of `Declarable` objects (`Queue`, `Exchange`, and `Binding`) in `Declarables` objects. +The `RabbitAdmin` detects such beans (as well as discrete `Declarable` beans) in the application context, and declares the contained objects on the broker whenever a connection is established (initially and after a connection failure). +The following example shows how to do so: + +``` +@Configuration +public static class Config { + + @Bean + public CachingConnectionFactory cf() { + return new CachingConnectionFactory("localhost"); + } + + @Bean + public RabbitAdmin admin(ConnectionFactory cf) { + return new RabbitAdmin(cf); + } + + @Bean + public DirectExchange e1() { + return new DirectExchange("e1", false, true); + } + + @Bean + public Queue q1() { + return new Queue("q1", false, false, true); + } + + @Bean + public Binding b1() { + return BindingBuilder.bind(q1()).to(e1()).with("k1"); + } + + @Bean + public Declarables es() { + return new Declarables( + new DirectExchange("e2", false, true), + new DirectExchange("e3", false, true)); + } + + @Bean + public Declarables qs() { + return new Declarables( + new Queue("q2", false, false, true), + new Queue("q3", false, false, true)); + } + + @Bean + @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE) + public Declarables prototypes() { + return new Declarables(new Queue(this.prototypeQueueName, false, false, true)); + } + + @Bean + public Declarables bs() { + return new Declarables( + new Binding("q2", DestinationType.QUEUE, "e2", "k2", null), + new Binding("q3", DestinationType.QUEUE, "e3", "k3", null)); + } + + @Bean + public Declarables ds() { + return new Declarables( + new DirectExchange("e4", false, true), + new Queue("q4", false, false, true), + new Binding("q4", DestinationType.QUEUE, "e4", "k4", null)); + } + +} +``` + +| |In versions prior to 2.1, you could declare multiple `Declarable` instances by defining beans of type `Collection`.
This can cause undesirable side effects in some cases, because the admin has to iterate over all `Collection<Declarable>` beans.
This feature is now disabled in favor of `Declarables`, as discussed earlier in this section.
You can revert to the previous behavior by setting the `RabbitAdmin` property called `declareCollections` to `true`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Version 2.2 added the `getDeclarablesByType` method to `Declarables`; this can be used as a convenience, for example, when declaring the listener container bean(s). + +``` +public SimpleMessageListenerContainer container(ConnectionFactory connectionFactory, + Declarables mixedDeclarables, MessageListener listener) { + + SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(connectionFactory); + container.setQueues(mixedDeclarables.getDeclarablesByType(Queue.class).toArray(new Queue[0])); + container.setMessageListener(listener); + return container; +} +``` + +##### Conditional Declaration + +By default, all queues, exchanges, and bindings are declared by all `RabbitAdmin` instances (assuming they have `auto-startup="true"`) in the application context. + +Starting with version 2.1.9, the `RabbitAdmin` has a new property `explicitDeclarationsOnly` (which is `false` by default); when this is set to `true`, the admin will only declare beans that are explicitly configured to be declared by that admin. + +| |Starting with the 1.2 release, you can conditionally declare these elements.
This is particularly useful when an application connects to multiple brokers and needs to specify with which brokers a particular element should be declared.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The classes representing these elements implement `Declarable`, which has two methods: `shouldDeclare()` and `getDeclaringAdmins()`. +The `RabbitAdmin` uses these methods to determine whether a particular instance should actually process the declarations on its `Connection`. + +The properties are available as attributes in the namespace, as shown in the following examples: + +``` + + + + + + + + + + + + + + + + + + + +``` + +| |By default, the `auto-declare` attribute is `true` and, if the `declared-by` is not supplied (or is empty), then all `RabbitAdmin` instances declare the object (as long as the admin’s `auto-startup` attribute is `true`, the default, and the admin’s `explicit-declarations-only` attribute is false).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Similarly, you can use Java-based `@Configuration` to achieve the same effect. +In the following example, the components are declared by `admin1` but not by`admin2`: + +``` +@Bean +public RabbitAdmin admin1() { + return new RabbitAdmin(cf1()); +} + +@Bean +public RabbitAdmin admin2() { + return new RabbitAdmin(cf2()); +} + +@Bean +public Queue queue() { + Queue queue = new Queue("foo"); + queue.setAdminsThatShouldDeclare(admin1()); + return queue; +} + +@Bean +public Exchange exchange() { + DirectExchange exchange = new DirectExchange("bar"); + exchange.setAdminsThatShouldDeclare(admin1()); + return exchange; +} + +@Bean +public Binding binding() { + Binding binding = new Binding("foo", DestinationType.QUEUE, exchange().getName(), "foo", null); + binding.setAdminsThatShouldDeclare(admin1()); + return binding; +} +``` + +##### A Note On the `id` and `name` Attributes + +The `name` attribute on `` and `` elements reflects the name of the entity in the broker. +For queues, if the `name` is omitted, an anonymous queue is created (see [`AnonymousQueue`](#anonymous-queue)). + +In versions prior to 2.0, the `name` was also registered as a bean name alias (similar to `name` on `` elements). + +This caused two problems: + +* It prevented the declaration of a queue and exchange with the same name. + +* The alias was not resolved if it contained a SpEL expression (`#{…​}`). + +Starting with version 2.0, if you declare one of these elements with both an `id` *and* a `name` attribute, the name is no longer declared as a bean name alias. +If you wish to declare a queue and exchange with the same `name`, you must provide an `id`. + +There is no change if the element has only a `name` attribute. +The bean can still be referenced by the `name` — for example, in binding declarations. +However, you still cannot reference it if the name contains SpEL — you must provide an `id` for reference purposes. 
+ +##### `AnonymousQueue` + +In general, when you need a uniquely-named, exclusive, auto-delete queue, we recommend that you use the `AnonymousQueue`instead of broker-defined queue names (using `""` as a `Queue` name causes the broker to generate the queue +name). + +This is because: + +1. The queues are actually declared when the connection to the broker is established. + This is long after the beans are created and wired together. + Beans that use the queue need to know its name. + In fact, the broker might not even be running when the application is started. + +2. If the connection to the broker is lost for some reason, the admin re-declares the `AnonymousQueue` with the same name. + If we used broker-declared queues, the queue name would change. + +You can control the format of the queue name used by `AnonymousQueue` instances. + +By default, the queue name is prefixed by `spring.gen-` followed by a base64 representation of the `UUID` — for example: `spring.gen-MRBv9sqISkuCiPfOYfpo4g`. + +You can provide an `AnonymousQueue.NamingStrategy` implementation in a constructor argument. +The following example shows how to do so: + +``` +@Bean +public Queue anon1() { + return new AnonymousQueue(); +} + +@Bean +public Queue anon2() { + return new AnonymousQueue(new AnonymousQueue.Base64UrlNamingStrategy("something-")); +} + +@Bean +public Queue anon3() { + return new AnonymousQueue(AnonymousQueue.UUIDNamingStrategy.DEFAULT); +} +``` + +The first bean generates a queue name prefixed by `spring.gen-` followed by a base64 representation of the `UUID` — for +example: `spring.gen-MRBv9sqISkuCiPfOYfpo4g`. +The second bean generates a queue name prefixed by `something-` followed by a base64 representation of the `UUID`. +The third bean generates a name by using only the UUID (no base64 conversion) — for example, `f20c818a-006b-4416-bf91-643590fedb0e`. + +The base64 encoding uses the “URL and Filename Safe Alphabet” from RFC 4648. +Trailing padding characters (`=`) are removed. + +You can provide your own naming strategy, whereby you can include other information (such as the application name or client host) in the queue name. + +You can specify the naming strategy when you use XML configuration. +The `naming-strategy` attribute is present on the `` element +for a bean reference that implements `AnonymousQueue.NamingStrategy`. +The following examples show how to specify the naming strategy in various ways: + +``` + + + + + + + + + + + +``` + +The first example creates names such as `spring.gen-MRBv9sqISkuCiPfOYfpo4g`. +The second example creates names with a String representation of a UUID. +The third example creates names such as `custom.gen-MRBv9sqISkuCiPfOYfpo4g`. + +You can also provide your own naming strategy bean. + +Starting with version 2.1, anonymous queues are declared with argument `Queue.X_QUEUE_LEADER_LOCATOR` set to `client-local` by default. +This ensures that the queue is declared on the node to which the application is connected. +You can revert to the previous behavior by calling `queue.setLeaderLocator(null)` after constructing the instance. + +##### Recovering Auto-Delete Declarations + +Normally, the `RabbitAdmin` (s) only recover queues/exchanges/bindings that are declared as beans in the application context; if any such declarations are auto-delete, they will be removed by the broker if the connection is lost. +When the connection is re-established, the admin will redeclare the entities. 
+Normally, entities created by calling `admin.declareQueue(…​)`, `admin.declareExchange(…​)` and `admin.declareBinding(…​)` will not be recovered. + +Starting with version 2.4, the admin has a new property `redeclareManualDeclarations`; when true, the admin will recover these entities in addition to the beans in the application context. + +Recovery of individual declarations will not be performed if `deleteQueue(…​)`, `deleteExchange(…​)` or `removeBinding(…​)` is called. +Associated bindings are removed from the recoverable entities when queues and exchanges are deleted. + +Finally, calling `resetAllManualDeclarations()` will prevent the recovery of any previously declared entities. + +#### 4.1.12. Broker Event Listener + +When the [Event Exchange Plugin](https://www.rabbitmq.com/event-exchange.html) is enabled, if you add a bean of type `BrokerEventListener` to the application context, it publishes selected broker events as `BrokerEvent` instances, which can be consumed with a normal Spring `ApplicationListener` or `@EventListener` method. +Events are published by the broker to a topic exchange `amq.rabbitmq.event` with a different routing key for each event type. +The listener uses event keys, which are used to bind an `AnonymousQueue` to the exchange so the listener receives only selected events. +Since it is a topic exchange, wildcards can be used (as well as explicitly requesting specific events), as the following example shows: + +``` +@Bean +public BrokerEventListener eventListener() { + return new BrokerEventListener(connectionFactory(), "user.deleted", "channel.#", "queue.#"); +} +``` + +You can further narrow the received events in individual event listeners, by using normal Spring techniques, as the following example shows: + +``` +@EventListener(condition = "event.eventType == 'queue.created'") +public void listener(BrokerEvent event) { + ... +} +``` + +#### 4.1.13. Delayed Message Exchange + +Version 1.6 introduces support for the[Delayed Message Exchange Plugin](https://www.rabbitmq.com/blog/2015/04/16/scheduling-messages-with-rabbitmq/) + +| |The plugin is currently marked as experimental but has been available for over a year (at the time of writing).
If changes to the plugin make it necessary, we plan to add support for such changes as soon as practical.
For that reason, this support in Spring AMQP should be considered experimental, too.
This functionality was tested with RabbitMQ 3.6.0 and version 0.0.1 of the plugin.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +To use a `RabbitAdmin` to declare an exchange as delayed, you can set the `delayed` property on the exchange bean to`true`. +The `RabbitAdmin` uses the exchange type (`Direct`, `Fanout`, and so on) to set the `x-delayed-type` argument and +declare the exchange with type `x-delayed-message`. + +The `delayed` property (default: `false`) is also available when configuring exchange beans using XML. +The following example shows how to use it: + +``` + +``` + +To send a delayed message, you can set the `x-delay` header through `MessageProperties`, as the following examples show: + +``` +MessageProperties properties = new MessageProperties(); +properties.setDelay(15000); +template.send(exchange, routingKey, + MessageBuilder.withBody("foo".getBytes()).andProperties(properties).build()); +``` + +``` +rabbitTemplate.convertAndSend(exchange, routingKey, "foo", new MessagePostProcessor() { + + @Override + public Message postProcessMessage(Message message) throws AmqpException { + message.getMessageProperties().setDelay(15000); + return message; + } + +}); +``` + +To check if a message was delayed, use the `getReceivedDelay()` method on the `MessageProperties`. +It is a separate property to avoid unintended propagation to an output message generated from an input message. + +#### 4.1.14. RabbitMQ REST API + +When the management plugin is enabled, the RabbitMQ server exposes a REST API to monitor and configure the broker. +A [Java Binding for the API](https://github.com/rabbitmq/hop) is now provided. +The `com.rabbitmq.http.client.Client` is a standard, immediate, and, therefore, blocking API. +It is based on the [Spring Web](https://docs.spring.io/spring/docs/current/spring-framework-reference/web.html#spring-web) module and its `RestTemplate` implementation. +On the other hand, the `com.rabbitmq.http.client.ReactorNettyClient` is a reactive, non-blocking implementation based on the [Reactor Netty](https://projectreactor.io/docs/netty/release/reference/docs/index.html) project. + +The hop dependency (`com.rabbitmq:http-client`) is now also `optional`. + +See their Javadoc for more information. + +#### 4.1.15. Exception Handling + +Many operations with the RabbitMQ Java client can throw checked exceptions. +For example, there are a lot of cases where `IOException` instances may be thrown. +The `RabbitTemplate`, `SimpleMessageListenerContainer`, and other Spring AMQP components catch those exceptions and convert them into one of the exceptions within `AmqpException` hierarchy. +Those are defined in the 'org.springframework.amqp' package, and `AmqpException` is the base of the hierarchy. + +When a listener throws an exception, it is wrapped in a `ListenerExecutionFailedException`. +Normally the message is rejected and requeued by the broker. +Setting `defaultRequeueRejected` to `false` causes messages to be discarded (or routed to a dead letter exchange). 
As discussed in [Message Listeners and the Asynchronous Case](#async-listeners), the listener can throw an `AmqpRejectAndDontRequeueException` (or `ImmediateRequeueAmqpException`) to conditionally control this behavior.

However, there is a class of errors where the listener cannot control the behavior.
When a message that cannot be converted is encountered (for example, an invalid `content_encoding` header), some exceptions are thrown before the message reaches user code.
With `defaultRequeueRejected` set to `true` (the default) (or throwing an `ImmediateRequeueAmqpException`), such messages would be redelivered over and over.
Before version 1.3.2, users needed to write a custom `ErrorHandler`, as discussed in [Exception Handling](#exception-handling), to avoid this situation.

Starting with version 1.3.2, the default `ErrorHandler` is now a `ConditionalRejectingErrorHandler` that rejects (and does not requeue) messages that fail with an irrecoverable error.
Specifically, it rejects messages that fail with the following errors:

* `o.s.amqp…​MessageConversionException`: Can be thrown when converting the incoming message payload using a `MessageConverter`.

* `o.s.messaging…​MessageConversionException`: Can be thrown by the conversion service if additional conversion is required when mapping to a `@RabbitListener` method.

* `o.s.messaging…​MethodArgumentNotValidException`: Can be thrown if validation (for example, `@Valid`) is used in the listener and the validation fails.

* `o.s.messaging…​MethodArgumentTypeMismatchException`: Can be thrown if the inbound message was converted to a type that is not correct for the target method.
  For example, the parameter is declared as `Message<Foo>` but `Message<Bar>` is received.

* `java.lang.NoSuchMethodException`: Added in version 1.6.3.

* `java.lang.ClassCastException`: Added in version 1.6.3.

You can configure an instance of this error handler with a `FatalExceptionStrategy` so that users can provide their own rules for conditional message rejection — for example, a delegate implementation to the `BinaryExceptionClassifier` from Spring Retry ([Message Listeners and the Asynchronous Case](#async-listeners)).
In addition, the `ListenerExecutionFailedException` now has a `failedMessage` property that you can use in the decision.
If the `FatalExceptionStrategy.isFatal()` method returns `true`, the error handler throws an `AmqpRejectAndDontRequeueException`.
The default `FatalExceptionStrategy` logs a warning message when an exception is determined to be fatal.

Since version 1.6.3, a convenient way to add user exceptions to the fatal list is to subclass `ConditionalRejectingErrorHandler.DefaultExceptionStrategy` and override the `isUserCauseFatal(Throwable cause)` method to return `true` for fatal exceptions.

A common pattern for handling DLQ messages is to set a `time-to-live` on those messages, as well as additional DLQ configuration, such that these messages expire and are routed back to the main queue for retry.
The problem with this technique is that messages that cause fatal exceptions loop forever.
Starting with version 2.1, the `ConditionalRejectingErrorHandler` detects an `x-death` header on a message that causes a fatal exception to be thrown.
The message is logged and discarded.
You can revert to the previous behavior by setting the `discardFatalsWithXDeath` property on the `ConditionalRejectingErrorHandler` to `false`.
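To illustrate the `DefaultExceptionStrategy` subclassing described above, the following hedged sketch treats a hypothetical `MyFatalBusinessException` as irrecoverable; the container factory wiring is likewise illustrative:

```
public class MyFatalExceptionStrategy extends ConditionalRejectingErrorHandler.DefaultExceptionStrategy {

    @Override
    protected boolean isUserCauseFatal(Throwable cause) {
        // Reject (and do not requeue) messages that fail with this exception
        return cause instanceof MyFatalBusinessException;
    }

}

@Bean
public SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory(ConnectionFactory connectionFactory) {
    SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
    factory.setConnectionFactory(connectionFactory);
    factory.setErrorHandler(new ConditionalRejectingErrorHandler(new MyFatalExceptionStrategy()));
    return factory;
}
```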
+ +| |Starting with version 2.1.9, messages with these fatal exceptions are rejected and NOT requeued by default, even if the container acknowledge mode is MANUAL.
These exceptions generally occur before the listener is invoked, so the listener has no chance to ack or nack the message, and it would remain in the queue in an un-acked state.
To revert to the previous behavior, set the `rejectManual` property on the `ConditionalRejectingErrorHandler` to `false`.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.1.16. Transactions + +The Spring Rabbit framework has support for automatic transaction management in the synchronous and asynchronous use cases with a number of different semantics that can be selected declaratively, as is familiar to existing users of Spring transactions. +This makes many if not most common messaging patterns easy to implement. + +There are two ways to signal the desired transaction semantics to the framework. +In both the `RabbitTemplate` and `SimpleMessageListenerContainer`, there is a flag `channelTransacted` which, if `true`, tells the framework to use a transactional channel and to end all operations (send or receive) with a commit or rollback (depending on the outcome), with an exception signaling a rollback. +Another signal is to provide an external transaction with one of Spring’s `PlatformTransactionManager` implementations as a context for the ongoing operation. +If there is already a transaction in progress when the framework is sending or receiving a message, and the `channelTransacted` flag is `true`, the commit or rollback of the messaging transaction is deferred until the end of the current transaction. +If the `channelTransacted` flag is `false`, no transaction semantics apply to the messaging operation (it is auto-acked). + +The `channelTransacted` flag is a configuration time setting. +It is declared and processed once when the AMQP components are created, usually at application startup. +The external transaction is more dynamic in principle because the system responds to the current thread state at runtime. +However, in practice, it is often also a configuration setting, when the transactions are layered onto an application declaratively. + +For synchronous use cases with `RabbitTemplate`, the external transaction is provided by the caller, either declaratively or imperatively according to taste (the usual Spring transaction model). +The following example shows a declarative approach (usually preferred because it is non-invasive), where the template has been configured with `channelTransacted=true`: + +``` +@Transactional +public void doSomething() { + String incoming = rabbitTemplate.receiveAndConvert(); + // do some more database processing... + String outgoing = processInDatabaseAndExtractReply(incoming); + rabbitTemplate.convertAndSend(outgoing); +} +``` + +In the preceding example, a `String` payload is received, converted, and sent as a message body inside a method marked as `@Transactional`. +If the database processing fails with an exception, the incoming message is returned to the broker, and the outgoing message is not sent. +This applies to any operations with the `RabbitTemplate` inside a chain of transactional methods (unless, for instance, the `Channel` is directly manipulated to commit the transaction early). 
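The template configuration behind such a transactional flow might look like the following sketch (the bean name and wiring are illustrative):

```
@Bean
public RabbitTemplate rabbitTemplate(ConnectionFactory connectionFactory) {
    RabbitTemplate template = new RabbitTemplate(connectionFactory);
    // With channelTransacted=true, sends and receives commit or roll back
    // together with the surrounding Spring transaction
    template.setChannelTransacted(true);
    return template;
}
```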
+ +For asynchronous use cases with `SimpleMessageListenerContainer`, if an external transaction is needed, it has to be requested by the container when it sets up the listener. +To signal that an external transaction is required, the user provides an implementation of `PlatformTransactionManager` to the container when it is configured. +The following example shows how to do so: + +``` +@Configuration +public class ExampleExternalTransactionAmqpConfiguration { + + @Bean + public SimpleMessageListenerContainer messageListenerContainer() { + SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(); + container.setConnectionFactory(rabbitConnectionFactory()); + container.setTransactionManager(transactionManager()); + container.setChannelTransacted(true); + container.setQueueName("some.queue"); + container.setMessageListener(exampleListener()); + return container; + } + +} +``` + +In the preceding example, the transaction manager is added as a dependency injected from another bean definition (not shown), and the `channelTransacted` flag is also set to `true`. +The effect is that if the listener fails with an exception, the transaction is rolled back, and the message is also returned to the broker. +Significantly, if the transaction fails to commit (for example, because of +a database constraint error or connectivity problem), the AMQP transaction is also rolled back, and the message is returned to the broker. +This is sometimes known as a “Best Efforts 1 Phase Commit”, and is a very powerful pattern for reliable messaging. +If the `channelTransacted` flag was set to `false` (the default) in the preceding example, the external transaction would still be provided for the listener, but all messaging operations would be auto-acked, so the effect is to commit the messaging operations even on a rollback of the business operation. + +##### Conditional Rollback + +Prior to version 1.6.6, adding a rollback rule to a container’s `transactionAttribute` when using an external transaction manager (such as JDBC) had no effect. +Exceptions always rolled back the transaction. + +Also, when using a [transaction advice](https://docs.spring.io/spring-framework/docs/current/spring-framework-reference/html/transaction.html#transaction-declarative) in the container’s advice chain, conditional rollback was not very useful, because all listener exceptions are wrapped in a `ListenerExecutionFailedException`. + +The first problem has been corrected, and the rules are now applied properly. +Further, the `ListenerFailedRuleBasedTransactionAttribute` is now provided. +It is a subclass of `RuleBasedTransactionAttribute`, with the only difference being that it is aware of the `ListenerExecutionFailedException` and uses the cause of such exceptions for the rule. +This transaction attribute can be used directly in the container or through a transaction advice. + +The following example uses this rule: + +``` +@Bean +public AbstractMessageListenerContainer container() { + ... + container.setTransactionManager(transactionManager); + RuleBasedTransactionAttribute transactionAttribute = + new ListenerFailedRuleBasedTransactionAttribute(); + transactionAttribute.setRollbackRules(Collections.singletonList( + new NoRollbackRuleAttribute(DontRollBackException.class))); + container.setTransactionAttribute(transactionAttribute); + ... +} +``` + +##### A note on Rollback of Received Messages + +AMQP transactions apply only to messages and acks sent to the broker. 
Consequently, when there is a rollback of a Spring transaction and a message has been received, Spring AMQP has to not only roll back the transaction but also manually reject the message (sort of a nack, but that is not what the specification calls it).
The action taken on message rejection is independent of transactions and depends on the `defaultRequeueRejected` property (default: `true`).
For more information about rejecting failed messages, see [Message Listeners and the Asynchronous Case](#async-listeners).

For more information about RabbitMQ transactions and their limitations, see [RabbitMQ Broker Semantics](https://www.rabbitmq.com/semantics.html).

| |Prior to RabbitMQ 2.7.0, such messages (and any that are unacked when a channel is closed or aborts) went to the back of the queue on a Rabbit broker.
Since 2.7.0, rejected messages go to the front of the queue, in a similar manner to rolled-back JMS messages.|
|---|---|

| |Previously, message requeue on transaction rollback was inconsistent between local transactions and when a `TransactionManager` was provided.
In the former case, the normal requeue logic (`AmqpRejectAndDontRequeueException` or `defaultRequeueRejected=false`) applied (see [Message Listeners and the Asynchronous Case](#async-listeners)).
With a transaction manager, the message was unconditionally requeued on rollback.
Starting with version 2.0, the behavior is consistent and the normal requeue logic is applied in both cases.
To revert to the previous behavior, you can set the container’s `alwaysRequeueWithTxManagerRollback` property to `true`.
See [Message Listener Container Configuration](#containerAttributes).|
|---|---|

##### Using `RabbitTransactionManager`

The [RabbitTransactionManager](https://docs.spring.io/spring-amqp/docs/latest_ga/api/org/springframework/amqp/rabbit/transaction/RabbitTransactionManager.html) is an alternative to executing Rabbit operations within, and synchronized with, external transactions.
This transaction manager is an implementation of the [`PlatformTransactionManager`](https://docs.spring.io/spring/docs/current/javadoc-api/org/springframework/transaction/PlatformTransactionManager.html) interface and should be used with a single Rabbit `ConnectionFactory`.

| |This strategy is not able to provide XA transactions — for example, in order to share transactions between messaging and database access.|
|---|---|

Application code is required to retrieve the transactional Rabbit resources through `ConnectionFactoryUtils.getTransactionalResourceHolder(ConnectionFactory, boolean)` instead of a standard `Connection.createChannel()` call with subsequent channel creation.
When using Spring AMQP’s [RabbitTemplate](https://docs.spring.io/spring-amqp/docs/latest_ga/api/org/springframework/amqp/rabbit/core/RabbitTemplate.html), it will auto-detect a thread-bound `Channel` and automatically participate in its transaction.

With Java configuration, you can set up a new `RabbitTransactionManager` by using the following bean:

```
@Bean
public RabbitTransactionManager rabbitTransactionManager() {
    return new RabbitTransactionManager(connectionFactory);
}
```

If you prefer XML configuration, you can declare the following bean in your XML application context file:

```
<bean id="rabbitTxManager"
      class="org.springframework.amqp.rabbit.transaction.RabbitTransactionManager">
    <property name="connectionFactory" ref="connectionFactory"/>
</bean>
```

##### Transaction Synchronization

Synchronizing a RabbitMQ transaction with some other (e.g. DBMS) transaction provides "Best Effort One Phase Commit" semantics.
It is possible for the RabbitMQ transaction to fail to commit during the after-completion phase of transaction synchronization.
This is logged by the `spring-tx` infrastructure as an error, but no exception is thrown to the calling code.
Starting with version 2.3.10, you can call `ConnectionFactoryUtils.checkAfterCompletion()` after the transaction has committed on the same thread that processed the transaction.
It simply returns if no exception occurred; otherwise it throws an `AfterCompletionFailedException`, which has a property representing the synchronization status of the completion.

Enable this feature by calling `ConnectionFactoryUtils.enableAfterCompletionFailureCapture(true)`; this is a global flag and applies to all threads.
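
The following is a minimal usage sketch of this capture facility; the transactional service shown is an illustrative bean, not part of the framework:

```
// Call once, globally (for example, at application startup).
ConnectionFactoryUtils.enableAfterCompletionFailureCapture(true);
...
// Later, on the same thread that ran the synchronized transaction
// (someTransactionalService is a hypothetical @Transactional bean that
// also sends messages through a channel-transacted RabbitTemplate):
someTransactionalService.process();
try {
    ConnectionFactoryUtils.checkAfterCompletion();
}
catch (AfterCompletionFailedException ex) {
    // the RabbitMQ part of the synchronized transaction failed to commit
}
```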
#### 4.1.17. Message Listener Container Configuration

There are quite a few options for configuring a `SimpleMessageListenerContainer` (SMLC) and a `DirectMessageListenerContainer` (DMLC) related to transactions and quality of service, and some of them interact with each other.
Properties that apply to the SMLC, DMLC, or `StreamListenerContainer` (StLC) (see [Using the RabbitMQ Stream Plugin](#stream-support)) are indicated by a check mark in the appropriate column.
See [Choosing a Container](#choose-container) for information to help you decide which container is appropriate for your application.

The following table shows the container property names and their equivalent attribute names (in parentheses) when using the namespace to configure a `<rabbit:listener-container/>`.
The `type` attribute on that element can be `simple` (default) or `direct` to specify an `SMLC` or `DMLC` respectively.
Some properties are not exposed by the namespace.
These are indicated by `N/A` for the attribute.

| Property<br>(Attribute) | Description | SMLC | DMLC | StLC |
|---|---|---|---|---|
| | Prior to version 1.6, if there was more than one admin in the context, the container would randomly select one. If there were no admins, it would create one internally. In either case, this could cause unexpected results. Starting with version 1.6, for `autoDeclare` to work, there must be exactly one `RabbitAdmin` in the context, or a reference to a specific instance must be configured on the container using the `rabbitAdmin` property. | | | |
| (group) | This is available only when using the namespace. When specified, a bean of type `Collection<MessageListenerContainer>` is registered with this name, and the container for each `<listener/>` element is added to the collection. This allows, for example, starting and stopping the group of containers by iterating over the collection. If multiple `<listener-container/>` elements have the same group value, the containers in the collection form an aggregate of all containers so designated. |![tickmark](https://docs.spring.io/spring-amqp/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-amqp/docs/current/reference/html/images/tickmark.png)| |
| | If the broker is not available during initial startup, the container starts and the conditions are checked when the connection is established. | | | |
| | The check is done against all queues in the context, not just the queues that a particular listener is configured to use. If you wish to limit the checks to just those queues used by a container, you should configure a separate `RabbitAdmin` for the container, and provide a reference to it using the `rabbitAdmin` property. See [Conditional Declaration](#conditional-declaration) for more information. | | | |
| | Mismatched queue argument detection is disabled while starting a container for a `@RabbitListener` in a bean that is marked `@Lazy`. This is to avoid a potential deadlock which can delay the start of such containers for up to 60 seconds. Applications using lazy listener beans should check the queue arguments before getting a reference to the lazy bean. | | | |
| | Missing queue detection is disabled while starting a container for a `@RabbitListener` in a bean that is marked `@Lazy`. This is to avoid a potential deadlock which can delay the start of such containers for up to 60 seconds. Applications using lazy listener beans should check the queue(s) before getting a reference to the lazy bean. | | | |
| | There are scenarios where the prefetch value should be low — for example, with large messages, especially if the processing is slow (messages could add up to a large amount of memory in the client process), and if strict message ordering is necessary (the prefetch value should be set back to 1 in this case). Also, with low-volume messaging and multiple consumers (including concurrency within a single listener container instance), you may wish to reduce the prefetch to get a more even distribution of messages across consumers. | | | |

#### 4.1.18. Listener Concurrency

##### SimpleMessageListenerContainer

By default, the listener container starts a single consumer that receives messages from the queues.

When examining the table in the previous section, you can see a number of properties and attributes that control concurrency.
The simplest is `concurrentConsumers`, which creates that (fixed) number of consumers that concurrently process messages.

Prior to version 1.3.0, this was the only setting available, and the container had to be stopped and started again to change the setting.

Since version 1.3.0, you can dynamically adjust the `concurrentConsumers` property.
If it is changed while the container is running, consumers are added or removed as necessary to adjust to the new setting.

In addition, a new property called `maxConcurrentConsumers` has been added, and the container dynamically adjusts the concurrency based on workload.
This works in conjunction with four additional properties: `consecutiveActiveTrigger`, `startConsumerMinInterval`, `consecutiveIdleTrigger`, and `stopConsumerMinInterval`.
With the default settings, the algorithm to increase consumers works as follows:

If `maxConcurrentConsumers` has not been reached and an existing consumer is active for ten consecutive cycles AND at least 10 seconds has elapsed since the last consumer was started, a new consumer is started.
A consumer is considered active if it received at least one message in `batchSize` \* `receiveTimeout` milliseconds.

With the default settings, the algorithm to decrease consumers works as follows:

If there are more than `concurrentConsumers` running and a consumer detects ten consecutive timeouts (idle) AND the last consumer was stopped at least 60 seconds ago, a consumer is stopped.
The timeout depends on the `receiveTimeout` and the `batchSize` properties.
A consumer is considered idle if it receives no messages in `batchSize` \* `receiveTimeout` milliseconds.
So, with the default timeout (one second) and a `batchSize` of four, stopping a consumer is considered after 40 seconds of idle time (four timeouts correspond to one idle detection).
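
For example, the following sketch (an illustrative bean; the queue name and bounds are examples, not defaults) configures a container that scales between two and four consumers:

```
// Illustrative configuration of dynamic concurrency on an SMLC;
// the message listener setup is omitted for brevity.
@Bean
public SimpleMessageListenerContainer scalingContainer(ConnectionFactory connectionFactory) {
    SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(connectionFactory);
    container.setQueueNames("some.queue");
    container.setConcurrentConsumers(2);     // lower bound
    container.setMaxConcurrentConsumers(4);  // upper bound
    // or, equivalently, since version 2.0:
    // container.setConcurrency("2-4");
    return container;
}
```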
| |Practically, consumers can be stopped only if the whole container is idle for some time. This is because the broker shares its work across all the active consumers.|
|---|---|

Each consumer uses a single channel, regardless of the number of configured queues.

Starting with version 2.0, the `concurrentConsumers` and `maxConcurrentConsumers` properties can be set with the `concurrency` property — for example, `2-4`.

##### Using `DirectMessageListenerContainer`

With this container, concurrency is based on the configured queues and `consumersPerQueue`.
Each consumer for each queue uses a separate channel, and the concurrency is controlled by the rabbit client library.
By default, at the time of writing, it uses a pool of `DEFAULT_NUM_THREADS = Runtime.getRuntime().availableProcessors() * 2` threads.

You can configure a `taskExecutor` to provide the required maximum concurrency.

#### 4.1.19. Exclusive Consumer

Starting with version 1.3, you can configure the listener container with a single exclusive consumer.
This prevents other containers from consuming from the queues until the current consumer is cancelled.
The concurrency of such a container must be `1`.

When using exclusive consumers, other containers try to consume from the queues according to the `recoveryInterval` property and log a `WARN` message if the attempt fails.

#### 4.1.20. Listener Container Queues

Version 1.3 introduced a number of improvements for handling multiple queues in a listener container.

The container must be configured to listen on at least one queue.
This was the case previously, too, but now queues can be added and removed at runtime.
The container recycles (cancels and re-creates) the consumers when any pre-fetched messages have been processed.
See the [Javadoc](https://docs.spring.io/spring-amqp/docs/latest-ga/api/org/springframework/amqp/rabbit/listener/AbstractMessageListenerContainer.html) for the `addQueues`, `addQueueNames`, `removeQueues` and `removeQueueNames` methods.
When removing queues, at least one queue must remain.

A consumer now starts if any of its queues are available.
Previously, the container would stop if any queues were unavailable.
Now, this is only the case if none of the queues are available.
If not all queues are available, the container tries to passively declare (and consume from) the missing queues every 60 seconds.

Also, if a consumer receives a cancel from the broker (for example, if a queue is deleted), the consumer tries to recover, and the recovered consumer continues to process messages from any other configured queues.
Previously, a cancel on one queue cancelled the entire consumer and, eventually, the container would stop due to the missing queue.

If you wish to permanently remove a queue, you should update the container before or after deleting the queue, to avoid future attempts to consume from it.

#### 4.1.21. Resilience: Recovering from Errors and Broker Failures

Some of the key (and most popular) high-level features that Spring AMQP provides relate to recovery and automatic re-connection in the event of a protocol error or broker failure.
We have seen all the relevant components already in this guide, but it should help to bring them all together here and call out the features and recovery scenarios individually.

The primary reconnection features are enabled by the `CachingConnectionFactory` itself.
It is also often beneficial to use the `RabbitAdmin` auto-declaration features.
In addition, if you care about guaranteed delivery, you probably also need to use the `channelTransacted` flag in `RabbitTemplate` and `SimpleMessageListenerContainer` and `AcknowledgeMode.AUTO` (or manual if you do the acks yourself) in the `SimpleMessageListenerContainer`.

##### Automatic Declaration of Exchanges, Queues, and Bindings

The `RabbitAdmin` component can declare exchanges, queues, and bindings on startup.
It does this lazily, through a `ConnectionListener`.
Consequently, if the broker is not present on startup, it does not matter.
The first time a `Connection` is used (for example, by sending a message), the listener fires and the admin features are applied.
A further benefit of doing the auto declarations in a listener is that, if the connection is dropped for any reason (for example, broker death, network glitch, and others), they are applied again when the connection is re-established.

| |Queues declared this way must have fixed names — either explicitly declared or generated by the framework for `AnonymousQueue` instances.
Anonymous queues are non-durable, exclusive, and auto-deleting.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Automatic declaration is performed only when the `CachingConnectionFactory` cache mode is `CHANNEL` (the default).
This limitation exists because exclusive and auto-delete queues are bound to the connection.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.2.2, the `RabbitAdmin` will detect beans of type `DeclarableCustomizer` and apply the function before actually processing the declaration. +This is useful, for example, to set a new argument (property) before it has first class support within the framework. + +``` +@Bean +public DeclarableCustomizer customizer() { + return dec -> { + if (dec instanceof Queue && ((Queue) dec).getName().equals("my.queue")) { + dec.addArgument("some.new.queue.argument", true); + } + return dec; + }; +} +``` + +It is also useful in projects that don’t provide direct access to the `Declarable` bean definitions. + +See also [RabbitMQ Automatic Connection/Topology recovery](#auto-recovery). + +##### Failures in Synchronous Operations and Options for Retry + +If you lose your connection to the broker in a synchronous sequence when using `RabbitTemplate` (for instance), Spring AMQP throws an `AmqpException` (usually, but not always, `AmqpIOException`). +We do not try to hide the fact that there was a problem, so you have to be able to catch and respond to the exception. +The easiest thing to do if you suspect that the connection was lost (and it was not your fault) is to try the operation again. +You can do this manually, or you could look at using Spring Retry to handle the retry (imperatively or declaratively). + +Spring Retry provides a couple of AOP interceptors and a great deal of flexibility to specify the parameters of the retry (number of attempts, exception types, backoff algorithm, and others). +Spring AMQP also provides some convenience factory beans for creating Spring Retry interceptors in a convenient form for AMQP use cases, with strongly typed callback interfaces that you can use to implement custom recovery logic. +See the Javadoc and properties of `StatefulRetryOperationsInterceptor` and `StatelessRetryOperationsInterceptor` for more detail. +Stateless retry is appropriate if there is no transaction or if a transaction is started inside the retry callback. +Note that stateless retry is simpler to configure and analyze than stateful retry, but it is not usually appropriate if there is an ongoing transaction that must be rolled back or definitely is going to roll back. +A dropped connection in the middle of a transaction should have the same effect as a rollback. +Consequently, for reconnections where the transaction is started higher up the stack, stateful retry is usually the best choice. +Stateful retry needs a mechanism to uniquely identify a message. +The simplest approach is to have the sender put a unique value in the `MessageId` message property. +The provided message converters provide an option to do this: you can set `createMessageIds` to `true`. +Otherwise, you can inject a `MessageKeyGenerator` implementation into the interceptor. +The key generator must return a unique key for each message. +In versions prior to version 2.0, a `MissingMessageIdAdvice` was provided. +It enabled messages without a `messageId` property to be retried exactly once (ignoring the retry settings). +This advice is no longer provided, since, along with `spring-retry` version 1.2, its functionality is built into the interceptor and message listener containers. 
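
As a sketch of the stateful case, the following hypothetical configuration wires a `MessageKeyGenerator` into the interceptor; it assumes the publisher sets a unique `messageId` property on each message:

```
// Minimal sketch of stateful retry with an explicit MessageKeyGenerator;
// the key shown assumes a unique messageId is set by the sender.
@Bean
public StatefulRetryOperationsInterceptor statefulInterceptor() {
    return RetryInterceptorBuilder.stateful()
            .maxAttempts(3)
            .messageKeyGenerator(message -> message.getMessageProperties().getMessageId())
            .build();
}
```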
+ +| |For backwards compatibility, a message with a null message ID is considered fatal for the consumer (consumer is stopped) by default (after one retry).
To replicate the functionality provided by the `MissingMessageIdAdvice`, you can set the `statefulRetryFatalWithNullMessageId` property to `false` on the listener container.
With that setting, the consumer continues to run and the message is rejected (after one retry).
It is discarded or routed to the dead letter queue (if one is configured).| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 1.3, a builder API is provided to aid in assembling these interceptors by using Java (in `@Configuration` classes). +The following example shows how to do so: + +``` +@Bean +public StatefulRetryOperationsInterceptor interceptor() { + return RetryInterceptorBuilder.stateful() + .maxAttempts(5) + .backOffOptions(1000, 2.0, 10000) // initialInterval, multiplier, maxInterval + .build(); +} +``` + +Only a subset of retry capabilities can be configured this way. +More advanced features would need the configuration of a `RetryTemplate` as a Spring bean. +See the [Spring Retry Javadoc](https://docs.spring.io/spring-retry/docs/api/current/) for complete information about available policies and their configuration. + +##### Retry with Batch Listeners + +It is not recommended to configure retry with a batch listener, unless the batch was created by the producer, in a single record. +See [Batched Messages](#de-batching) for information about consumer and producer-created batches. +With a consumer-created batch, the framework has no knowledge about which message in the batch caused the failure so recovery after the retries are exhausted is not possible. +With producer-created batches, since there is only one message that actually failed, the whole message can be recovered. +Applications may want to inform a custom recoverer where in the batch the failure occurred, perhaps by setting an index property of the thrown exception. + +A retry recoverer for a batch listener must implement `MessageBatchRecoverer`. + +##### Message Listeners and the Asynchronous Case + +If a `MessageListener` fails because of a business exception, the exception is handled by the message listener container, which then goes back to listening for another message. +If the failure is caused by a dropped connection (not a business exception), the consumer that is collecting messages for the listener has to be cancelled and restarted. +The `SimpleMessageListenerContainer` handles this seamlessly, and it leaves a log to say that the listener is being restarted. +In fact, it loops endlessly, trying to restart the consumer. +Only if the consumer is very badly behaved indeed will it give up. +One side effect is that if the broker is down when the container starts, it keeps trying until a connection can be established. + +Business exception handling, as opposed to protocol errors and dropped connections, might need more thought and some custom configuration, especially if transactions or container acks are in use. +Prior to 2.8.x, RabbitMQ had no definition of dead letter behavior. +Consequently, by default, a message that is rejected or rolled back because of a business exception can be redelivered endlessly. +To put a limit on the client on the number of re-deliveries, one choice is a `StatefulRetryOperationsInterceptor` in the advice chain of the listener. 
The interceptor can have a recovery callback that implements a custom dead letter action — whatever is appropriate for your particular environment.

Another alternative is to set the container’s `defaultRequeueRejected` property to `false`.
This causes all failed messages to be discarded.
When using RabbitMQ 2.8.x or higher, this also facilitates delivering the message to a dead letter exchange.

Alternatively, you can throw an `AmqpRejectAndDontRequeueException`.
Doing so prevents message requeuing, regardless of the setting of the `defaultRequeueRejected` property.

Starting with version 2.1, an `ImmediateRequeueAmqpException` is introduced to perform exactly the opposite logic: the message will be requeued, regardless of the setting of the `defaultRequeueRejected` property.

Often, a combination of both techniques is used.
You can use a `StatefulRetryOperationsInterceptor` in the advice chain with a `MessageRecoverer` that throws an `AmqpRejectAndDontRequeueException`.
The `MessageRecoverer` is called when all retries have been exhausted.
The `RejectAndDontRequeueRecoverer` does exactly that.
The default `MessageRecoverer` consumes the errant message and emits a `WARN` message.

Starting with version 1.3, a new `RepublishMessageRecoverer` is provided, to allow publishing of failed messages after retries are exhausted.

When a recoverer consumes the final exception, the message is ack’d and is not sent to the dead letter exchange, if any.

| |When `RepublishMessageRecoverer` is used on the consumer side, the received message has `deliveryMode` in the `receivedDeliveryMode` message property.
In this case the `deliveryMode` is `null`.
That means a `NON_PERSISTENT` delivery mode on the broker.
Starting with version 2.0, you can configure the `RepublishMessageRecoverer` with the `deliveryMode` to set on the republished message when the received value is `null`.
By default, it uses `MessageProperties` default value - `MessageDeliveryMode.PERSISTENT`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example shows how to set a `RepublishMessageRecoverer` as the recoverer: + +``` +@Bean +RetryOperationsInterceptor interceptor() { + return RetryInterceptorBuilder.stateless() + .maxAttempts(5) + .recoverer(new RepublishMessageRecoverer(amqpTemplate(), "something", "somethingelse")) + .build(); +} +``` + +The `RepublishMessageRecoverer` publishes the message with additional information in message headers, such as the exception message, stack trace, original exchange, and routing key. +Additional headers can be added by creating a subclass and overriding `additionalHeaders()`. +The `deliveryMode` (or any other properties) can also be changed in the `additionalHeaders()`, as the following example shows: + +``` +RepublishMessageRecoverer recoverer = new RepublishMessageRecoverer(amqpTemplate, "error") { + + protected Map additionalHeaders(Message message, Throwable cause) { + message.getMessageProperties() + .setDeliveryMode(message.getMessageProperties().getReceivedDeliveryMode()); + return null; + } + +}; +``` + +Starting with version 2.0.5, the stack trace may be truncated if it is too large; this is because all headers have to fit in a single frame. +By default, if the stack trace would cause less than 20,000 bytes ('headroom') to be available for other headers, it will be truncated. +This can be adjusted by setting the recoverer’s `frameMaxHeadroom` property, if you need more or less space for other headers. +Starting with versions 2.1.13, 2.2.3, the exception message is included in this calculation, and the amount of stack trace will be maximized using the following algorithm: + +* if the stack trace alone would exceed the limit, the exception message header will be truncated to 97 bytes plus `…​` and the stack trace is truncated too. + +* if the stack trace is small, the message will be truncated (plus `…​`) to fit in the available bytes (but the message within the stack trace itself is truncated to 97 bytes plus `…​`). + +Whenever a truncation of any kind occurs, the original exception will be logged to retain the complete information. + +Starting with version 2.3.3, a new subclass `RepublishMessageRecovererWithConfirms` is provided; this supports both styles of publisher confirms and will wait for the confirmation before returning (or throw an exception if not confirmed or the message is returned). + +If the confirm type is `CORRELATED`, the subclass will also detect if a message is returned and throw an `AmqpMessageReturnedException`; if the publication is negatively acknowledged, it will throw an `AmqpNackReceivedException`. + +If the confirm type is `SIMPLE`, the subclass will invoke the `waitForConfirmsOrDie` method on the channel. + +See [Publisher Confirms and Returns](#cf-pub-conf-ret) for more information about confirms and returns. 
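
The following is a minimal sketch of using the confirming variant; the exchange and routing key names are illustrative, and it assumes a `RabbitTemplate` whose connection factory has publisher confirms enabled with `ConfirmType.CORRELATED`:

```
// Illustrative only: "error.exchange" and "error.routing.key" are example names,
// and the rabbitTemplate is assumed to use a connection factory configured with
// publisher confirms (ConfirmType.CORRELATED).
@Bean
RetryOperationsInterceptor confirmingInterceptor(RabbitTemplate rabbitTemplate) {
    return RetryInterceptorBuilder.stateless()
            .maxAttempts(5)
            .recoverer(new RepublishMessageRecovererWithConfirms(
                    rabbitTemplate, "error.exchange", "error.routing.key", ConfirmType.CORRELATED))
            .build();
}
```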
Starting with version 2.1, an `ImmediateRequeueMessageRecoverer` is added to throw an `ImmediateRequeueAmqpException`, which notifies a listener container to requeue the current failed message.

##### Exception Classification for Spring Retry

Spring Retry has a great deal of flexibility for determining which exceptions can invoke retry.
The default configuration retries for all exceptions.
Given that user exceptions are wrapped in a `ListenerExecutionFailedException`, we need to ensure that the classification examines the exception causes.
The default classifier looks only at the top-level exception.

Since Spring Retry 1.0.3, the `BinaryExceptionClassifier` has a property called `traverseCauses` (default: `false`).
When `true`, it traverses exception causes until it finds a match or there is no cause.

To use this classifier for retry, you can use a `SimpleRetryPolicy` created with the constructor that takes the max attempts, the `Map` of `Exception` instances, and the boolean (`traverseCauses`) and inject this policy into the `RetryTemplate`.

#### 4.1.22. Multiple Broker (or Cluster) Support

Version 2.3 added more convenience when communicating between a single application and multiple brokers or broker clusters.
The main benefit, on the consumer side, is that the infrastructure can automatically associate auto-declared queues with the appropriate broker.

This is best illustrated with an example:

```
@SpringBootApplication(exclude = RabbitAutoConfiguration.class)
public class Application {

    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }

    @Bean
    CachingConnectionFactory cf1() {
        return new CachingConnectionFactory("localhost");
    }

    @Bean
    CachingConnectionFactory cf2() {
        return new CachingConnectionFactory("otherHost");
    }

    @Bean
    CachingConnectionFactory cf3() {
        return new CachingConnectionFactory("thirdHost");
    }

    @Bean
    SimpleRoutingConnectionFactory rcf(CachingConnectionFactory cf1,
            CachingConnectionFactory cf2, CachingConnectionFactory cf3) {

        SimpleRoutingConnectionFactory rcf = new SimpleRoutingConnectionFactory();
        rcf.setDefaultTargetConnectionFactory(cf1);
        rcf.setTargetConnectionFactories(Map.of("one", cf1, "two", cf2, "three", cf3));
        return rcf;
    }

    @Bean("factory1-admin")
    RabbitAdmin admin1(CachingConnectionFactory cf1) {
        return new RabbitAdmin(cf1);
    }

    @Bean("factory2-admin")
    RabbitAdmin admin2(CachingConnectionFactory cf2) {
        return new RabbitAdmin(cf2);
    }

    @Bean("factory3-admin")
    RabbitAdmin admin3(CachingConnectionFactory cf3) {
        return new RabbitAdmin(cf3);
    }

    @Bean
    public RabbitListenerEndpointRegistry rabbitListenerEndpointRegistry() {
        return new RabbitListenerEndpointRegistry();
    }

    @Bean
    public RabbitListenerAnnotationBeanPostProcessor postProcessor(RabbitListenerEndpointRegistry registry) {
        MultiRabbitListenerAnnotationBeanPostProcessor postProcessor
                = new MultiRabbitListenerAnnotationBeanPostProcessor();
        postProcessor.setEndpointRegistry(registry);
        postProcessor.setContainerFactoryBeanName("defaultContainerFactory");
        return postProcessor;
    }

    @Bean
    public SimpleRabbitListenerContainerFactory factory1(CachingConnectionFactory cf1) {
        SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
        factory.setConnectionFactory(cf1);
        return factory;
    }

    @Bean
    public SimpleRabbitListenerContainerFactory factory2(CachingConnectionFactory cf2) {
        SimpleRabbitListenerContainerFactory factory = new
SimpleRabbitListenerContainerFactory(); + factory.setConnectionFactory(cf2); + return factory; + } + + @Bean + public SimpleRabbitListenerContainerFactory factory3(CachingConnectionFactory cf3) { + SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory(); + factory.setConnectionFactory(cf3); + return factory; + } + + @Bean + RabbitTemplate template(RoutingConnectionFactory rcf) { + return new RabbitTemplate(rcf); + } + + @Bean + ConnectionFactoryContextWrapper wrapper(SimpleRoutingConnectionFactory rcf) { + return new ConnectionFactoryContextWrapper(rcf); + } + +} + +@Component +class Listeners { + + @RabbitListener(queuesToDeclare = @Queue("q1"), containerFactory = "factory1") + public void listen1(String in) { + + } + + @RabbitListener(queuesToDeclare = @Queue("q2"), containerFactory = "factory2") + public void listen2(String in) { + + } + + @RabbitListener(queuesToDeclare = @Queue("q3"), containerFactory = "factory3") + public void listen3(String in) { + + } + +} +``` + +As you can see, we have declared 3 sets of infrastructure (connection factories, admins, container factories). +As discussed earlier, `@RabbitListener` can define which container factory to use; in this case, they also use `queuesToDeclare` which causes the queue(s) to be declared on the broker, if it doesn’t exist. +By naming the `RabbitAdmin` beans with the convention `-admin`, the infrastructure is able to determine which admin should declare the queue. +This will also work with `bindings = @QueueBinding(…​)` whereby the exchange and binding will also be declared. +It will NOT work with `queues`, since that expects the queue(s) to already exist. + +On the producer side, a convenient `ConnectionFactoryContextWrapper` class is provided, to make using the `RoutingConnectionFactory` (see [Routing Connection Factory](#routing-connection-factory)) simpler. + +As you can see above, a `SimpleRoutingConnectionFactory` bean has been added with routing keys `one`, `two` and `three`. +There is also a `RabbitTemplate` that uses that factory. +Here is an example of using that template with the wrapper to route to one of the broker clusters. + +``` +@Bean +public ApplicationRunner runner(RabbitTemplate template, ConnectionFactoryContextWrapper wrapper) { + return args -> { + wrapper.run("one", () -> template.convertAndSend("q1", "toCluster1")); + wrapper.run("two", () -> template.convertAndSend("q2", "toCluster2")); + wrapper.run("three", () -> template.convertAndSend("q3", "toCluster3")); + }; +} +``` + +#### 4.1.23. Debugging + +Spring AMQP provides extensive logging, especially at the `DEBUG` level. + +If you wish to monitor the AMQP protocol between the application and broker, you can use a tool such as WireShark, which has a plugin to decode the protocol. +Alternatively, the RabbitMQ Java client comes with a very useful class called `Tracer`. +When run as a `main`, by default, it listens on port 5673 and connects to port 5672 on localhost. +You can run it and change your connection factory configuration to connect to port 5673 on localhost. +It displays the decoded protocol on the console. +Refer to the `Tracer` Javadoc for more information. + +### 4.2. Using the RabbitMQ Stream Plugin + +Version 2.4 introduces initial support for the [RabbitMQ Stream Plugin Java Client](https://github.com/rabbitmq/rabbitmq-stream-java-client) for the [RabbitMQ Stream Plugin](https://rabbitmq.com/stream.html). + +* `RabbitStreamTemplate` + +* `StreamListenerContainer` + +#### 4.2.1. 
Sending Messages

The `RabbitStreamTemplate` provides a subset of the `RabbitTemplate` (AMQP) functionality.

Example 1. RabbitStreamOperations

```
public interface RabbitStreamOperations extends AutoCloseable {

    ListenableFuture<Boolean> send(Message message);

    ListenableFuture<Boolean> convertAndSend(Object message);

    ListenableFuture<Boolean> convertAndSend(Object message, @Nullable MessagePostProcessor mpp);

    ListenableFuture<Boolean> send(com.rabbitmq.stream.Message message);

    MessageBuilder messageBuilder();

    MessageConverter messageConverter();

    StreamMessageConverter streamMessageConverter();

    @Override
    void close() throws AmqpException;

}
```

The `RabbitStreamTemplate` implementation has the following constructor and properties:

Example 2. RabbitStreamTemplate

```
public RabbitStreamTemplate(Environment environment, String streamName) {
}

public void setMessageConverter(MessageConverter messageConverter) {
}

public void setStreamConverter(StreamMessageConverter streamConverter) {
}

public synchronized void setProducerCustomizer(ProducerCustomizer producerCustomizer) {
}
```

The `MessageConverter` is used in the `convertAndSend` methods to convert the object to a Spring AMQP `Message`.

The `StreamMessageConverter` is used to convert from a Spring AMQP `Message` to a native stream `Message`.

You can also send native stream `Message`s directly; the `messageBuilder()` method provides access to the `Producer`'s message builder.

The `ProducerCustomizer` provides a mechanism to customize the producer before it is built.

Refer to the [Java Client Documentation](https://rabbitmq.github.io/rabbitmq-stream-java-client/stable/htmlsingle/) about customizing the `Environment` and `Producer`.

#### 4.2.2. Receiving Messages

Asynchronous message reception is provided by the `StreamListenerContainer` (and the `StreamRabbitListenerContainerFactory` when using `@RabbitListener`).

The listener container requires an `Environment` as well as a single stream name.

You can either receive Spring AMQP `Message`s using the classic `MessageListener`, or you can receive native stream `Message`s using a new interface:

```
public interface StreamMessageListener extends MessageListener {

    void onStreamMessage(Message message, Context context);

}
```

See [Message Listener Container Configuration](#containerAttributes) for information about supported properties.

Similar to the template, the container has a `ConsumerCustomizer` property.

Refer to the [Java Client Documentation](https://rabbitmq.github.io/rabbitmq-stream-java-client/stable/htmlsingle/) about customizing the `Environment` and `Consumer`.

When using `@RabbitListener`, configure a `StreamRabbitListenerContainerFactory`; at this time, most `@RabbitListener` properties (`concurrency`, and so on) are ignored. Only `id`, `queues`, `autoStartup` and `containerFactory` are supported.
In addition, `queues` can only contain one stream name.

#### 4.2.3. Examples

```
@Bean
RabbitStreamTemplate streamTemplate(Environment env) {
    RabbitStreamTemplate template = new RabbitStreamTemplate(env, "test.stream.queue1");
    template.setProducerCustomizer((name, builder) -> builder.name("test"));
    return template;
}

@Bean
RabbitListenerContainerFactory<StreamListenerContainer> rabbitListenerContainerFactory(Environment env) {
    return new StreamRabbitListenerContainerFactory(env);
}

@RabbitListener(queues = "test.stream.queue1")
void listen(String in) {
    ...
}

@Bean
RabbitListenerContainerFactory<StreamListenerContainer> nativeFactory(Environment env) {
    StreamRabbitListenerContainerFactory factory = new StreamRabbitListenerContainerFactory(env);
    factory.setNativeListener(true);
    factory.setConsumerCustomizer((id, builder) -> {
        builder.name("myConsumer")
                .offset(OffsetSpecification.first())
                .manualTrackingStrategy();
    });
    return factory;
}

@RabbitListener(id = "test", queues = "test.stream.queue2", containerFactory = "nativeFactory")
void nativeMsg(Message in, Context context) {
    ...
    context.storeOffset();
}
```

### 4.3. Logging Subsystem AMQP Appenders

The framework provides logging appenders for some popular logging subsystems:

* logback (since Spring AMQP version 1.4)

* log4j2 (since Spring AMQP version 1.6)

The appenders are configured by using the normal mechanisms for the logging subsystem; the available properties are specified in the following sections.

#### 4.3.1. Common properties

The following properties are available with all appenders:

| Property | Default | Description |
|---|---|---|
| `exchangeName` | `logs` | Name of the exchange to which to publish log events. |
| `exchangeType` | `topic` | Type of the exchange to which to publish log events — needed only if the appender declares the exchange. See `declareExchange`. |
| `routingKeyPattern` | `%c.%p` | Logging subsystem pattern format to use to generate a routing key. |
| `applicationId` | | Application ID — added to the routing key if the pattern includes `%X{applicationId}`. |
| `senderPoolSize` | `2` | The number of threads to use to publish log events. |
| `maxSenderRetries` | `30` | How many times to retry sending a message if the broker is unavailable or there is some other error. Retries are delayed as follows: `N ^ log(N)`, where `N` is the retry number. |
| `addresses` | | A comma-delimited list of broker addresses in the following form: `host:port[,host:port]*` - overrides `host` and `port`. |
| `host` | `localhost` | RabbitMQ host to which to connect. |
| `port` | `5672` | RabbitMQ port to which to connect. |
| `virtualHost` | `/` | RabbitMQ virtual host to which to connect. |
| `username` | `guest` | RabbitMQ user to use when connecting. |
| `password` | `guest` | RabbitMQ password for this user. |
| `useSsl` | `false` | Whether to use SSL for the RabbitMQ connection. See [`RabbitConnectionFactoryBean` and Configuring SSL](#rabbitconnectionfactorybean-configuring-ssl). |
| `verifyHostname` | `true` | Enable server hostname verification for TLS connections. See [`RabbitConnectionFactoryBean` and Configuring SSL](#rabbitconnectionfactorybean-configuring-ssl). |
| `sslAlgorithm` | `null` | The SSL algorithm to use. |
| `sslPropertiesLocation` | `null` | Location of the SSL properties file. |
| `keyStore` | `null` | Location of the keystore. |
| `keyStorePassphrase` | `null` | Passphrase for the keystore. |
| `keyStoreType` | `JKS` | The keystore type. |
| `trustStore` | `null` | Location of the truststore. |
| `trustStorePassphrase` | `null` | Passphrase for the truststore. |
| `trustStoreType` | `JKS` | The truststore type. |
| `saslConfig` | `null` (RabbitMQ client default applies) | The `saslConfig` - see the javadoc for `RabbitUtils.stringToSaslConfig` for valid values. |
| `contentType` | `text/plain` | `content-type` property of log messages. |
| `contentEncoding` | | `content-encoding` property of log messages. |
| `declareExchange` | `false` | Whether or not to declare the configured exchange when this appender starts. See also `durable` and `autoDelete`. |
| `durable` | `true` | When `declareExchange` is `true`, the durable flag is set to this value. |
| `autoDelete` | `false` | When `declareExchange` is `true`, the auto-delete flag is set to this value. |
| `charset` | `null` | Character set to use when converting `String` to `byte[]`. Default: null (the system default charset is used). If the character set is unsupported on the current platform, we fall back to using the system character set. |
| `deliveryMode` | `PERSISTENT` | `PERSISTENT` or `NON_PERSISTENT` to determine whether or not RabbitMQ should persist the messages. |
| `generateId` | `false` | Used to determine whether the `messageId` property is set to a unique value. |
| `clientConnectionProperties` | `null` | A comma-delimited list of `key:value` pairs for custom client properties to the RabbitMQ connection. |
| `addMdcAsHeaders` | `true` | MDC properties were always added into RabbitMQ message headers until this property was introduced. This can lead to issues for a large MDC, since RabbitMQ has a limited buffer size for all headers and this buffer is pretty small. This property was introduced to avoid issues with a large MDC. By default, this value is set to `true` for backward compatibility. Setting it to `false` turns off serialization of the MDC into headers. Note that the `JsonLayout` adds the MDC into the message by default. |

#### 4.3.2. Log4j 2 Appender

The following example shows how to configure a Log4j 2 appender:

```
<Appenders>
    ...
    <RabbitMQ name="rabbitmq"
        addresses="foo:5672,bar:5672" user="guest" password="guest" virtualHost="/"
        exchange="log4j2" applicationId="myAppId" maxSenderRetries="2">
    </RabbitMQ>
</Appenders>
```

| |Starting with versions 1.6.10 and 1.7.3, by default, the log4j2 appender publishes the messages to RabbitMQ on the calling thread.
This is because Log4j 2 does not, by default, create thread-safe events.
If the broker is down, the `maxSenderRetries` is used to retry, with no delay between retries.
If you wish to restore the previous behavior of publishing the messages on separate threads (`senderPoolSize`), you can set the `async` property to `true`.
However, you also need to configure Log4j 2 to use the `DefaultLogEventFactory` instead of the `ReusableLogEventFactory`.
One way to do that is to set the system property `-Dlog4j2.enable.threadlocals=false`.
If you use asynchronous publishing with the `ReusableLogEventFactory`, events have a high likelihood of being corrupted due to cross-talk.|
|---|---|

#### 4.3.3. Logback Appender

The following example shows how to configure a logback appender:

```
<appender name="AMQP" class="org.springframework.amqp.rabbit.logback.AmqpAppender">
    <layout>
        <pattern><![CDATA[ %d %p %t [%c] - <%m>%n ]]></pattern>
    </layout>
    <addresses>foo:5672,bar:5672</addresses>
    <abbreviation>36</abbreviation>
    <includeCallerData>false</includeCallerData>
    <applicationId>myApplication</applicationId>
    <routingKeyPattern>%property{applicationId}.%c.%p</routingKeyPattern>
    <generateId>true</generateId>
    <charset>UTF-8</charset>
    <durable>false</durable>
    <deliveryMode>NON_PERSISTENT</deliveryMode>
    <declareExchange>true</declareExchange>
    <autoDelete>false</autoDelete>
</appender>
```

Starting with version 1.7.1, the Logback `AmqpAppender` provides an `includeCallerData` option, which is `false` by default.
Extracting caller data can be rather expensive, because the log event has to create a throwable and inspect it to determine the calling location.
Therefore, by default, caller data associated with an event is not extracted when the event is added to the event queue.
You can configure the appender to include caller data by setting the `includeCallerData` property to `true`.

Starting with version 2.0.0, the Logback `AmqpAppender` supports [Logback encoders](https://logback.qos.ch/manual/encoders.html) with the `encoder` option.
The `encoder` and `layout` options are mutually exclusive.

#### 4.3.4. Customizing the Messages

By default, AMQP appenders populate the following message properties:

* `deliveryMode`

* `contentType`

* `contentEncoding`, if configured

* `messageId`, if `generateId` is configured

* `timestamp` of the log event

* `appId`, if `applicationId` is configured

In addition, they populate headers with the following values:

* `categoryName` of the log event

* The level of the log event

* `thread`: the name of the thread where the log event happened

* The location of the stack trace of the log event call

* A copy of all the MDC properties (unless `addMdcAsHeaders` is set to `false`)

Each of the appenders can be subclassed, letting you modify the messages before publishing.
The following example shows how to customize log messages:

```
public class MyEnhancedAppender extends AmqpAppender {

    @Override
    public Message postProcessMessageBeforeSend(Message message, Event event) {
        message.getMessageProperties().setHeader("foo", "bar");
        return message;
    }

}
```

Starting with 2.2.4, the log4j2 `AmqpAppender` can be extended by using `@PluginBuilderFactory` and also extending `AmqpAppender.Builder`:

```
@Plugin(name = "MyEnhancedAppender", category = "Core", elementType = "appender", printObject = true)
public class MyEnhancedAppender extends AmqpAppender {

    public MyEnhancedAppender(String name, Filter filter, Layout layout,
            boolean ignoreExceptions, AmqpManager manager, BlockingQueue eventQueue) {
        super(name, filter, layout, ignoreExceptions, manager, eventQueue);
    }

    @Override
    public Message postProcessMessageBeforeSend(Message message, Event event) {
        message.getMessageProperties().setHeader("foo", "bar");
        return message;
    }

    @PluginBuilderFactory
    public static Builder newBuilder() {
        return new Builder();
    }

    protected static class Builder extends AmqpAppender.Builder {

        @Override
        protected AmqpAppender buildInstance(String name, Filter filter, Layout layout,
                boolean ignoreExceptions, AmqpManager manager, BlockingQueue eventQueue) {
            return new MyEnhancedAppender(name, filter, layout, ignoreExceptions, manager, eventQueue);
        }
    }

}
```

#### 4.3.5. Customizing the Client Properties

You can add custom client properties by adding either string properties or more complex properties.

##### Simple String Properties

Each appender supports adding client properties to the RabbitMQ connection.

The following example shows how to add a custom client property for logback:

```
<appender name="AMQP" ...>
    ...
    <clientConnectionProperties>thing1:thing2,cat:hat</clientConnectionProperties>
    ...
</appender>
```

Example 3. log4j2

```
<Appenders>
    ...
    <RabbitMQ name="rabbitmq"
        ...
        clientConnectionProperties="thing1:thing2,cat:hat"
        ...
    />
</Appenders>
```

The properties are a comma-delimited list of `key:value` pairs.
Keys and values cannot contain commas or colons.

These properties appear on the RabbitMQ Admin UI when the connection is viewed.

##### Advanced Technique for Logback

You can subclass the Logback appender.
Doing so lets you modify the client connection properties before the connection is established.
The following example shows how to do so:

```
public class MyEnhancedAppender extends AmqpAppender {

    private String thing1;

    @Override
    protected void updateConnectionClientProperties(Map<String, Object> clientProperties) {
        clientProperties.put("thing1", this.thing1);
    }

    public void setThing1(String thing1) {
        this.thing1 = thing1;
    }

}
```

Then you can add `<thing1>thing2</thing1>` to logback.xml.

For String properties such as those shown in the preceding example, the previous technique can be used.
Subclasses allow for adding richer properties (such as adding a `Map` or numeric property).

#### 4.3.6. Providing a Custom Queue Implementation

The `AmqpAppenders` use a `BlockingQueue` to asynchronously publish logging events to RabbitMQ.
By default, a `LinkedBlockingQueue` is used.
However, you can supply any kind of custom `BlockingQueue` implementation.
+
+The following example shows how to do so for Logback:
+
+```
+public class MyEnhancedAppender extends AmqpAppender {
+
+    @Override
+    protected BlockingQueue<Event> createEventQueue() {
+        return new ArrayBlockingQueue<>(100000); // a bounded queue needs an explicit capacity; this value is arbitrary
+    }
+
+}
+```
+
+The Log4j 2 appender supports using a [`BlockingQueueFactory`](https://logging.apache.org/log4j/2.x/manual/appenders.html#BlockingQueueFactory), as the following example shows:
+
+```
+<Appenders>
+    ...
+    <RabbitMQ name="rabbitmq" ...>
+        <ArrayBlockingQueue/>
+    </RabbitMQ>
+</Appenders>
+```
+
+### 4.4. Sample Applications
+
+The [Spring AMQP Samples](https://github.com/SpringSource/spring-amqp-samples) project includes two sample applications.
+The first is a simple “Hello World” example that demonstrates both synchronous and asynchronous message reception.
+It provides an excellent starting point for acquiring an understanding of the essential components.
+The second sample is based on a stock-trading use case to demonstrate the types of interaction that would be common in real-world applications.
+In this chapter, we provide a quick walk-through of each sample so that you can focus on the most important components.
+The samples are both Maven-based, so you should be able to import them directly into any Maven-aware IDE (such as [SpringSource Tool Suite](https://www.springsource.org/sts)).
+
+#### 4.4.1. The “Hello World” Sample
+
+The “Hello World” sample demonstrates both synchronous and asynchronous message reception.
+You can import the `spring-rabbit-helloworld` sample into the IDE and then follow the discussion below.
+
+##### Synchronous Example
+
+Within the `src/main/java` directory, navigate to the `org.springframework.amqp.helloworld` package.
+Open the `HelloWorldConfiguration` class and notice that it has the `@Configuration` annotation at the class level and some `@Bean` annotations at the method level.
+This is an example of Spring’s Java-based configuration.
+You can read more about that [here](https://docs.spring.io/spring/docs/current/spring-framework-reference/html/beans.html#beans-java).
+
+The following listing shows how the connection factory is created:
+
+```
+@Bean
+public CachingConnectionFactory connectionFactory() {
+    CachingConnectionFactory connectionFactory =
+        new CachingConnectionFactory("localhost");
+    connectionFactory.setUsername("guest");
+    connectionFactory.setPassword("guest");
+    return connectionFactory;
+}
+```
+
+The configuration also contains an instance of `RabbitAdmin`, which, by default, looks for any beans of type exchange, queue, or binding and then declares them on the broker.
+In fact, the `helloWorldQueue` bean that is generated in `HelloWorldConfiguration` is an example because it is an instance of `Queue`.
+
+The following listing shows the `helloWorldQueue` bean definition:
+
+```
+@Bean
+public Queue helloWorldQueue() {
+    return new Queue(this.helloWorldQueueName);
+}
+```
+
+Looking back at the `rabbitTemplate` bean configuration, you can see that it has the name of the `helloWorldQueue` set as both its `queue` property (for receiving messages) and its `routingKey` property (for sending messages).
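+
+That bean definition is not listed in this walk-through; the following sketch shows its likely shape, inferred from the description above (the actual bean in the sample may differ in detail):
+
+```
+@Bean
+public RabbitTemplate rabbitTemplate() {
+    RabbitTemplate template = new RabbitTemplate(connectionFactory());
+    template.setQueue(this.helloWorldQueueName);      // used by receive operations
+    template.setRoutingKey(this.helloWorldQueueName); // used by send operations
+    return template;
+}
+```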
+
+Now that we have explored the configuration, we can look at the code that actually uses these components.
+First, open the `Producer` class from within the same package.
+It contains a `main()` method where the Spring `ApplicationContext` is created.
+
+The following listing shows the `main` method:
+
+```
+public static void main(String[] args) {
+    ApplicationContext context =
+        new AnnotationConfigApplicationContext(HelloWorldConfiguration.class);
+    AmqpTemplate amqpTemplate = context.getBean(AmqpTemplate.class);
+    amqpTemplate.convertAndSend("Hello World");
+    System.out.println("Sent: Hello World");
+}
+```
+
+In the preceding example, the `AmqpTemplate` bean is retrieved and used for sending a `Message`.
+Since the client code should rely on interfaces whenever possible, the type is `AmqpTemplate` rather than `RabbitTemplate`.
+Even though the bean created in `HelloWorldConfiguration` is an instance of `RabbitTemplate`, relying on the interface means that this code is more portable (you can change the configuration independently of the code).
+Since the `convertAndSend()` method is invoked, the template delegates to its `MessageConverter` instance.
+In this case, it uses the default `SimpleMessageConverter`, but a different implementation could be provided to the `rabbitTemplate` bean, as defined in `HelloWorldConfiguration`.
+
+Now open the `Consumer` class.
+It actually shares the same configuration base class, which means it shares the `rabbitTemplate` bean.
+That is why we configured that template with both a `routingKey` (for sending) and a `queue` (for receiving).
+As we describe in [`AmqpTemplate`](#amqp-template), you could instead pass the 'routingKey' argument to the send method and the 'queue' argument to the receive method.
+The `Consumer` code is basically a mirror image of the `Producer`, calling `receiveAndConvert()` rather than `convertAndSend()`.
+
+The following listing shows the main method for the `Consumer`:
+
+```
+public static void main(String[] args) {
+    ApplicationContext context =
+        new AnnotationConfigApplicationContext(HelloWorldConfiguration.class);
+    AmqpTemplate amqpTemplate = context.getBean(AmqpTemplate.class);
+    System.out.println("Received: " + amqpTemplate.receiveAndConvert());
+}
+```
+
+If you run the `Producer` and then run the `Consumer`, you should see `Received: Hello World` in the console output.
+
+##### Asynchronous Example
+
+[Synchronous Example](#hello-world-sync) walked through the synchronous Hello World sample.
+This section describes a slightly more advanced but significantly more powerful option.
+With a few modifications, the Hello World sample can provide an example of asynchronous reception, also known as message-driven POJOs.
+In fact, there is a sub-package that provides exactly that: `org.springframework.amqp.samples.helloworld.async`.
+
+Again, we start with the sending side.
+Open the `ProducerConfiguration` class and notice that it creates a `connectionFactory` and a `rabbitTemplate` bean.
+This time, since the configuration is dedicated to the message sending side, we do not even need any queue definitions, and the `RabbitTemplate` has only the 'routingKey' property set.
+Recall that messages are sent to an exchange rather than being sent directly to a queue.
+The AMQP default exchange is a direct exchange with no name.
+All queues are bound to that default exchange with their name as the routing key.
+That is why we only need to provide the routing key here.
+
+The following listing shows the `rabbitTemplate` definition:
+
+```
+@Bean
+public RabbitTemplate rabbitTemplate() {
+    RabbitTemplate template = new RabbitTemplate(connectionFactory());
+    template.setRoutingKey(this.helloWorldQueueName);
+    return template;
+}
+```
+
+Since this sample demonstrates asynchronous message reception, the producing side is designed to continuously send messages (if it were a message-per-execution model like the synchronous version, it would not be quite so obvious that it is, in fact, a message-driven consumer).
+The component responsible for continuously sending messages is defined as an inner class within the `ProducerConfiguration`.
+It is configured to run every three seconds.
+
+The following listing shows the component:
+
+```
+static class ScheduledProducer {
+
+    @Autowired
+    private volatile RabbitTemplate rabbitTemplate;
+
+    private final AtomicInteger counter = new AtomicInteger();
+
+    @Scheduled(fixedRate = 3000)
+    public void sendMessage() {
+        rabbitTemplate.convertAndSend("Hello World " + counter.incrementAndGet());
+    }
+}
+```
+
+You do not need to understand all of the details, since the real focus should be on the receiving side (which we cover next).
+However, if you are not yet familiar with Spring task scheduling support, you can learn more [here](https://docs.spring.io/spring/docs/current/spring-framework-reference/html/scheduling.html#scheduling-annotation-support).
+The short story is that the `postProcessor` bean in the `ProducerConfiguration` registers the task with a scheduler.
+
+Now we can turn to the receiving side.
+To emphasize the message-driven POJO behavior, we start with the component that reacts to the messages.
+The class is called `HelloWorldHandler` and is shown in the following listing:
+
+```
+public class HelloWorldHandler {
+
+    public void handleMessage(String text) {
+        System.out.println("Received: " + text);
+    }
+
+}
+```
+
+That class is a POJO.
+It does not extend any base class, it does not implement any interfaces, and it does not even contain any imports.
+It is being “adapted” to the `MessageListener` interface by the Spring AMQP `MessageListenerAdapter`.
+You can then configure that adapter on a `SimpleMessageListenerContainer`.
+For this sample, the container is created in the `ConsumerConfiguration` class.
+You can see the POJO wrapped in the adapter there.
+
+The following listing shows how the `listenerContainer` is defined:
+
+```
+@Bean
+public SimpleMessageListenerContainer listenerContainer() {
+    SimpleMessageListenerContainer container = new SimpleMessageListenerContainer();
+    container.setConnectionFactory(connectionFactory());
+    container.setQueueName(this.helloWorldQueueName);
+    container.setMessageListener(new MessageListenerAdapter(new HelloWorldHandler()));
+    return container;
+}
+```
+
+The `SimpleMessageListenerContainer` is a Spring lifecycle component and, by default, starts automatically.
+If you look in the `Consumer` class, you can see that its `main()` method consists of nothing more than a one-line bootstrap to create the `ApplicationContext`.
+The `Producer`’s `main()` method is also a one-line bootstrap, since the component whose method is annotated with `@Scheduled` also starts automatically.
+You can start the `Producer` and `Consumer` in any order, and you should see messages being sent and received every three seconds.
+
+#### 4.4.2. Stock Trading
+
+The Stock Trading sample demonstrates more advanced messaging scenarios than [the Hello World sample](#hello-world-sample).
+However, the configuration is very similar, if a bit more involved.
+Since we walked through the Hello World configuration in detail, here we focus on what makes this sample different.
+There is a server that pushes market data (stock quotations) to a topic exchange.
+Then, clients can subscribe to the market data feed by binding a queue with a routing pattern (for example, `app.stock.quotes.nasdaq.*`).
+The other main feature of this demo is a request-reply “stock trade” interaction that is initiated by the client and handled by the server.
+That involves a private `replyTo` queue that is sent by the client within the order request message itself.
+
+The server’s core configuration is in the `RabbitServerConfiguration` class within the `org.springframework.amqp.rabbit.stocks.config.server` package.
+It extends the `AbstractStockAppRabbitConfiguration`.
+That is where the resources common to the server and client are defined, including the market data topic exchange (whose name is 'app.stock.marketdata') and the queue that the server exposes for stock trades (whose name is 'app.stock.request').
+In that common configuration file, you also see that a `Jackson2JsonMessageConverter` is configured on the `RabbitTemplate`.
+
+The server-specific configuration consists of two things.
+First, it configures the market data exchange on the `RabbitTemplate` so that it does not need to provide that exchange name with every call to send a `Message`.
+It does this within an abstract callback method defined in the base configuration class.
+The following listing shows that method:
+
+```
+public void configureRabbitTemplate(RabbitTemplate rabbitTemplate) {
+    rabbitTemplate.setExchange(MARKET_DATA_EXCHANGE_NAME);
+}
+```
+
+Second, the stock request queue is declared.
+It does not require any explicit bindings in this case, because it is bound to the default no-name exchange with its own name as the routing key.
+As mentioned earlier, the AMQP specification defines that behavior.
+The following listing shows the definition of the `stockRequestQueue` bean:
+
+```
+@Bean
+public Queue stockRequestQueue() {
+    return new Queue(STOCK_REQUEST_QUEUE_NAME);
+}
+```
+
+Now that you have seen the configuration of the server’s AMQP resources, navigate to the `org.springframework.amqp.rabbit.stocks` package under the `src/test/java` directory.
+There, you can see the actual `Server` class that provides a `main()` method.
+It creates an `ApplicationContext` based on the `server-bootstrap.xml` config file.
+There, you can see the scheduled task that publishes dummy market data.
+That configuration relies upon Spring’s `task` namespace support.
+The bootstrap config file also imports a few other files.
+The most interesting one is `server-messaging.xml`, which is directly under `src/main/resources`.
+There, you can see the `messageListenerContainer` bean that is responsible for handling the stock trade requests.
+Finally, have a look at the `serverHandler` bean that is defined in `server-handlers.xml` (which is also in 'src/main/resources').
+That bean is an instance of the `ServerHandler` class and is a good example of a message-driven POJO that can also send reply messages.
+Notice that it is not itself coupled to the framework or any of the AMQP concepts.
+It accepts a `TradeRequest` and returns a `TradeResponse`.
+The following listing shows the definition of the `handleMessage` method:
+
+```
+public TradeResponse handleMessage(TradeRequest tradeRequest) { ...
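+    // The Jackson2JsonMessageConverter configured on the RabbitTemplate (in the common
+    // configuration described above) converts the incoming JSON payload to a TradeRequest
+    // and converts the returned TradeResponse back to JSON for the reply.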
+}
+```
+
+Now that we have seen the most important configuration and code for the server, we can turn to the client.
+The best starting point is probably `RabbitClientConfiguration`, in the `org.springframework.amqp.rabbit.stocks.config.client` package.
+Notice that it declares two queues without providing explicit names.
+The following listing shows the bean definitions for the two queues:
+
+```
+@Bean
+public Queue marketDataQueue() {
+    return amqpAdmin().declareQueue();
+}
+
+@Bean
+public Queue traderJoeQueue() {
+    return amqpAdmin().declareQueue();
+}
+```
+
+Those are private queues, and unique names are generated automatically.
+The first generated queue is used by the client to bind to the market data exchange that has been exposed by the server.
+Recall that, in AMQP, consumers interact with queues while producers interact with exchanges.
+The “binding” of queues to exchanges is what tells the broker to deliver (or route) messages from a given exchange to a queue.
+Since the market data exchange is a topic exchange, the binding can be expressed with a routing pattern.
+The `RabbitClientConfiguration` does so with a `Binding` object, and that object is generated with the `BindingBuilder` fluent API.
+The following listing shows the `Binding`:
+
+```
+@Value("${stocks.quote.pattern}")
+private String marketDataRoutingKey;
+
+@Bean
+public Binding marketDataBinding() {
+    return BindingBuilder.bind(
+        marketDataQueue()).to(marketDataExchange()).with(marketDataRoutingKey);
+}
+```
+
+Notice that the actual value has been externalized in a properties file (`client.properties` under `src/main/resources`), and that we use Spring’s `@Value` annotation to inject that value.
+This is generally a good idea.
+Otherwise, the value would have been hardcoded in a class and unmodifiable without recompilation.
+In this case, it is much easier to run multiple versions of the client while making changes to the routing pattern used for binding.
+We can try that now.
+
+Start by running `org.springframework.amqp.rabbit.stocks.Server` and then `org.springframework.amqp.rabbit.stocks.Client`.
+You should see dummy quotations for `NASDAQ` stocks, because the current value associated with the 'stocks.quote.pattern' key in `client.properties` is 'app.stock.quotes.nasdaq.*'.
+Now, while keeping the existing `Server` and `Client` running, change that property value to 'app.stock.quotes.nyse.*' and start a second `Client` instance.
+You should see that the first client still receives NASDAQ quotes while the second client receives NYSE quotes.
+You could instead change the pattern to get all stocks or even an individual ticker.
+
+The final feature we explore is the request-reply interaction from the client’s perspective.
+Recall that we have already seen the `ServerHandler` that accepts `TradeRequest` objects and returns `TradeResponse` objects.
+The corresponding code on the `Client` side is `RabbitStockServiceGateway` in the `org.springframework.amqp.rabbit.stocks.gateway` package.
+It delegates to the `RabbitTemplate` in order to send messages.
+The following listing shows the `send` method:
+
+```
+public void send(TradeRequest tradeRequest) {
+    getRabbitTemplate().convertAndSend(tradeRequest, new MessagePostProcessor() {
+        public Message postProcessMessage(Message message) throws AmqpException {
+            message.getMessageProperties().setReplyTo(new Address(defaultReplyToQueue));
+            try {
+                message.getMessageProperties().setCorrelationId(
+                    UUID.randomUUID().toString().getBytes("UTF-8"));
+            }
+            catch (UnsupportedEncodingException e) {
+                throw new AmqpException(e);
+            }
+            return message;
+        }
+    });
+}
+```
+
+Notice that, prior to sending the message, it sets the `replyTo` address.
+It provides the queue that was generated by the `traderJoeQueue` bean definition (shown earlier).
+The following listing shows the `@Bean` definition for the `StockServiceGateway` class itself:
+
+```
+@Bean
+public StockServiceGateway stockServiceGateway() {
+    RabbitStockServiceGateway gateway = new RabbitStockServiceGateway();
+    gateway.setRabbitTemplate(rabbitTemplate());
+    gateway.setDefaultReplyToQueue(traderJoeQueue());
+    return gateway;
+}
+```
+
+If you are no longer running the server and client, start them now.
+Try sending a request with the format of '100 TCKR'.
+After a brief artificial delay that simulates “processing” of the request, you should see a confirmation message appear on the client.
+
+#### 4.4.3. Receiving JSON from Non-Spring Applications
+
+Spring applications, when sending JSON, set the `__TypeId__` header to the fully qualified class name to assist the receiving application in converting the JSON back to a Java object.
+
+The `spring-rabbit-json` sample explores several techniques to convert the JSON from a non-Spring application.
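+
+The following is a minimal sketch of one such technique, assuming the non-Spring peer always sends the same payload type (`StockQuote` here is a hypothetical class; the converter would then be set on the template or the listener container factory):
+
+```
+@Bean
+public Jackson2JsonMessageConverter jsonConverter() {
+    DefaultClassMapper classMapper = new DefaultClassMapper();
+    classMapper.setDefaultType(StockQuote.class); // hypothetical type, used when no __TypeId__ header is present
+    Jackson2JsonMessageConverter converter = new Jackson2JsonMessageConverter();
+    converter.setClassMapper(classMapper);
+    return converter;
+}
+```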
+
+See also [Jackson2JsonMessageConverter](#json-message-converter) as well as the [Javadoc for the `DefaultClassMapper`](https://docs.spring.io/spring-amqp/docs/current/api/index.html?org/springframework/amqp/support/converter/DefaultClassMapper.html).
+
+### 4.5. Testing Support
+
+Writing integration tests for asynchronous applications is necessarily more complex than testing simpler applications.
+This is made more complex when abstractions such as the `@RabbitListener` annotations come into the picture.
+The question is how to verify that, after sending a message, the listener received the message as expected.
+
+The framework itself has many unit and integration tests.
+Some use mocks, while others use integration testing with a live RabbitMQ broker.
+You can consult those tests for some ideas for testing scenarios.
+
+Spring AMQP version 1.6 introduced the `spring-rabbit-test` jar, which provides support for testing some of these more complex scenarios.
+It is anticipated that this project will expand over time, but we need community feedback to make suggestions for the features needed to help with testing.
+Please use [JIRA](https://jira.spring.io/browse/AMQP) or [GitHub Issues](https://github.com/spring-projects/spring-amqp/issues) to provide such feedback.
+
+#### 4.5.1. @SpringRabbitTest
+
+Use this annotation to add infrastructure beans to the Spring test `ApplicationContext`.
+This is not necessary when using, for example, `@SpringBootTest`, since Spring Boot’s auto-configuration adds the beans.
+
+Beans that are registered are:
+
+* `CachingConnectionFactory` (`autoConnectionFactory`). If `@RabbitEnabled` is present, its connection factory is used.
+
+* `RabbitTemplate` (`autoRabbitTemplate`)
+
+* `RabbitAdmin` (`autoRabbitAdmin`)
+
+* `RabbitListenerContainerFactory` (`autoContainerFactory`)
+
+In addition, the beans associated with `@EnableRabbit` (to support `@RabbitListener`) are added.
+
+Example 4. JUnit5 example
+
+```
+@SpringJUnitConfig
+@SpringRabbitTest
+public class MyRabbitTests {
+
+    @Autowired
+    private RabbitTemplate template;
+
+    @Autowired
+    private RabbitAdmin admin;
+
+    @Autowired
+    private RabbitListenerEndpointRegistry registry;
+
+    @Test
+    void test() {
+        ...
+    }
+
+    @Configuration
+    public static class Config {
+
+        ...
+
+    }
+
+}
+```
+
+With JUnit4, replace `@SpringJUnitConfig` with `@RunWith(SpringRunner.class)`.
+
+#### 4.5.2. Mockito `Answer` Implementations
+
+There are currently two `Answer` implementations to help with testing.
+
+The first, `LatchCountDownAndCallRealMethodAnswer`, provides an `Answer<Void>` that returns `null` and counts down a latch.
+The following example shows how to use `LatchCountDownAndCallRealMethodAnswer`:
+
+```
+LatchCountDownAndCallRealMethodAnswer answer = this.harness.getLatchAnswerFor("myListener", 2);
+doAnswer(answer)
+    .when(listener).foo(anyString(), anyString());
+
+...
+
+assertThat(answer.await(10)).isTrue();
+```
+
+The second, `LambdaAnswer<T>`, provides a mechanism to optionally call the real method and provides an opportunity
+to return a custom result, based on the `InvocationOnMock` and the result (if any).
+
+Consider the following POJO:
+
+```
+public class Thing {
+
+    public String thing(String thing) {
+        return thing.toUpperCase();
+    }
+
+}
+```
+
+The following listing tests the `Thing` POJO:
+
+```
+Thing thing = spy(new Thing());
+
+doAnswer(new LambdaAnswer<String>(true, (i, r) -> r + r))
+    .when(thing).thing(anyString());
+assertEquals("THINGTHING", thing.thing("thing"));
+
+doAnswer(new LambdaAnswer<String>(true, (i, r) -> r + i.getArguments()[0]))
+    .when(thing).thing(anyString());
+assertEquals("THINGthing", thing.thing("thing"));
+
+doAnswer(new LambdaAnswer<String>(false, (i, r) ->
+    "" + i.getArguments()[0] + i.getArguments()[0])).when(thing).thing(anyString());
+assertEquals("thingthing", thing.thing("thing"));
+```
+
+Starting with version 2.2.3, the answers capture any exceptions thrown by the method under test.
+Use `answer.getExceptions()` to get a reference to them.
+
+When used in conjunction with the [`@RabbitListenerTest` and `RabbitListenerTestHarness`](#test-harness), use `harness.getLambdaAnswerFor("listenerId", true, …)` to get a properly constructed answer for the listener.
+
+#### 4.5.3. `@RabbitListenerTest` and `RabbitListenerTestHarness`
+
+Annotating one of your `@Configuration` classes with `@RabbitListenerTest` causes the framework to replace the
+standard `RabbitListenerAnnotationBeanPostProcessor` with a subclass called `RabbitListenerTestHarness` (it also enables `@RabbitListener` detection through `@EnableRabbit`).
+
+The `RabbitListenerTestHarness` enhances the listener in two ways.
+First, it wraps the listener in a Mockito `Spy`, enabling normal Mockito stubbing and verification operations.
+It can also add an `Advice` to the listener, enabling access to the arguments, result, and any exceptions that are thrown.
+You can control which (or both) of these are enabled with attributes on the `@RabbitListenerTest`.
+The latter is provided for access to lower-level data about the invocation.
+It also supports blocking the test thread until the async listener is called.
+
+| |`final` `@RabbitListener` methods cannot be spied or advised.
Also, only listeners with an `id` attribute can be spied or advised.| +|---|--------------------------------------------------------------------------------------------------------------------------------------| + +Consider some examples. + +The following example uses spy: + +``` +@Configuration +@RabbitListenerTest +public class Config { + + @Bean + public Listener listener() { + return new Listener(); + } + + ... + +} + +public class Listener { + + @RabbitListener(id="foo", queues="#{queue1.name}") + public String foo(String foo) { + return foo.toUpperCase(); + } + + @RabbitListener(id="bar", queues="#{queue2.name}") + public void foo(@Payload String foo, @Header("amqp_receivedRoutingKey") String rk) { + ... + } + +} + +public class MyTests { + + @Autowired + private RabbitListenerTestHarness harness; (1) + + @Test + public void testTwoWay() throws Exception { + assertEquals("FOO", this.rabbitTemplate.convertSendAndReceive(this.queue1.getName(), "foo")); + + Listener listener = this.harness.getSpy("foo"); (2) + assertNotNull(listener); + verify(listener).foo("foo"); + } + + @Test + public void testOneWay() throws Exception { + Listener listener = this.harness.getSpy("bar"); + assertNotNull(listener); + + LatchCountDownAndCallRealMethodAnswer answer = this.harness.getLatchAnswerFor("bar", 2); (3) + doAnswer(answer).when(listener).foo(anyString(), anyString()); (4) + + this.rabbitTemplate.convertAndSend(this.queue2.getName(), "bar"); + this.rabbitTemplate.convertAndSend(this.queue2.getName(), "baz"); + + assertTrue(answer.await(10)); + verify(listener).foo("bar", this.queue2.getName()); + verify(listener).foo("baz", this.queue2.getName()); + } + +} +``` + +|**1**| Inject the harness into the test case so we can get access to the spy. | +|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Get a reference to the spy so we can verify it was invoked as expected.
Since this is a send and receive operation, there is no need to suspend the test thread because it was already
suspended in the `RabbitTemplate` waiting for the reply. | +|**3**|In this case, we’re only using a send operation so we need a latch to wait for the asynchronous call to the listener
on the container thread.
We use one of the [Answer](#mockito-answer) implementations to help with that.
IMPORTANT: Due to the way the listener is spied, it is important to use `harness.getLatchAnswerFor()` to get a properly configured answer for the spy.| +|**4**| Configure the spy to invoke the `Answer`. | + +The following example uses the capture advice: + +``` +@Configuration +@ComponentScan +@RabbitListenerTest(spy = false, capture = true) +public class Config { + +} + +@Service +public class Listener { + + private boolean failed; + + @RabbitListener(id="foo", queues="#{queue1.name}") + public String foo(String foo) { + return foo.toUpperCase(); + } + + @RabbitListener(id="bar", queues="#{queue2.name}") + public void foo(@Payload String foo, @Header("amqp_receivedRoutingKey") String rk) { + if (!failed && foo.equals("ex")) { + failed = true; + throw new RuntimeException(foo); + } + failed = false; + } + +} + +public class MyTests { + + @Autowired + private RabbitListenerTestHarness harness; (1) + + @Test + public void testTwoWay() throws Exception { + assertEquals("FOO", this.rabbitTemplate.convertSendAndReceive(this.queue1.getName(), "foo")); + + InvocationData invocationData = + this.harness.getNextInvocationDataFor("foo", 0, TimeUnit.SECONDS); (2) + assertThat(invocationData.getArguments()[0], equalTo("foo")); (3) + assertThat((String) invocationData.getResult(), equalTo("FOO")); + } + + @Test + public void testOneWay() throws Exception { + this.rabbitTemplate.convertAndSend(this.queue2.getName(), "bar"); + this.rabbitTemplate.convertAndSend(this.queue2.getName(), "baz"); + this.rabbitTemplate.convertAndSend(this.queue2.getName(), "ex"); + + InvocationData invocationData = + this.harness.getNextInvocationDataFor("bar", 10, TimeUnit.SECONDS); (4) + Object[] args = invocationData.getArguments(); + assertThat((String) args[0], equalTo("bar")); + assertThat((String) args[1], equalTo(queue2.getName())); + + invocationData = this.harness.getNextInvocationDataFor("bar", 10, TimeUnit.SECONDS); + args = invocationData.getArguments(); + assertThat((String) args[0], equalTo("baz")); + + invocationData = this.harness.getNextInvocationDataFor("bar", 10, TimeUnit.SECONDS); + args = invocationData.getArguments(); + assertThat((String) args[0], equalTo("ex")); + assertEquals("ex", invocationData.getThrowable().getMessage()); (5) + } + +} +``` + +|**1**| Inject the harness into the test case so we can get access to the spy. | +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|Use `harness.getNextInvocationDataFor()` to retrieve the invocation data - in this case since it was a request/reply
scenario there is no need to wait for any time because the test thread was suspended in the `RabbitTemplate` waiting
for the result.|
+|**3**| We can then verify that the argument and result were as expected. |
+|**4**| This time, we need some time to wait for the data, since it’s an async operation on the container thread and we need
to suspend the test thread. | +|**5**| When the listener throws an exception, it is available in the `throwable` property of the invocation data. | + +| |When using custom `Answer` s with the harness, in order to operate properly, such answers should subclass `ForwardsInvocation` and get the actual listener (not the spy) from the harness (`getDelegate("myListener")`) and call `super.answer(invocation)`.
See the provided [Mockito `Answer` Implementations](#mockito-answer) source code for examples.|
+|---|---|
+
+#### 4.5.4. Using `TestRabbitTemplate`
+
+The `TestRabbitTemplate` is provided to perform some basic integration testing without the need for a broker.
+When you add it as a `@Bean` in your test case, it discovers all the listener containers in the context, whether declared as `@Bean` or `<bean/>`, or by using the `@RabbitListener` annotation.
+It currently only supports routing by queue name.
+The template extracts the message listener from the container and invokes it directly on the test thread.
+Request-reply messaging (`sendAndReceive` methods) is supported for listeners that return replies.
+
+The following test case uses the template:
+
+```
+@RunWith(SpringRunner.class)
+public class TestRabbitTemplateTests {
+
+    @Autowired
+    private TestRabbitTemplate template;
+
+    @Autowired
+    private Config config;
+
+    @Test
+    public void testSimpleSends() {
+        this.template.convertAndSend("foo", "hello1");
+        assertThat(this.config.fooIn, equalTo("foo:hello1"));
+        this.template.convertAndSend("bar", "hello2");
+        assertThat(this.config.barIn, equalTo("bar:hello2"));
+        assertThat(this.config.smlc1In, equalTo("smlc1:"));
+        this.template.convertAndSend("foo", "hello3");
+        assertThat(this.config.fooIn, equalTo("foo:hello1"));
+        this.template.convertAndSend("bar", "hello4");
+        assertThat(this.config.barIn, equalTo("bar:hello2"));
+        assertThat(this.config.smlc1In, equalTo("smlc1:hello3hello4"));
+
+        this.template.setBroadcast(true);
+        this.template.convertAndSend("foo", "hello5");
+        assertThat(this.config.fooIn, equalTo("foo:hello1foo:hello5"));
+        this.template.convertAndSend("bar", "hello6");
+        assertThat(this.config.barIn, equalTo("bar:hello2bar:hello6"));
+        assertThat(this.config.smlc1In, equalTo("smlc1:hello3hello4hello5hello6"));
+    }
+
+    @Test
+    public void testSendAndReceive() {
+        assertThat(this.template.convertSendAndReceive("baz", "hello"), equalTo("baz:hello"));
+    }
+```
+
+```
+    @Configuration
+    @EnableRabbit
+    public static class Config {
+
+        public String fooIn = "";
+
+        public String barIn = "";
+
+        public String smlc1In = "smlc1:";
+
+        @Bean
+        public TestRabbitTemplate template() throws IOException {
+            return new TestRabbitTemplate(connectionFactory());
+        }
+
+        @Bean
+        public ConnectionFactory connectionFactory() throws IOException {
+            ConnectionFactory factory = mock(ConnectionFactory.class);
+            Connection connection = mock(Connection.class);
+            Channel channel = mock(Channel.class);
+            willReturn(connection).given(factory).createConnection();
+            willReturn(channel).given(connection).createChannel(anyBoolean());
+            given(channel.isOpen()).willReturn(true);
+            return factory;
+        }
+
+        @Bean
+        public SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory() throws IOException {
+            SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
+            factory.setConnectionFactory(connectionFactory());
+            return factory;
+        }
+
+        @RabbitListener(queues = "foo")
+        public void foo(String in) {
+            this.fooIn += "foo:" + in;
+        }
+
+        @RabbitListener(queues = "bar")
+        public void bar(String in) {
+            this.barIn += "bar:" + in;
+        }
+
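+        // Returning a value from a listener makes it available to the template's
+        // request-reply (sendAndReceive) operations, as testSendAndReceive() above shows.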
+
+        @RabbitListener(queues = "baz")
+        public String baz(String in) {
+            return "baz:" + in;
+        }
+
+        @Bean
+        public SimpleMessageListenerContainer smlc1() throws IOException {
+            SimpleMessageListenerContainer container = new SimpleMessageListenerContainer(connectionFactory());
+            container.setQueueNames("foo", "bar");
+            container.setMessageListener(new MessageListenerAdapter(new Object() {
+
+                @SuppressWarnings("unused")
+                public void handleMessage(String in) {
+                    smlc1In += in;
+                }
+
+            }));
+            return container;
+        }
+
+    }
+
+}
+```
+
+#### 4.5.5. JUnit4 `@Rules`
+
+Spring AMQP version 1.7 and later provide an additional jar called `spring-rabbit-junit`.
+This jar contains a couple of utility `@Rule` instances for use when running JUnit4 tests.
+See [JUnit5 Conditions](#junit5-conditions) for JUnit5 testing.
+
+##### Using `BrokerRunning`
+
+`BrokerRunning` provides a mechanism to let tests succeed when a broker is not running (on `localhost`, by default).
+
+It also has utility methods to initialize and empty queues and to delete queues and exchanges.
+
+The following example shows its usage:
+
+```
+@ClassRule
+public static BrokerRunning brokerRunning = BrokerRunning.isRunningWithEmptyQueues("foo", "bar");
+
+@AfterClass
+public static void tearDown() {
+    brokerRunning.removeTestQueues("some.other.queue.too"); // removes foo, bar as well
+}
+```
+
+There are several `isRunning…` static methods, such as `isBrokerAndManagementRunning()`, which verifies that the broker has the management plugin enabled.
+
+###### Configuring the Rule
+
+There are times when you want tests to fail if there is no broker, such as a nightly CI build.
+To disable the rule at runtime, set an environment variable called `RABBITMQ_SERVER_REQUIRED` to `true`.
+
+You can override the broker properties, such as the hostname, with either setters or environment variables.
+
+The following example shows how to override properties with setters:
+
+```
+@ClassRule
+public static BrokerRunning brokerRunning = BrokerRunning.isRunningWithEmptyQueues("foo", "bar");
+
+static {
+    brokerRunning.setHostName("10.0.0.1");
+}
+
+@AfterClass
+public static void tearDown() {
+    brokerRunning.removeTestQueues("some.other.queue.too"); // removes foo, bar as well
+}
+```
+
+You can also override properties by setting the following environment variables:
+
+```
+public static final String BROKER_ADMIN_URI = "RABBITMQ_TEST_ADMIN_URI";
+public static final String BROKER_HOSTNAME = "RABBITMQ_TEST_HOSTNAME";
+public static final String BROKER_PORT = "RABBITMQ_TEST_PORT";
+public static final String BROKER_USER = "RABBITMQ_TEST_USER";
+public static final String BROKER_PW = "RABBITMQ_TEST_PASSWORD";
+public static final String BROKER_ADMIN_USER = "RABBITMQ_TEST_ADMIN_USER";
+public static final String BROKER_ADMIN_PW = "RABBITMQ_TEST_ADMIN_PASSWORD";
+```
+
+These environment variables override the default settings (`localhost:5672` for amqp and `localhost:15672/api/` for the management REST API).
+
+Changing the host name affects both the `amqp` and `management` REST API connection (unless the admin uri is explicitly set).
+
+`BrokerRunning` also provides a `static` method called `setEnvironmentVariableOverrides` that lets you pass in a map containing these variables.
+They override system environment variables.
+This might be useful if you wish to use different configuration for tests in multiple test suites.
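+
+The following is a minimal sketch of how that might look (note that, as the IMPORTANT note that follows explains, the overrides must be set up before the rule instance is created):
+
+```
+static {
+    Map<String, String> overrides = new HashMap<>();
+    overrides.put("RABBITMQ_TEST_HOSTNAME", "10.0.0.1");
+    overrides.put("RABBITMQ_TEST_PORT", "5673");
+    BrokerRunning.setEnvironmentVariableOverrides(overrides);
+}
+
+@ClassRule
+public static BrokerRunning brokerRunning = BrokerRunning.isRunningWithEmptyQueues("foo", "bar");
+```
+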
+IMPORTANT: The method must be called before invoking any of the `isRunning()` static methods that create the rule instance. +Variable values are applied to all instances created after this invocation. +Invoke `clearEnvironmentVariableOverrides()` to reset the rule to use defaults (including any actual environment variables). + +In your test cases, you can use the `brokerRunning` when creating the connection factory; `getConnectionFactory()` returns the rule’s RabbitMQ `ConnectionFactory`. +The following example shows how to do so: + +``` +@Bean +public CachingConnectionFactory rabbitConnectionFactory() { + return new CachingConnectionFactory(brokerRunning.getConnectionFactory()); +} +``` + +##### Using `LongRunningIntegrationTest` + +`LongRunningIntegrationTest` is a rule that disables long running tests. +You might want to use this on a developer system but ensure that the rule is disabled on, for example, nightly CI builds. + +The following example shows its usage: + +``` +@Rule +public LongRunningIntegrationTest longTests = new LongRunningIntegrationTest(); +``` + +To disable the rule at runtime, set an environment variable called `RUN_LONG_INTEGRATION_TESTS` to `true`. + +#### 4.5.6. JUnit5 Conditions + +Version 2.0.2 introduced support for JUnit5. + +##### Using the `@RabbitAvailable` Annotation + +This class-level annotation is similar to the `BrokerRunning` `@Rule` discussed in [JUnit4 `@Rules`](#junit-rules). +It is processed by the `RabbitAvailableCondition`. + +The annotation has three properties: + +* `queues`: An array of queues that are declared (and purged) before each test and deleted when all tests are complete. + +* `management`: Set this to `true` if your tests also require the management plugin installed on the broker. + +* `purgeAfterEach`: (Since version 2.2) when `true` (default), the `queues` will be purged between tests. + +It is used to check whether the broker is available and skip the tests if not. +As discussed in [Configuring the Rule](#brokerRunning-configure), the environment variable called `RABBITMQ_SERVER_REQUIRED`, if `true`, causes the tests to fail fast if there is no broker. +You can configure the condition by using environment variables as discussed in [Configuring the Rule](#brokerRunning-configure). + +In addition, the `RabbitAvailableCondition` supports argument resolution for parameterized test constructors and methods. +Two argument types are supported: + +* `BrokerRunningSupport`: The instance (before 2.2, this was a JUnit 4 `BrokerRunning` instance) + +* `ConnectionFactory`: The `BrokerRunningSupport` instance’s RabbitMQ connection factory + +The following example shows both: + +``` +@RabbitAvailable(queues = "rabbitAvailableTests.queue") +public class RabbitAvailableCTORInjectionTests { + + private final ConnectionFactory connectionFactory; + + public RabbitAvailableCTORInjectionTests(BrokerRunningSupport brokerRunning) { + this.connectionFactory = brokerRunning.getConnectionFactory(); + } + + @Test + public void test(ConnectionFactory cf) throws Exception { + assertSame(cf, this.connectionFactory); + Connection conn = this.connectionFactory.newConnection(); + Channel channel = conn.createChannel(); + DeclareOk declareOk = channel.queueDeclarePassive("rabbitAvailableTests.queue"); + assertEquals(0, declareOk.getConsumerCount()); + channel.close(); + conn.close(); + } + +} +``` + +The preceding test is in the framework itself and verifies the argument injection and that the condition created the queue properly. 
+
+A practical user test might be as follows:
+
+```
+@RabbitAvailable(queues = "rabbitAvailableTests.queue")
+public class RabbitAvailableCTORInjectionTests {
+
+    private final CachingConnectionFactory connectionFactory;
+
+    public RabbitAvailableCTORInjectionTests(BrokerRunningSupport brokerRunning) {
+        this.connectionFactory =
+            new CachingConnectionFactory(brokerRunning.getConnectionFactory());
+    }
+
+    @Test
+    public void test() throws Exception {
+        RabbitTemplate template = new RabbitTemplate(this.connectionFactory);
+        ...
+    }
+}
+```
+
+When you use a Spring annotation application context within a test class, you can get a reference to the condition’s connection factory through a static method called `RabbitAvailableCondition.getBrokerRunning()`.
+
+| |Starting with version 2.2, `getBrokerRunning()` returns a `BrokerRunningSupport` object; previously, the JUnit 4 `BrokerRunning` instance was returned.
The new class has the same API as `BrokerRunning`.|
+|---|---|
+
+The following test comes from the framework and demonstrates the usage:
+
+```
+@RabbitAvailable(queues = {
+        RabbitTemplateMPPIntegrationTests.QUEUE,
+        RabbitTemplateMPPIntegrationTests.REPLIES })
+@SpringJUnitConfig
+@DirtiesContext(classMode = ClassMode.AFTER_EACH_TEST_METHOD)
+public class RabbitTemplateMPPIntegrationTests {
+
+    public static final String QUEUE = "mpp.tests";
+
+    public static final String REPLIES = "mpp.tests.replies";
+
+    @Autowired
+    private RabbitTemplate template;
+
+    @Autowired
+    private Config config;
+
+    @Test
+    public void test() {
+
+        ...
+
+    }
+
+    @Configuration
+    @EnableRabbit
+    public static class Config {
+
+        @Bean
+        public CachingConnectionFactory cf() {
+            return new CachingConnectionFactory(RabbitAvailableCondition
+                    .getBrokerRunning()
+                    .getConnectionFactory());
+        }
+
+        @Bean
+        public RabbitTemplate template() {
+
+            ...
+
+        }
+
+        @Bean
+        public SimpleRabbitListenerContainerFactory
+                rabbitListenerContainerFactory() {
+
+            ...
+
+        }
+
+        @RabbitListener(queues = QUEUE)
+        public byte[] foo(byte[] in) {
+            return in;
+        }
+
+    }
+
+}
+```
+
+##### Using the `@LongRunning` Annotation
+
+Similar to the `LongRunningIntegrationTest` JUnit4 `@Rule`, this annotation causes tests to be skipped unless an environment variable (or system property) is set to `true`.
+The following example shows how to use it:
+
+```
+@RabbitAvailable(queues = SimpleMessageListenerContainerLongTests.QUEUE)
+@LongRunning
+public class SimpleMessageListenerContainerLongTests {
+
+    public static final String QUEUE = "SimpleMessageListenerContainerLongTests.queue";
+
+...
+
+}
+```
+
+By default, the variable is `RUN_LONG_INTEGRATION_TESTS`, but you can specify the variable name in the annotation’s `value` attribute.
+
+## 5. Spring Integration - Reference
+
+This part of the reference documentation provides a quick introduction to the AMQP support within the Spring Integration project.
+
+### 5.1. Spring Integration AMQP Support
+
+This brief chapter covers the relationship between the Spring Integration and the Spring AMQP projects.
+
+#### 5.1.1. Introduction
+
+The [Spring Integration](https://www.springsource.org/spring-integration) project includes AMQP Channel Adapters and Gateways that build upon the Spring AMQP project.
+Those adapters are developed and released in the Spring Integration project.
+In Spring Integration, “Channel Adapters” are unidirectional (one-way), whereas “Gateways” are bidirectional (request-reply).
+We provide an inbound-channel-adapter, an outbound-channel-adapter, an inbound-gateway, and an outbound-gateway.
+
+Since the AMQP adapters are part of the Spring Integration release, the documentation is available as part of the Spring Integration distribution.
+We provide a quick overview of the main features here.
+See the [Spring Integration Reference Guide](https://docs.spring.io/spring-integration/reference/htmlsingle/) for much more detail.
+
+#### 5.1.2. Inbound Channel Adapter
+
+To receive AMQP Messages from a queue, you can configure an `<amqp:inbound-channel-adapter>`.
+The following example shows how to configure an inbound channel adapter:
+
+```
+<amqp:inbound-channel-adapter channel="fromAMQP"
+                              queue-names="some.queue"
+                              connection-factory="rabbitConnectionFactory"/>
+```
+
+#### 5.1.3. Outbound Channel Adapter
+
+To send AMQP Messages to an exchange, you can configure an `<amqp:outbound-channel-adapter>`.
+You can optionally provide a 'routing-key' in addition to the exchange name.
+The following example shows how to define an outbound channel adapter:
+
+```
+<amqp:outbound-channel-adapter channel="toAMQP"
+                               exchange-name="some.exchange"
+                               routing-key="foo"
+                               amqp-template="rabbitTemplate"/>
+```
+
+#### 5.1.4. Inbound Gateway
+
+To receive an AMQP Message from a queue and respond to its reply-to address, you can configure an `<amqp:inbound-gateway>`.
+The following example shows how to define an inbound gateway:
+
+```
+<amqp:inbound-gateway request-channel="fromAMQP"
+                      reply-channel="toAMQP"
+                      queue-names="some.queue"
+                      connection-factory="rabbitConnectionFactory"/>
+```
+
+#### 5.1.5. Outbound Gateway
+
+To send AMQP Messages to an exchange and receive back a response from a remote client, you can configure an `<amqp:outbound-gateway>`.
+You can optionally provide a 'routing-key' in addition to the exchange name.
+The following example shows how to define an outbound gateway:
+
+```
+<amqp:outbound-gateway request-channel="toAMQP"
+                       reply-channel="fromAMQP"
+                       exchange-name="some.exchange"
+                       routing-key="foo"
+                       amqp-template="rabbitTemplate"/>
+```
+
+## 6. Other Resources
+
+In addition to this reference documentation, there exist a number of other resources that may help you learn about AMQP.
+
+### 6.1. Further Reading
+
+For those who are not familiar with AMQP, the [specification](https://www.amqp.org/resources/download) is actually quite readable.
+It is, of course, the authoritative source of information, and the Spring AMQP code should be easy to understand for anyone who is familiar with the spec.
+Our current implementation of the RabbitMQ support is based on their 2.8.x version, and it officially supports AMQP 0.8 and 0.9.1.
+We recommend reading the 0.9.1 document.
+
+There are many great articles, presentations, and blogs available on the RabbitMQ [Getting Started](https://www.rabbitmq.com/how.html) page.
+Since that is currently the only supported implementation for Spring AMQP, we also recommend that as a general starting point for all broker-related concerns.
+
+## Appendix A: Change History
+
+This section describes what changes have been made as versions have changed.
+
+### A.1. Current Release
+
+See [What’s New](#whats-new).
+
+### A.2. Previous Releases
+
+#### A.2.1. Changes in 2.3 Since 2.2
+
+This section describes the changes between version 2.2 and version 2.3.
+See [Change History](#change-history) for changes in previous versions.
+
+##### Connection Factory Changes
+
+Two additional connection factories are now provided.
+See [Choosing a Connection Factory](#choosing-factory) for more information.
+
+##### `@RabbitListener` Changes
+
+You can now specify a reply content type.
+See [Reply ContentType](#reply-content-type) for more information.
+
+##### Message Converter Changes
+
+The Jackson 2 `MessageConverter` s can now deserialize abstract classes (including interfaces) if the `ObjectMapper` is configured with a custom deserializer.
+See [Deserializing Abstract Classes](#jackson-abstract) for more information.
+
+##### Testing Changes
+
+A new annotation, `@SpringRabbitTest`, is provided to automatically configure some infrastructure beans when you are not using `@SpringBootTest`.
+See [@SpringRabbitTest](#spring-rabbit-test) for more information.
+
+##### RabbitTemplate Changes
+
+The template’s `ReturnCallback` has been refactored as `ReturnsCallback` for simpler use in lambda expressions.
+See [Correlated Publisher Confirms and Returns](#template-confirms) for more information.
+
+When using returns and correlated confirms, the `CorrelationData` now requires a unique `id` property.
+See [Correlated Publisher Confirms and Returns](#template-confirms) for more information.
+
+When using direct reply-to, you can now configure the template such that the server does not need to return correlation data with the reply.
+See [RabbitMQ Direct reply-to](#direct-reply-to) for more information.
+
+##### Listener Container Changes
+
+A new listener container property, `consumeDelay`, is now available; it is helpful when using the [RabbitMQ Sharding Plugin](https://github.com/rabbitmq/rabbitmq-sharding).
+
+The default `JavaLangErrorHandler` now calls `System.exit(99)`.
+To revert to the previous behavior (do nothing), add a no-op handler.
+
+The containers now support the `globalQos` property to apply the `prefetchCount` globally for the channel rather than for each consumer on the channel.
+
+See [Message Listener Container Configuration](#containerAttributes) for more information.
+
+##### MessagePostProcessor Changes
+
+The compressing `MessagePostProcessor` s now use a comma to separate multiple content encodings instead of a colon.
+The decompressors can handle both formats but, if you produce messages with this version that are consumed by versions earlier than 2.2.12, you should configure the compressor to use the old delimiter.
+See the IMPORTANT note in [Modifying Messages - Compression and More](#post-processing) for more information.
+
+##### Multiple Broker Support Improvements
+
+See [Multiple Broker (or Cluster) Support](#multi-rabbit) for more information.
+
+##### RepublishMessageRecoverer Changes
+
+A new subclass of this recoverer is now provided that supports publisher confirms.
+See [Message Listeners and the Asynchronous Case](#async-listeners) for more information.
+
+#### A.2.2. Changes in 2.2 Since 2.1
+
+This section describes the changes between version 2.1 and version 2.2.
+
+##### Package Changes
+
+The following classes/interfaces have been moved from `org.springframework.amqp.rabbit.core.support` to `org.springframework.amqp.rabbit.batch`:
+
+* `BatchingStrategy`
+
+* `MessageBatch`
+
+* `SimpleBatchingStrategy`
+
+In addition, `ListenerExecutionFailedException` has been moved from `org.springframework.amqp.rabbit.listener.exception` to `org.springframework.amqp.rabbit.support`.
+
+##### Dependency Changes
+
+JUnit (4) is now an optional dependency and no longer appears as a transitive dependency.
+
+The `spring-rabbit-junit` module is now a **compile** dependency of the `spring-rabbit-test` module, so a single `spring-rabbit-test` dependency gives you the full stack of testing utilities for AMQP components.
+
+##### "Breaking" API Changes
+
+The JUnit (5) `RabbitAvailableCondition.getBrokerRunning()` now returns a `BrokerRunningSupport` instance instead of a `BrokerRunning`, which depends on JUnit 4.
+It has the same API, so it is just a matter of changing the class name of any references.
+See [JUnit5 Conditions](#junit5-conditions) for more information.
+
+##### ListenerContainer Changes
+
+Messages with fatal exceptions are now rejected and NOT requeued, by default, even if the acknowledge mode is manual.
+See [Exception Handling](#exception-handling) for more information.
+
+Listener performance can now be monitored using Micrometer `Timer` s.
+See [Monitoring Listener Performance](#micrometer) for more information.
+
+##### @RabbitListener Changes
+
+You can now configure an `executor` on each listener, overriding the factory configuration, to more easily identify threads associated with the listener.
+You can now override the container factory’s `acknowledgeMode` property with the annotation’s `ackMode` property.
+See [overriding container factory properties](#listener-property-overrides) for more information.
+
+When using [batching](#receiving-batch), `@RabbitListener` methods can now receive a complete batch of messages in one call instead of getting them one-at-a-time.
+
+When receiving batched messages one-at-a-time, the last message has the `isLastInBatch` message property set to `true`.
+
+In addition, received batched messages now contain the `amqp_batchSize` header.
+
+Listeners can also consume batches created in the `SimpleMessageListenerContainer`, even if the batch is not created by the producer.
+See [Choosing a Container](#choose-container) for more information.
+
+Spring Data Projection interfaces are now supported by the `Jackson2JsonMessageConverter`.
+See [Using Spring Data Projection Interfaces](#data-projection) for more information.
+
+The `Jackson2JsonMessageConverter` now assumes the content is JSON if there is no `contentType` property, or it is the default (`application/octet-stream`).
+See [Converting from a `Message`](#Jackson2JsonMessageConverter-from-message) for more information.
+
+Similarly, the `Jackson2XmlMessageConverter` now assumes the content is XML if there is no `contentType` property, or it is the default (`application/octet-stream`).
+See [`Jackson2XmlMessageConverter`](#jackson2xml) for more information.
+
+When a `@RabbitListener` method returns a result, the bean and `Method` are now available in the reply message properties.
+This allows configuration of a `beforeSendReplyMessagePostProcessor` to, for example, set a header in the reply to indicate which method was invoked on the server.
+See [Reply Management](#async-annotation-driven-reply) for more information.
+
+You can now configure a `ReplyPostProcessor` to make modifications to a reply message before it is sent.
+See [Reply Management](#async-annotation-driven-reply) for more information.
+
+##### AMQP Logging Appenders Changes
+
+The Log4J and Logback `AmqpAppender` s now support a `verifyHostname` SSL option.
+
+Also, these appenders can now be configured not to add MDC entries as headers.
+The `addMdcAsHeaders` boolean option has been introduced to configure that behavior.
+
+The appenders now support the `SaslConfig` property.
+
+See [Logging Subsystem AMQP Appenders](#logging) for more information.
+
+##### MessageListenerAdapter Changes
+
+The `MessageListenerAdapter` now provides a new `buildListenerArguments(Object, Channel, Message)` method to build an array of arguments to be passed to the target listener; the old method is deprecated.
+See [`MessageListenerAdapter`](#message-listener-adapter) for more information.
+
+##### Exchange/Queue Declaration Changes
+
+The `ExchangeBuilder` and `QueueBuilder` fluent APIs used to create `Exchange` and `Queue` objects for declaration by `RabbitAdmin` now support "well known" arguments.
+See [Builder API for Queues and Exchanges](#builder-api) for more information.
+
+The `RabbitAdmin` has a new property, `explicitDeclarationsOnly`.
+See [Conditional Declaration](#conditional-declaration) for more information.
+
+##### Connection Factory Changes
+
+The `CachingConnectionFactory` has a new property, `shuffleAddresses`.
+When providing a list of broker node addresses, the list is shuffled before creating a connection, so that the order in which the connections are attempted is random.
+See [Connecting to a Cluster](#cluster) for more information.
+
+When using publisher confirms and returns, the callbacks are now invoked on the connection factory’s `executor`.
+This avoids a possible deadlock in the `amqp-client` library if you perform rabbit operations from within the callback.
+See [Correlated Publisher Confirms and Returns](#template-confirms) for more information.
+
+Also, the publisher confirm type is now specified with the `ConfirmType` enum instead of the two mutually exclusive setter methods.
+
+The `RabbitConnectionFactoryBean` now uses TLS 1.2 by default when SSL is enabled.
+See [`RabbitConnectionFactoryBean` and Configuring SSL](#rabbitconnectionfactorybean-configuring-ssl) for more information.
+
+##### New MessagePostProcessor Classes
+
+Classes `DeflaterPostProcessor` and `InflaterPostProcessor` were added to support compression and decompression, respectively, when the message content-encoding is set to `deflate`.
+
+##### Other Changes
+
+The `Declarables` object (for declaring multiple queues, exchanges, and bindings) now has a filtered getter for each type.
+See [Declaring Collections of Exchanges, Queues, and Bindings](#collection-declaration) for more information.
+
+You can now customize each `Declarable` bean before the `RabbitAdmin` processes the declaration thereof.
+See [Automatic Declaration of Exchanges, Queues, and Bindings](#automatic-declaration) for more information.
+
+`singleActiveConsumer()` has been added to the `QueueBuilder` to set the `x-single-active-consumer` queue argument.
+See [Builder API for Queues and Exchanges](#builder-api) for more information.
+
+Outbound headers with values of type `Class` are now mapped using `getName()` instead of `toString()`.
+See [Message Properties Converters](#message-properties-converters) for more information.
+
+Recovery of failed producer-created batches is now supported.
+See [Retry with Batch Listeners](#batch-retry) for more information.
+
+#### A.2.3. Changes in 2.1 Since 2.0
+
+##### AMQP Client library
+
+Spring AMQP now uses the 5.4.x version of the `amqp-client` library provided by the RabbitMQ team.
+This client has auto-recovery configured by default.
+See [RabbitMQ Automatic Connection/Topology recovery](#auto-recovery).
+
+| |As of version 4.0, the client enables automatic recovery by default.
While compatible with this feature, Spring AMQP has its own recovery mechanisms, and the client recovery feature generally is not needed.
We recommend disabling `amqp-client` automatic recovery, to avoid getting `AutoRecoverConnectionNotCurrentlyOpenException` instances when the broker is available but the connection has not yet recovered.
Starting with version 1.7.1, Spring AMQP disables it unless you explicitly create your own RabbitMQ connection factory and provide it to the `CachingConnectionFactory`.
RabbitMQ `ConnectionFactory` instances created by the `RabbitConnectionFactoryBean` also have the option disabled by default.|
+|---|---|
+
+##### Package Changes
+
+Certain classes have moved to different packages.
+Most are internal classes and do not affect user applications.
+Two exceptions are `ChannelAwareMessageListener` and `RabbitListenerErrorHandler`.
+These interfaces are now in `org.springframework.amqp.rabbit.listener.api`.
+
+##### Publisher Confirms Changes
+
+Channels enabled for publisher confirmations are not returned to the cache while there are outstanding confirmations.
+See [Correlated Publisher Confirms and Returns](#template-confirms) for more information.
+
+##### Listener Container Factory Improvements
+
+You can now use the listener container factories to create any listener container, not only those for use with `@RabbitListener` annotations or the `@RabbitListenerEndpointRegistry`.
+See [Using Container Factories](#using-container-factories) for more information.
+
+`ChannelAwareMessageListener` now inherits from `MessageListener`.
+
+##### Broker Event Listener
+
+A `BrokerEventListener` is introduced to publish selected broker events as `ApplicationEvent` instances.
+See [Broker Event Listener](#broker-events) for more information.
+
+##### RabbitAdmin Changes
+
+The `RabbitAdmin` discovers beans of type `Declarables` (which is a container for `Declarable` - `Queue`, `Exchange`, and `Binding` objects) and declares the contained objects on the broker.
+Users are discouraged from using the old mechanism of declaring collections of those objects and should use `Declarables` beans instead.
+By default, the old mechanism is disabled.
+See [Declaring Collections of Exchanges, Queues, and Bindings](#collection-declaration) for more information.
+
+`AnonymousQueue` instances are now declared with `x-queue-master-locator` set to `client-local` by default, to ensure the queues are created on the node the application is connected to.
+See [Configuring the Broker](#broker-configuration) for more information.
+
+##### RabbitTemplate Changes
+
+You can now configure the `RabbitTemplate` with the `noLocalReplyConsumer` option to control a `noLocal` flag for reply consumers in the `sendAndReceive()` operations.
+See [Request/Reply Messaging](#request-reply) for more information.
+
+`CorrelationData` for publisher confirmations now has a `ListenableFuture`, which you can use to get the acknowledgment instead of using a callback.
+When returns and confirmations are enabled, the correlation data, if provided, is populated with the returned message.
+See [Correlated Publisher Confirms and Returns](#template-confirms) for more information.
+
+A method called `replyTimedOut` is now provided to notify subclasses that a reply has timed out, allowing for any state cleanup.
+See [Reply Timeout](#reply-timeout) for more information.
+
+You can now specify an `ErrorHandler` to be invoked, when using request/reply with a `DirectReplyToMessageListenerContainer` (the default), when exceptions occur while replies are delivered (for example, late replies).
+See `setReplyErrorHandler` on the `RabbitTemplate`.
+(Also since 2.0.11).
+
+##### Message Conversion
+
+We introduced a new `Jackson2XmlMessageConverter` to support converting messages from and to XML format.
+See [`Jackson2XmlMessageConverter`](#jackson2xml) for more information.
+
+##### Management REST API
+
+The `RabbitManagementTemplate` is now deprecated in favor of using `com.rabbitmq.http.client.Client` (or `com.rabbitmq.http.client.ReactorNettyClient`) directly.
+See [RabbitMQ REST API](#management-rest-api) for more information.
+
+##### `@RabbitListener` Changes
+
+The listener container factory can now be configured with a `RetryTemplate` and, optionally, a `RecoveryCallback` used when sending replies.
+See [Enable Listener Endpoint Annotations](#async-annotation-driven-enable) for more information.
+
+##### Async `@RabbitListener` Return
+
+`@RabbitListener` methods can now return `ListenableFuture` or `Mono`.
+See [Asynchronous `@RabbitListener` Return Types](#async-returns) for more information.
+
+##### Connection Factory Bean Changes
+
+By default, the `RabbitConnectionFactoryBean` now calls `enableHostnameVerification()`.
+To revert to the previous behavior, set the `enableHostnameVerification` property to `false`.
+
+##### Connection Factory Changes
+
+The `CachingConnectionFactory` now unconditionally disables auto-recovery in the underlying RabbitMQ `ConnectionFactory`, even if a pre-configured instance is provided in a constructor.
+While steps have been taken to make Spring AMQP compatible with auto recovery, certain corner cases have arisen where issues remain.
+Spring AMQP has had its own recovery mechanism since 1.0.0 and does not need to use the recovery provided by the client.
+While it is still possible to enable the feature (using `cachingConnectionFactory.getRabbitConnectionFactory()` `.setAutomaticRecoveryEnabled()`) after the `CachingConnectionFactory` is constructed, **we strongly recommend that you not do so**.
+We recommend that you use a separate RabbitMQ `ConnectionFactory` if you need auto-recovering connections when using the client factory directly (rather than using Spring AMQP components).
+
+##### Listener Container Changes
+
+The default `ConditionalRejectingErrorHandler` now completely discards messages that cause fatal errors if an `x-death` header is present.
+See [Exception Handling](#exception-handling) for more information.
+
+##### Immediate requeue
+
+A new `ImmediateRequeueAmqpException` is introduced to notify a listener container that the message has to be re-queued.
+To use this feature, a new `ImmediateRequeueMessageRecoverer` implementation is added.
+
+See [Message Listeners and the Asynchronous Case](#async-listeners) for more information.
+
+#### A.2.4. Changes in 2.0 Since 1.7
+
+##### Using `CachingConnectionFactory`
+
+Starting with version 2.0.2, you can configure the `RabbitTemplate` to use a different connection from the one used by listener containers.
+This change avoids deadlocked consumers when producers are blocked for any reason.
+See [Using a Separate Connection](#separate-connection) for more information.
+
+##### AMQP Client library
+
+Spring AMQP now uses the new 5.0.x version of the `amqp-client` library provided by the RabbitMQ team.
+This client has auto-recovery configured by default.
While compatible with this feature, Spring AMQP has its own recovery mechanisms, and the client recovery feature generally is not needed.
We recommend that you disable `amqp-client` automatic recovery, to avoid getting `AutoRecoverConnectionNotCurrentlyOpenException` instances when the broker is available but the connection has not yet recovered.
Starting with version 1.7.1, Spring AMQP disables it unless you explicitly create your own RabbitMQ connection factory and provide it to the `CachingConnectionFactory`.
RabbitMQ `ConnectionFactory` instances created by the `RabbitConnectionFactoryBean` also have the option disabled by default.|
+|---|---|
+
+##### General Changes
+
+The `ExchangeBuilder` now builds durable exchanges by default.
+The `@Exchange` annotation used within a `@QueueBinding` also declares durable exchanges by default.
+The `@Queue` annotation used within a `@RabbitListener` by default declares durable queues if named and non-durable if anonymous.
+See [Builder API for Queues and Exchanges](#builder-api) and [Annotation-driven Listener Endpoints](#async-annotation-driven) for more information.
+
+##### Deleted Classes
+
+`UniquelyNamedQueue` is no longer provided.
+It is unusual to create a durable non-auto-delete queue with a unique name.
+This class has been deleted.
+If you require its functionality, use `new Queue(UUID.randomUUID().toString())`.
+
+##### New Listener Container
+
+The `DirectMessageListenerContainer` has been added alongside the existing `SimpleMessageListenerContainer`.
+See [Choosing a Container](#choose-container) and [Message Listener Container Configuration](#containerAttributes) for information about choosing which container to use as well as how to configure them.
+
+##### Log4j Appender
+
+This appender is no longer available due to the end-of-life of Log4j.
+See [Logging Subsystem AMQP Appenders](#logging) for information about the available log appenders.
+
+##### `RabbitTemplate` Changes
+
+| |Previously, a non-transactional `RabbitTemplate` participated in an existing transaction if it ran on a transactional listener container thread.
This was a serious bug.
However, users might have relied on this behavior.
Starting with version 1.6.2, you must set the `channelTransacted` boolean on the template for it to participate in the container transaction.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The `RabbitTemplate` now uses a `DirectReplyToMessageListenerContainer` (by default) instead of creating a new consumer for each request. +See [RabbitMQ Direct reply-to](#direct-reply-to) for more information. + +The `AsyncRabbitTemplate` now supports direct reply-to. +See [Async Rabbit Template](#async-template) for more information. + +The `RabbitTemplate` and `AsyncRabbitTemplate` now have `receiveAndConvert` and `convertSendAndReceiveAsType` methods that take a `ParameterizedTypeReference` argument, letting the caller specify the type to which to convert the result. +This is particularly useful for complex types or when type information is not conveyed in message headers. +It requires a `SmartMessageConverter` such as the `Jackson2JsonMessageConverter`. +See [Receiving Messages](#receiving-messages), [Request/Reply Messaging](#request-reply), [Async Rabbit Template](#async-template), and [Converting From a `Message` With `RabbitTemplate`](#json-complex) for more information. + +You can now use a `RabbitTemplate` to perform multiple operations on a dedicated channel. +See [Scoped Operations](#scoped-operations) for more information. + +##### Listener Adapter + +A convenient `FunctionalInterface` is available for using lambdas with the `MessageListenerAdapter`. +See [`MessageListenerAdapter`](#message-listener-adapter) for more information. + +##### Listener Container Changes + +###### Prefetch Default Value + +The prefetch default value used to be 1, which could lead to under-utilization of efficient consumers. +The default prefetch value is now 250, which should keep consumers busy in most common scenarios and, +thus, improve throughput. + +| |There are scenarios where the prefetch value should
be low — for example, with large messages, especially if the processing is slow (messages could add up
to a large amount of memory in the client process), and if strict message ordering is necessary
(the prefetch value should be set back to 1 in this case).
Also, with low-volume messaging and multiple consumers (including concurrency within a single listener container instance), you may wish to reduce the prefetch to get a more even distribution of messages across consumers.|
+|---|---|
+
+For more background about prefetch, see this post about [consumer utilization in RabbitMQ](https://www.rabbitmq.com/blog/2014/04/14/finding-bottlenecks-with-rabbitmq-3-3/) and this post about [queuing theory](https://www.rabbitmq.com/blog/2012/05/11/some-queuing-theory-throughput-latency-and-bandwidth/).
+
+###### Message Count
+
+Previously, `MessageProperties.getMessageCount()` returned `0` for messages emitted by the container.
+This property applies only when you use `basicGet` (for example, from `RabbitTemplate.receive()` methods) and is now initialized to `null` for container messages.
+
+###### Transaction Rollback Behavior
+
+Message re-queue on transaction rollback is now consistent, regardless of whether or not a transaction manager is configured.
+See [A note on Rollback of Received Messages](#transaction-rollback) for more information.
+
+###### Shutdown Behavior
+
+If the container threads do not respond to a shutdown within `shutdownTimeout`, the channels are forced closed by default.
+See [Message Listener Container Configuration](#containerAttributes) for more information.
+
+###### After Receive Message Post Processors
+
+If a `MessagePostProcessor` in the `afterReceiveMessagePostProcessors` property returns `null`, the message is discarded (and acknowledged if appropriate).
+
+##### Connection Factory Changes
+
+The connection and channel listener interfaces now provide a mechanism to obtain information about exceptions.
+See [Connection and Channel Listeners](#connection-channel-listeners) and [Publishing is Asynchronous — How to Detect Successes and Failures](#publishing-is-async) for more information.
+
+A new `ConnectionNameStrategy` is now provided to populate the application-specific identification of the target RabbitMQ connection from the `AbstractConnectionFactory`.
+See [Connection and Resource Management](#connections) for more information.
+
+##### Retry Changes
+
+The `MissingMessageIdAdvice` is no longer provided.
+Its functionality is now built-in.
+See [Failures in Synchronous Operations and Options for Retry](#retry) for more information.
+
+##### Anonymous Queue Naming
+
+By default, `AnonymousQueues` are now named with the default `Base64UrlNamingStrategy` instead of a simple `UUID` string.
+See [`AnonymousQueue`](#anonymous-queue) for more information.
+
+##### `@RabbitListener` Changes
+
+You can now provide simple queue declarations (bound only to the default exchange) in `@RabbitListener` annotations.
+See [Annotation-driven Listener Endpoints](#async-annotation-driven) for more information.
+
+You can now configure `@RabbitListener` annotations so that any exceptions are returned to the sender.
+You can also configure a `RabbitListenerErrorHandler` to handle exceptions.
+See [Handling Exceptions](#annotation-error-handling) for more information.
+
+You can now bind a queue with multiple routing keys when you use the `@QueueBinding` annotation.
+Also, `@QueueBinding.exchange()` now supports custom exchange types and declares durable exchanges by default.
+
+You can now set the `concurrency` of the listener container at the annotation level rather than having to configure a different container factory for different concurrency settings.
+
+You can now set the `autoStartup` property of the listener container at the annotation level, overriding the default setting in the container factory.
+
+You can now set after receive and before send (reply) `MessagePostProcessor` instances in the `RabbitListener` container factories.
+
+See [Annotation-driven Listener Endpoints](#async-annotation-driven) for more information.
+
+Starting with version 2.0.3, one of the `@RabbitHandler` annotations on a class-level `@RabbitListener` can be designated as the default.
+See [Multi-method Listeners](#annotation-method-selection) for more information.
+
+##### Container Conditional Rollback
+
+When using an external transaction manager (such as JDBC), rule-based rollback is now supported when you provide the container with a transaction attribute.
+It is also now more flexible when you use a transaction advice.
+See [Conditional Rollback](#conditional-rollback) for more information.
+
+##### Remove Jackson 1.x support
+
+Deprecated in previous versions, Jackson `1.x` converters and related components have now been deleted.
+You can use similar components based on Jackson 2.x.
+See [Jackson2JsonMessageConverter](#json-message-converter) for more information.
+
+##### JSON Message Converter
+
+When the `__TypeId__` is set to `Hashtable` for an inbound JSON message, the default conversion type is now `LinkedHashMap`.
+Previously, it was `Hashtable`.
+To revert to a `Hashtable`, you can use `setDefaultMapType` on the `DefaultClassMapper`.
+
+##### XML Parsers
+
+When parsing `Queue` and `Exchange` XML components, the parsers no longer register the `name` attribute value as a bean alias if an `id` attribute is present.
+See [A Note On the `id` and `name` Attributes](#note-id-name) for more information.
+
+##### Blocked Connection
+
+You can now inject the `com.rabbitmq.client.BlockedListener` into the `org.springframework.amqp.rabbit.connection.Connection` object.
+Also, the `ConnectionBlockedEvent` and `ConnectionUnblockedEvent` events are emitted by the `ConnectionFactory` when the connection is blocked or unblocked by the Broker.
+
+See [Connection and Resource Management](#connections) for more information.
+
+#### A.2.5. Changes in 1.7 Since 1.6
+
+##### AMQP Client library
+
+Spring AMQP now uses the new 4.0.x version of the `amqp-client` library provided by the RabbitMQ team.
+This client has auto-recovery configured by default.
+See [RabbitMQ Automatic Connection/Topology recovery](#auto-recovery).
+
+| |The 4.0.x client enables automatic recovery by default.
While compatible with this feature, Spring AMQP has its own recovery mechanisms, and the client recovery feature generally is not needed.
We recommend disabling `amqp-client` automatic recovery, to avoid getting `AutoRecoverConnectionNotCurrentlyOpenException` instances when the broker is available but the connection has not yet recovered.
Starting with version 1.7.1, Spring AMQP disables it unless you explicitly create your own RabbitMQ connection factory and provide it to the `CachingConnectionFactory`.
RabbitMQ `ConnectionFactory` instances created by the `RabbitConnectionFactoryBean` also have the option disabled by default.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### Log4j 2 upgrade + +The minimum Log4j 2 version (for the `AmqpAppender`) is now `2.7`. +The framework is no longer compatible with previous versions. +See [Logging Subsystem AMQP Appenders](#logging) for more information. + +##### Logback Appender + +This appender no longer captures caller data (method, line number) by default. +You can re-enable it by setting the `includeCallerData` configuration option. +See [Logging Subsystem AMQP Appenders](#logging) for information about the available log appenders. + +##### Spring Retry Upgrade + +The minimum Spring Retry version is now `1.2`. +The framework is no longer compatible with previous versions. + +###### Shutdown Behavior + +You can now set `forceCloseChannel` to `true` so that, if the container threads do not respond to a shutdown within `shutdownTimeout`, the channels are forced closed, +causing any unacked messages to be re-queued. +See [Message Listener Container Configuration](#containerAttributes) for more information. + +##### FasterXML Jackson upgrade + +The minimum Jackson version is now `2.8`. +The framework is no longer compatible with previous versions. + +##### JUnit `@Rules` + +Rules that have previously been used internally by the framework have now been made available in a separate jar called `spring-rabbit-junit`. +See [JUnit4 `@Rules`](#junit-rules) for more information. + +##### Container Conditional Rollback + +When you use an external transaction manager (such as JDBC), rule-based rollback is now supported when you provide the container with a transaction attribute. +It is also now more flexible when you use a transaction advice. + +##### Connection Naming Strategy + +A new `ConnectionNameStrategy` is now provided to populate the application-specific identification of the target RabbitMQ connection from the `AbstractConnectionFactory`. +See [Connection and Resource Management](#connections) for more information. + +##### Listener Container Changes + +###### Transaction Rollback Behavior + +You can now configure message re-queue on transaction rollback to be consistent, regardless of whether or not a transaction manager is configured. +See [A note on Rollback of Received Messages](#transaction-rollback) for more information. + +#### A.2.6. Earlier Releases + +See [Previous Releases](#previous-whats-new) for changes in previous versions. + +#### A.2.7. Changes in 1.6 Since 1.5 + +##### Testing Support + +A new testing support library is now provided. +See [Testing Support](#testing) for more information. + +##### Builder + +Builders that provide a fluent API for configuring `Queue` and `Exchange` objects are now available. 
+See [Builder API for Queues and Exchanges](#builder-api) for more information.
+
+##### Namespace Changes
+
+###### Connection Factory
+
+You can now add a `thread-factory` to a connection factory bean declaration — for example, to name the threads created by the `amqp-client` library.
+See [Connection and Resource Management](#connections) for more information.
+
+When you use `CacheMode.CONNECTION`, you can now limit the total number of connections allowed.
+See [Connection and Resource Management](#connections) for more information.
+
+###### Queue Definitions
+
+You can now provide a naming strategy for anonymous queues.
+See [`AnonymousQueue`](#anonymous-queue) for more information.
+
+##### Listener Container Changes
+
+###### Idle Message Listener Detection
+
+You can now configure listener containers to publish `ApplicationEvent` instances when idle.
+See [Detecting Idle Asynchronous Consumers](#idle-containers) for more information.
+
+###### Mismatched Queue Detection
+
+By default, when a listener container starts, if queues with mismatched properties or arguments are detected, the container logs the exception but continues to listen.
+The container now has a property called `mismatchedQueuesFatal`, which prevents the container (and context) from starting if the problem is detected during startup.
+It also stops the container if the problem is detected later, such as after recovering from a connection failure.
+See [Message Listener Container Configuration](#containerAttributes) for more information.
+
+###### Listener Container Logging
+
+The listener container now provides its `beanName` to the internal `SimpleAsyncTaskExecutor` as the `threadNamePrefix`, which is useful for log analysis.
+
+###### Default Error Handler
+
+The default error handler (`ConditionalRejectingErrorHandler`) now considers irrecoverable `@RabbitListener` exceptions as fatal.
+See [Exception Handling](#exception-handling) for more information.
+
+##### `AutoDeclare` and `RabbitAdmin` Instances
+
+See [Message Listener Container Configuration](#containerAttributes) (`autoDeclare`) for some changes to the semantics of that option with respect to the use of `RabbitAdmin` instances in the application context.
+
+##### `AmqpTemplate`: Receive with Timeout
+
+A number of new `receive()` methods with `timeout` have been introduced for the `AmqpTemplate` and its `RabbitTemplate` implementation.
+See [Polling Consumer](#polling-consumer) for more information.
+
+##### Using `AsyncRabbitTemplate`
+
+A new `AsyncRabbitTemplate` has been introduced.
+This template provides a number of send and receive methods, where the return value is a `ListenableFuture`, which can be used later to obtain the result either synchronously or asynchronously (a brief sketch follows at the end of this section).
+See [Async Rabbit Template](#async-template) for more information.
+
+##### `RabbitTemplate` Changes
+
+1.4.1 introduced the ability to use [direct reply-to](https://www.rabbitmq.com/direct-reply-to.html) when the broker supports it.
+It is more efficient than using a temporary queue for each reply.
+This version lets you override this default behavior and use a temporary queue by setting the `useTemporaryReplyQueues` property to `true`.
+See [RabbitMQ Direct reply-to](#direct-reply-to) for more information.
+
+The `RabbitTemplate` now supports a `user-id-expression` (`userIdExpression` when using Java configuration).
+See [Validated User-ID RabbitMQ documentation](https://www.rabbitmq.com/validated-user-id.html) and [Validated User Id](#template-user-id) for more information.
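+
+The `AsyncRabbitTemplate` introduced above can be used as in the following sketch; the exchange and routing key names are illustrative assumptions, and `rabbitTemplate` is assumed to be an already-configured `RabbitTemplate`:
+
+```
+AsyncRabbitTemplate asyncTemplate = new AsyncRabbitTemplate(rabbitTemplate);
+asyncTemplate.start();
+ListenableFuture<String> future =
+        asyncTemplate.convertSendAndReceive("some.exchange", "some.routing.key", "a request");
+future.addCallback(
+        reply -> System.out.println("Reply: " + reply),   // runs when the reply arrives
+        ex -> System.err.println("No reply: " + ex));     // runs on failure or timeout
+```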
+
+##### Message Properties
+
+###### Using `CorrelationId`
+
+The `correlationId` message property can now be a `String`.
+See [Message Properties Converters](#message-properties-converters) for more information.
+
+###### Long String Headers
+
+Previously, the `DefaultMessagePropertiesConverter` “converted” headers longer than the long string limit (default 1024) to a `DataInputStream` (actually, it referenced the `LongString` instance’s `DataInputStream`).
+On output, this header was not converted (except to a `String` by calling `toString()` on the stream).
+
+With this release, long `LongString` instances are now left as `LongString` instances by default.
+You can access the contents by using the `getBytes()`, `toString()`, or `getStream()` methods.
+A large incoming `LongString` is now correctly “converted” on output too.
+
+See [Message Properties Converters](#message-properties-converters) for more information.
+
+###### Inbound Delivery Mode
+
+The `deliveryMode` property is no longer mapped to the `MessageProperties.deliveryMode`.
+This change avoids unintended propagation if the same `MessageProperties` object is used to send an outbound message.
+Instead, the inbound `deliveryMode` header is mapped to `MessageProperties.receivedDeliveryMode`.
+
+See [Message Properties Converters](#message-properties-converters) for more information.
+
+When using annotated endpoints, the header is provided in the header named `AmqpHeaders.RECEIVED_DELIVERY_MODE`.
+
+See [Annotated Endpoint Method Signature](#async-annotation-driven-enable-signature) for more information.
+
+###### Inbound User ID
+
+The `user_id` property is no longer mapped to the `MessageProperties.userId`.
+This change avoids unintended propagation if the same `MessageProperties` object is used to send an outbound message.
+Instead, the inbound `userId` header is mapped to `MessageProperties.receivedUserId`.
+
+See [Message Properties Converters](#message-properties-converters) for more information.
+
+When you use annotated endpoints, the header is provided in the header named `AmqpHeaders.RECEIVED_USER_ID`.
+
+See [Annotated Endpoint Method Signature](#async-annotation-driven-enable-signature) for more information.
+
+##### `RabbitAdmin` Changes
+
+###### Declaration Failures
+
+Previously, the `ignoreDeclarationFailures` flag took effect only for `IOException` on the channel (such as mis-matched arguments).
+It now takes effect for any exception (such as `TimeoutException`).
+In addition, a `DeclarationExceptionEvent` is now published whenever a declaration fails.
+The last declaration exception event is also available on the `RabbitAdmin` as the `lastDeclarationExceptionEvent` property.
+See [Configuring the Broker](#broker-configuration) for more information.
+
+##### `@RabbitListener` Changes
+
+###### Multiple Containers for Each Bean
+
+When you use Java 8 or later, you can now add multiple `@RabbitListener` annotations to `@Bean` classes or their methods.
+When using Java 7 or earlier, you can use the `@RabbitListeners` container annotation to provide the same functionality.
+See [`@Repeatable` `@RabbitListener`](#repeatable-rabbit-listener) for more information.
+
+###### `@SendTo` SpEL Expressions
+
+`@SendTo` values for routing replies with no `replyTo` property can now be SpEL expressions evaluated against the request/reply.
+See [Reply Management](#async-annotation-driven-reply) for more information.
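+
+As a minimal illustration of the runtime `@SendTo` expressions just described, the following hypothetical listener routes its reply based on the result; the queue name and the `Order`/`OrderReply` types are assumptions for this example:
+
+```
+@RabbitListener(queues = "orders")
+@SendTo("!{'replies.' + result.status}") // SpEL, evaluated against the invocation result
+public OrderReply handle(Order order) {
+    return process(order); // the reply is routed to 'replies.<status>'
+}
+```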
+
+###### `@QueueBinding` Improvements
+
+You can now specify arguments for queues, exchanges, and bindings in `@QueueBinding` annotations.
+Header exchanges are now supported by `@QueueBinding`.
+See [Annotation-driven Listener Endpoints](#async-annotation-driven) for more information.
+
+##### Delayed Message Exchange
+
+Spring AMQP now has first-class support for the RabbitMQ Delayed Message Exchange plugin.
+See [Delayed Message Exchange](#delayed-message-exchange) for more information.
+
+##### Exchange Internal Flag
+
+Any `Exchange` definitions can now be marked as `internal`, and `RabbitAdmin` passes the value to the broker when declaring the exchange.
+See [Configuring the Broker](#broker-configuration) for more information.
+
+##### `CachingConnectionFactory` Changes
+
+###### `CachingConnectionFactory` Cache Statistics
+
+The `CachingConnectionFactory` now provides cache properties at runtime and over JMX.
+See [Runtime Cache Properties](#runtime-cache-properties) for more information.
+
+###### Accessing the Underlying RabbitMQ Connection Factory
+
+A new getter has been added to provide access to the underlying factory.
+You can use this getter, for example, to add custom connection properties.
+See [Adding Custom Client Connection Properties](#custom-client-props) for more information.
+
+###### Channel Cache
+
+The default channel cache size has been increased from 1 to 25.
+See [Connection and Resource Management](#connections) for more information.
+
+In addition, the `SimpleMessageListenerContainer` no longer adjusts the cache size to be at least as large as the number of `concurrentConsumers` — this was superfluous, since the container consumer channels are never cached.
+
+##### Using `RabbitConnectionFactoryBean`
+
+The factory bean now exposes a property to add client connection properties to connections made by the resulting factory.
+
+##### Java Deserialization
+
+You can now configure an “allowed list” of allowable classes when you use Java deserialization.
+You should consider creating an allowed list if you accept messages with serialized Java objects from untrusted sources.
+See [Java Deserialization](#java-deserialization) for more information.
+
+##### JSON `MessageConverter`
+
+Improvements to the JSON message converter now allow the consumption of messages that do not have type information in message headers.
+See [Message Conversion for Annotated Methods](#async-annotation-conversion) and [Jackson2JsonMessageConverter](#json-message-converter) for more information.
+
+##### Logging Appenders
+
+###### Log4j 2
+
+A Log4j 2 appender has been added, and the appenders can now be configured with an `addresses` property to connect to a broker cluster.
+
+###### Client Connection Properties
+
+You can now add custom client connection properties to RabbitMQ connections.
+
+See [Logging Subsystem AMQP Appenders](#logging) for more information.
+
+#### A.2.8. Changes in 1.5 Since 1.4
+
+##### `spring-erlang` Is No Longer Supported
+
+The `spring-erlang` jar is no longer included in the distribution.
+Use [the RabbitMQ REST API](#management-rest-api) instead.
+
+##### `CachingConnectionFactory` Changes
+
+###### Empty Addresses Property in `CachingConnectionFactory`
+
+Previously, if the connection factory was configured with a host and port but an empty String was also supplied for `addresses`, the host and port were ignored.
+Now, an empty `addresses` String is treated the same as a `null`, and the host and port are used.
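+
+A minimal sketch of the corrected `addresses` behavior described above (the host and port values are assumptions):
+
+```
+CachingConnectionFactory connectionFactory = new CachingConnectionFactory();
+connectionFactory.setHost("rabbit1.example.com");
+connectionFactory.setPort(5672);
+connectionFactory.setAddresses(""); // now treated the same as null: the host and port above are used
+```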
+
+###### URI Constructor
+
+The `CachingConnectionFactory` has an additional constructor, with a `URI` parameter, to configure the broker connection.
+
+###### Connection Reset
+
+A new method called `resetConnection()` has been added to let users reset the connection (or connections).
+You might use this, for example, to reconnect to the primary broker after failing over to the secondary broker.
+This **does** impact in-process operations.
+The existing `destroy()` method does exactly the same, but the new method has a less daunting name.
+
+##### Properties to Control Container Queue Declaration Behavior
+
+When the listener container consumers start, they attempt to passively declare the queues to ensure they are available on the broker.
+Previously, if these declarations failed (for example, because the queues didn’t exist) or when an HA queue was being moved, the retry logic was fixed at three retry attempts at five-second intervals.
+If the queues still do not exist, the behavior is controlled by the `missingQueuesFatal` property (default: `true`).
+Also, for containers configured to listen from multiple queues, if only a subset of queues are available, the consumer retried the missing queues at a fixed interval of 60 seconds.
+
+The `declarationRetries`, `failedDeclarationRetryInterval`, and `retryDeclarationInterval` properties are now configurable.
+See [Message Listener Container Configuration](#containerAttributes) for more information.
+
+##### Class Package Change
+
+The `RabbitGatewaySupport` class has been moved from `o.s.amqp.rabbit.core.support` to `o.s.amqp.rabbit.core`.
+
+##### `DefaultMessagePropertiesConverter` Changes
+
+You can now configure the `DefaultMessagePropertiesConverter` to determine the maximum length of a `LongString` that is converted to a `String` rather than to a `DataInputStream`.
+The converter has an alternative constructor that takes the value as a limit.
+Previously, this limit was hard-coded at `1024` bytes.
+(Also available in 1.4.4).
+
+##### `@RabbitListener` Improvements
+
+###### `@QueueBinding` for `@RabbitListener`
+
+The `bindings` attribute has been added to the `@RabbitListener` annotation as mutually exclusive with the `queues` attribute to allow the specification of the `queue`, its `exchange`, and `binding` for declaration by a `RabbitAdmin` on the Broker.
+
+###### SpEL in `@SendTo`
+
+The default reply address (`@SendTo`) for a `@RabbitListener` can now be a SpEL expression.
+
+###### Multiple Queue Names through Properties
+
+You can now use a combination of SpEL and property placeholders to specify multiple queues for a listener.
+
+See [Annotation-driven Listener Endpoints](#async-annotation-driven) for more information.
+
+##### Automatic Exchange, Queue, and Binding Declaration
+
+You can now declare beans that define a collection of these entities, and the `RabbitAdmin` adds the contents to the list of entities that it declares when a connection is established.
+See [Declaring Collections of Exchanges, Queues, and Bindings](#collection-declaration) for more information.
+
+##### `RabbitTemplate` Changes
+
+###### `reply-address` Added
+
+The `reply-address` attribute has been added to the `<rabbit:template>` component as an alternative to `reply-queue`.
+See [Request/Reply Messaging](#request-reply) for more information.
+(Also available in 1.4.4 as a setter on the `RabbitTemplate`).
+
+###### Blocking `receive` Methods
+
+The `RabbitTemplate` now supports blocking in `receive` and `convertAndReceive` methods.
+See [Polling Consumer](#polling-consumer) for more information.
+
+###### Mandatory with `sendAndReceive` Methods
+
+When the `mandatory` flag is set when using the `sendAndReceive` and `convertSendAndReceive` methods, the calling thread throws an `AmqpMessageReturnedException` if the request message cannot be delivered.
+See [Reply Timeout](#reply-timeout) for more information.
+
+###### Improper Reply Listener Configuration
+
+The framework tries to verify proper configuration of a reply listener container when using a named reply queue.
+
+See [Reply Listener Container](#reply-listener) for more information.
+
+##### `RabbitManagementTemplate` Added
+
+The `RabbitManagementTemplate` has been introduced to monitor and configure the RabbitMQ Broker by using the REST API provided by its [management plugin](https://www.rabbitmq.com/management.html).
+See [RabbitMQ REST API](#management-rest-api) for more information.
+
+##### Listener Container Bean Names
+
+| |The `id` attribute on the `<listener-container/>` element has been removed.
Starting with this release, the `id` on the `<listener/>` child element is used alone to name the listener container bean created for each listener element.

Normal Spring bean name overrides are applied.
If a later `<listener/>` is parsed with the same `id` as an existing bean, the new definition overrides the existing one.
Previously, bean names were composed from the `id` attributes of the `<listener-container/>` and `<listener/>` elements.

When migrating to this release, if you have `id` attributes on your `<listener-container/>` elements, remove them and set the `id` on the child `<listener/>` element instead.|
+|---|---|
+
+However, to support starting and stopping containers as a group, a new `group` attribute has been added.
+When this attribute is defined, the containers created by this element are added to a bean with this name, of type `Collection`.
+You can iterate over this group to start and stop containers.
+
+##### Class-Level `@RabbitListener`
+
+The `@RabbitListener` annotation can now be applied at the class level.
+Together with the new `@RabbitHandler` method annotation, this lets you select the handler method based on payload type.
+See [Multi-method Listeners](#annotation-method-selection) for more information.
+
+##### `SimpleMessageListenerContainer`: BackOff Support
+
+The `SimpleMessageListenerContainer` can now be supplied with a `BackOff` instance for consumer startup recovery.
+See [Message Listener Container Configuration](#containerAttributes) for more information.
+
+##### Channel Close Logging
+
+A mechanism to control the log levels of channel closure has been introduced.
+See [Logging Channel Close Events](#channel-close-logging).
+
+##### Application Events
+
+The `SimpleMessageListenerContainer` now emits application events when consumers fail.
+See [Consumer Events](#consumer-events) for more information.
+
+##### Consumer Tag Configuration
+
+Previously, the consumer tags for asynchronous consumers were generated by the broker.
+With this release, it is now possible to supply a naming strategy to the listener container.
+See [Consumer Tags](#consumerTags).
+
+##### Using `MessageListenerAdapter`
+
+The `MessageListenerAdapter` now supports a map of queue names (or consumer tags) to method names, to determine which delegate method to call based on the queue from which the message was received.
+
+##### `LocalizedQueueConnectionFactory` Added
+
+`LocalizedQueueConnectionFactory` is a new connection factory that connects to the node in a cluster where a mirrored queue actually resides.
+
+See [Queue Affinity and the `LocalizedQueueConnectionFactory`](#queue-affinity).
+
+##### Anonymous Queue Naming
+
+Starting with version 1.5.3, you can now control how `AnonymousQueue` names are generated.
+See [`AnonymousQueue`](#anonymous-queue) for more information.
+
+#### A.2.9. Changes in 1.4 Since 1.3
+
+##### `@RabbitListener` Annotation
+
+POJO listeners can be annotated with `@RabbitListener`, enabled by `@EnableRabbit` or `<rabbit:annotation-driven/>`.
+Spring Framework 4.1 is required for this feature.
+See [Annotation-driven Listener Endpoints](#async-annotation-driven) for more information.
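+
+The following is a minimal sketch of the annotation-driven model described above; the queue name and the container factory wiring are assumptions:
+
+```
+@Configuration
+@EnableRabbit
+public class RabbitConfiguration {
+
+    @Bean
+    public SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory(ConnectionFactory connectionFactory) {
+        SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
+        factory.setConnectionFactory(connectionFactory);
+        return factory;
+    }
+}
+
+@Component
+class MyPojoListener {
+
+    @RabbitListener(queues = "some.queue")
+    public void handle(String payload) {
+        System.out.println("Received: " + payload); // invoked for each message on 'some.queue'
+    }
+}
+```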
+
+##### `RabbitMessagingTemplate` Added
+
+A new `RabbitMessagingTemplate` lets you interact with RabbitMQ by using `spring-messaging` `Message` instances.
+Internally, it uses the `RabbitTemplate`, which you can configure as normal.
+Spring Framework 4.1 is required for this feature.
+See [Messaging Integration](#template-messaging) for more information.
+
+##### Listener Container `missingQueuesFatal` Attribute
+
+1.3.5 introduced the `missingQueuesFatal` property on the `SimpleMessageListenerContainer`.
+This is now available on the listener container namespace element.
+See [Message Listener Container Configuration](#containerAttributes).
+
+##### RabbitTemplate `ConfirmCallback` Interface
+
+The `confirm` method on this interface has an additional parameter called `cause`.
+When available, this parameter contains the reason for a negative acknowledgement (nack).
+See [Correlated Publisher Confirms and Returns](#template-confirms).
+
+##### `RabbitConnectionFactoryBean` Added
+
+`RabbitConnectionFactoryBean` creates the underlying RabbitMQ `ConnectionFactory` used by the `CachingConnectionFactory`.
+This enables configuration of SSL options using Spring’s dependency injection.
+See [Configuring the Underlying Client Connection Factory](#connection-factory).
+
+##### Using `CachingConnectionFactory`
+
+The `CachingConnectionFactory` now lets the `connectionTimeout` be set as a property or as an attribute in the namespace.
+It sets the property on the underlying RabbitMQ `ConnectionFactory`.
+See [Configuring the Underlying Client Connection Factory](#connection-factory).
+
+##### Log Appender
+
+The Logback `org.springframework.amqp.rabbit.logback.AmqpAppender` has been introduced.
+It provides options similar to `org.springframework.amqp.rabbit.log4j.AmqpAppender`.
+For more information, see the JavaDoc of these classes.
+
+The Log4j `AmqpAppender` now supports the `deliveryMode` property (`PERSISTENT` or `NON_PERSISTENT`, default: `PERSISTENT`).
+Previously, all Log4j messages were `PERSISTENT`.
+
+The appender also supports modification of the `Message` before sending — allowing, for example, the addition of custom headers.
+Subclasses should override the `postProcessMessageBeforeSend()` method.
+
+##### Listener Queues
+
+The listener container now, by default, redeclares any missing queues during startup.
+A new `auto-declare` attribute has been added to the `<listener-container/>` to prevent these re-declarations.
+See [`auto-delete` Queues](#lc-auto-delete).
+
+##### `RabbitTemplate`: `mandatory` and `connectionFactorySelector` Expressions
+
+The `mandatoryExpression`, `sendConnectionFactorySelectorExpression`, and `receiveConnectionFactorySelectorExpression` SpEL `Expression` properties have been added to `RabbitTemplate`.
+The `mandatoryExpression` is used to evaluate a `mandatory` boolean value against each request message when a `ReturnCallback` is in use.
+See [Correlated Publisher Confirms and Returns](#template-confirms).
+The `sendConnectionFactorySelectorExpression` and `receiveConnectionFactorySelectorExpression` are used when an `AbstractRoutingConnectionFactory` is provided, to determine the `lookupKey` for the target `ConnectionFactory` at runtime on each AMQP protocol interaction operation.
+See [Routing Connection Factory](#routing-connection-factory).
+
+##### Listeners and the Routing Connection Factory
+
+You can configure a `SimpleMessageListenerContainer` with a routing connection factory to enable connection selection based on the queue names.
+See [Routing Connection Factory](#routing-connection-factory).
+
+##### `RabbitTemplate`: `RecoveryCallback` Option
+
+The `recoveryCallback` property has been added for use in the `retryTemplate.execute()` operation.
+See [Adding Retry Capabilities](#template-retry).
+
+##### `MessageConversionException` Change
+
+This exception is now a subclass of `AmqpException`.
+Consider the following code:
+
+```
+try {
+    template.convertAndSend("thing1", "thing2", "cat");
+}
+catch (AmqpException e) {
+    ...
+}
+catch (MessageConversionException e) {
+    ...
+}
+```
+
+The second catch block is no longer reachable and needs to be moved above the catch-all `AmqpException` catch block.
+
+##### RabbitMQ 3.4 Compatibility
+
+Spring AMQP is now compatible with RabbitMQ 3.4, including direct reply-to.
+See [Compatibility](#compatibility) and [RabbitMQ Direct reply-to](#direct-reply-to) for more information.
+
+##### `ContentTypeDelegatingMessageConverter` Added
+
+The `ContentTypeDelegatingMessageConverter` has been introduced to select the `MessageConverter` to use, based on the `contentType` property in the `MessageProperties`.
+See [Message Converters](#message-converters) for more information.
+
+#### A.2.10. Changes in 1.3 Since 1.2
+
+##### Listener Concurrency
+
+The listener container now supports dynamic scaling of the number of consumers based on workload, or you can programmatically change the concurrency without stopping the container.
+See [Listener Concurrency](#listener-concurrency).
+
+##### Listener Queues
+
+The listener container now permits the queues on which it listens to be modified at runtime.
+Also, the container now starts if at least one of its configured queues is available for use.
+See [Listener Container Queues](#listener-queues).
+
+The listener container now redeclares any auto-delete queues during startup.
+See [`auto-delete` Queues](#lc-auto-delete).
+
+##### Consumer Priority
+
+The listener container now supports consumer arguments, letting the `x-priority` argument be set.
+See [Consumer Priority](#consumer-priority).
+
+##### Exclusive Consumer
+
+You can now configure `SimpleMessageListenerContainer` with a single `exclusive` consumer, preventing other consumers from listening to the queue.
+See [Exclusive Consumer](#exclusive-consumer).
+
+##### Rabbit Admin
+
+You can now have the broker generate the queue name, regardless of `durable`, `autoDelete`, and `exclusive` settings.
+See [Configuring the Broker](#broker-configuration).
+
+##### Direct Exchange Binding
+
+Previously, omitting the `key` attribute from a `binding` element of a `direct-exchange` configuration caused the queue or exchange to be bound with an empty string as the routing key.
+Now it is bound with the name of the provided `Queue` or `Exchange`.
+If you wish to bind with an empty string routing key, you need to specify `key=""`.
+
+##### `AmqpTemplate` Changes
+
+The `AmqpTemplate` now provides several synchronous `receiveAndReply` methods.
+These are implemented by the `RabbitTemplate`.
+For more information, see [Receiving Messages](#receiving-messages).
+
+The `RabbitTemplate` now supports configuring a `RetryTemplate` to attempt retries (with an optional back-off policy) when the broker is not available.
+For more information, see [Adding Retry Capabilities](#template-retry).
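+
+A sketch of attaching a `RetryTemplate` to a `RabbitTemplate`, as described above; the back-off values are illustrative assumptions:
+
+```
+RetryTemplate retryTemplate = new RetryTemplate();
+ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
+backOffPolicy.setInitialInterval(500);   // first retry after 500ms
+backOffPolicy.setMultiplier(2.0);        // double the interval on each subsequent retry
+backOffPolicy.setMaxInterval(10000);     // cap the interval at 10 seconds
+retryTemplate.setBackOffPolicy(backOffPolicy);
+rabbitTemplate.setRetryTemplate(retryTemplate); // retries sends while the broker is unavailable
+```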
+
+##### Caching Connection Factory
+
+You can now configure the caching connection factory to cache `Connection` instances and their `Channel` instances instead of using a single connection and caching only `Channel` instances.
+See [Connection and Resource Management](#connections).
+
+##### Binding Arguments
+
+The `<binding>` of the `<exchange>` now supports parsing of the `<binding-arguments>` sub-element.
+You can now configure the `<binding>` of the `<headers-exchange>` with a `key/value` attribute pair (to match on a single header) or with a `<binding-arguments>` sub-element (allowing matching on multiple headers).
+These options are mutually exclusive.
+See [Headers Exchange](#headers-exchange).
+
+##### Routing Connection Factory
+
+A new `SimpleRoutingConnectionFactory` has been introduced.
+It allows configuration of a `ConnectionFactories` mapping, to determine the target `ConnectionFactory` to use at runtime.
+See [Routing Connection Factory](#routing-connection-factory).
+
+##### `MessageBuilder` and `MessagePropertiesBuilder`
+
+“Fluent APIs” for building messages or message properties are now provided.
+See [Message Builder API](#message-builder).
+
+##### `RetryInterceptorBuilder` Change
+
+A “Fluent API” for building listener container retry interceptors is now provided.
+See [Failures in Synchronous Operations and Options for Retry](#retry).
+
+##### `RepublishMessageRecoverer` Added
+
+This new `MessageRecoverer` is provided to allow publishing a failed message to another queue (including stack trace information in the header) when retries are exhausted.
+See [Message Listeners and the Asynchronous Case](#async-listeners).
+
+##### Default Error Handler
+
+A default `ConditionalRejectingErrorHandler` has been added to the listener container.
+This error handler detects fatal message conversion problems and instructs the container to reject the message to prevent the broker from continually redelivering the unconvertible message.
+See [Exception Handling](#exception-handling).
+
+##### `missingQueuesFatal` Property
+
+The `SimpleMessageListenerContainer` now has a property called `missingQueuesFatal` (default: `true`).
+Previously, missing queues were always fatal.
+See [Message Listener Container Configuration](#containerAttributes).
+
+#### A.2.11. Changes to 1.2 Since 1.1
+
+##### RabbitMQ Version
+
+Spring AMQP now uses RabbitMQ 3.1.x by default (but retains compatibility with earlier versions).
+Certain deprecations have been added for features no longer supported by RabbitMQ 3.1.x — federated exchanges and the `immediate` property on the `RabbitTemplate`.
+
+##### Rabbit Admin
+
+`RabbitAdmin` now provides an option to let exchange, queue, and binding declarations continue when a declaration fails.
+Previously, all declarations stopped on a failure.
+By setting `ignore-declaration-exceptions`, such exceptions are logged (at the `WARN` level), but further declarations continue.
+An example where this might be useful is when a queue declaration fails because of a slightly different `ttl` setting that would normally stop other declarations from proceeding.
+
+`RabbitAdmin` now provides an additional method called `getQueueProperties()`.
+You can use this to determine whether a queue exists on the broker (it returns `null` for a non-existent queue).
+In addition, it returns the current number of messages in the queue as well as the current number of consumers.
+
+##### Rabbit Template
+
+Previously, when the `...sendAndReceive()` methods were used with a fixed reply queue, two custom headers were used for correlation data and to retain and restore reply queue information.
+With this release, the standard message property (`correlationId`) is used by default, although you can specify a custom property to use instead.
+In addition, nested `replyTo` information is now retained internally in the template, instead of using a custom header.
+
+The `immediate` property is deprecated.
+You must not set this property when using RabbitMQ 3.0.x or greater.
+
+##### JSON Message Converters
+
+A Jackson 2.x `MessageConverter` is now provided, along with the existing converter that uses Jackson 1.x.
+
+##### Automatic Declaration of Queues and Other Items
+
+Previously, when declaring queues, exchanges, and bindings, you could not define which connection factory was used for the declarations.
+Each `RabbitAdmin` declared all components by using its connection.
+
+Starting with this release, you can now limit declarations to specific `RabbitAdmin` instances.
+See [Conditional Declaration](#conditional-declaration).
+
+##### AMQP Remoting
+
+Facilities are now provided for using Spring remoting techniques, using AMQP as the transport for the RPC calls.
+For more information, see [Spring Remoting with AMQP](#remoting).
+
+##### Requested Heart Beats
+
+Several users have asked for the underlying client connection factory’s `requestedHeartBeats` property to be exposed on the Spring AMQP `CachingConnectionFactory`.
+This is now available.
+Previously, it was necessary to configure the AMQP client factory as a separate bean and provide a reference to it in the `CachingConnectionFactory`.
+
+#### A.2.12. Changes to 1.1 Since 1.0
+
+##### General
+
+Spring-AMQP is now built with Gradle.
+
+Adds support for publisher confirms and returns.
+
+Adds support for HA queues and broker failover.
+
+Adds support for dead letter exchanges and dead letter queues.
+
+##### AMQP Log4j Appender
+
+Adds an option to support adding a message ID to logged messages.
+
+Adds an option to allow the specification of a `Charset` name to be used when converting `String` to `byte[]`.
\ No newline at end of file
diff --git a/docs/en/spring-for-graphql/READEME.md b/docs/en/spring-batch/README.md
similarity index 100%
rename from docs/en/spring-for-graphql/READEME.md
rename to docs/en/spring-batch/README.md
diff --git a/docs/en/spring-batch/appendix.md b/docs/en/spring-batch/appendix.md
new file mode 100644
index 0000000000000000000000000000000000000000..10e84b5cc95996eff85bcf539d44c70e52435319
--- /dev/null
+++ b/docs/en/spring-batch/appendix.md
@@ -0,0 +1,48 @@
+## Appendix A: List of ItemReaders and ItemWriters
+
+### Item Readers
+
+| Item Reader | Description |
+|----------------------------------------|---|
+|AbstractItemCountingItemStreamItemReader| Abstract base class that provides basic
restart capabilities by counting the number of items returned from
an `ItemReader`. | +| AggregateItemReader |An `ItemReader` that delivers a list as its
item, storing up objects from the injected `ItemReader` until they
are ready to be packed out as a collection. This class must be used
as a wrapper for a custom `ItemReader` that can identify the record
boundaries. The custom reader should mark the beginning and end of
records by returning an `AggregateItem` which responds `true` to its
query methods `isHeader()` and `isFooter()`. Note that this reader
is not part of the library of readers provided by Spring Batch
but is given as a sample in `spring-batch-samples`. | +| AmqpItemReader | Given a Spring `AmqpTemplate`, it provides&#13;
synchronous receive methods. The `receiveAndConvert()` method
lets you receive POJO objects. | +| KafkaItemReader | An `ItemReader` that reads messages from an Apache Kafka topic.
It can be configured to read messages from multiple partitions of the same topic.
This reader stores message offsets in the execution context to support restart capabilities. | +| FlatFileItemReader | Reads from a flat file. Includes `ItemStream` and `Skippable` functionality. See [`FlatFileItemReader`](readersAndWriters.html#flatFileItemReader). | +| HibernateCursorItemReader | Reads from a cursor based on an HQL query. See [`Cursor-based ItemReaders`](readersAndWriters.html#cursorBasedItemReaders). | +| HibernatePagingItemReader | Reads from a paginated HQL query. | +| ItemReaderAdapter | Adapts any class to the `ItemReader` interface. | +| JdbcCursorItemReader | Reads from a database cursor via JDBC. See [`Cursor-based ItemReaders`](readersAndWriters.html#cursorBasedItemReaders). | +| JdbcPagingItemReader | Given an SQL statement, pages through the rows,&#13;
such that large datasets can be read without running out of
memory. | +| JmsItemReader | Given a Spring `JmsOperations` object and a JMS
Destination or destination name to which to send errors, provides items
received through the injected `JmsOperations#receive()` method. | +| JpaPagingItemReader | Given a JPQL statement, pages through the&#13;
rows, such that large datasets can be read without running out of
memory. | +| ListItemReader | Provides the items from a list, one at a
time. | +| MongoItemReader | Given a `MongoOperations` object and a JSON-based MongoDB
query, provides items received from the `MongoOperations#find()` method. | +| Neo4jItemReader | Given a `Neo4jOperations` object and the components of a
Cypher query, items are returned as the result of the `Neo4jOperations.query`&#13;
method. | +| RepositoryItemReader | Given a Spring Data `PagingAndSortingRepository` object,
a `Sort`, and the name of the method to execute, returns items provided by the&#13;
Spring Data repository implementation. | +| StoredProcedureItemReader | Reads from a database cursor resulting from the
execution of a database stored procedure. See [`StoredProcedureItemReader`](readersAndWriters.html#StoredProcedureItemReader). | +| StaxEventItemReader | Reads via StAX. See [`StaxEventItemReader`](readersAndWriters.html#StaxEventItemReader). | +| JsonItemReader | Reads items from a JSON document. See [`JsonItemReader`](readersAndWriters.html#JsonItemReader). | + +### Item Writers + +| Item Writer | Description | +|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| AbstractItemStreamItemWriter | Abstract base class that combines the `ItemStream` and `ItemWriter` interfaces. | +| AmqpItemWriter | Given a Spring `AmqpTemplate`, it provides&#13;
for a synchronous `send` method. The `convertAndSend(Object)` method lets you send POJO objects. | +| CompositeItemWriter | Passes an item to the `write` method of each&#13;
in an injected `List` of `ItemWriter` objects. | +| FlatFileItemWriter | Writes to a flat file. Includes `ItemStream` and
Skippable functionality. See [`FlatFileItemWriter`](readersAndWriters.html#flatFileItemWriter). | +| GemfireItemWriter | Using a `GemfireOperations` object, items are either written
or removed from the Gemfire instance based on the configuration of the delete
flag. | +| HibernateItemWriter | This item writer is Hibernate-session aware
and handles some transaction-related work that a non-"hibernate-aware"
item writer would not need to know about and then delegates
to another item writer to do the actual writing. | +| ItemWriterAdapter | Adapts any class to the `ItemWriter` interface. | +| JdbcBatchItemWriter | Uses batching features from a `PreparedStatement`, if available, and can&#13;
take rudimentary steps to locate a failure during a `flush`. | +| JmsItemWriter | Using a `JmsOperations` object, items are written&#13;
to the default queue through the `JmsOperations#convertAndSend()` method. | +| JpaItemWriter | This item writer is JPA EntityManager-aware
and handles some transaction-related work that a non-"JPA-aware" `ItemWriter` would not need to know about and&#13;
then delegates to another writer to do the actual writing. | +| KafkaItemWriter | Using a `KafkaTemplate` object, items are written to the default topic through the `KafkaTemplate#sendDefault(Object, Object)` method using a `Converter` to map the key from the item.&#13;
A delete flag can also be configured to send delete events to the topic. | +| MimeMessageItemWriter | Using Spring’s `JavaMailSender`, items of type `MimeMessage` are sent as mail messages. | +| MongoItemWriter | Given a `MongoOperations` object, items are written&#13;
through the `MongoOperations.save(Object)` method. The actual write is delayed
until the last possible moment before the transaction commits. | +| Neo4jItemWriter | Given a `Neo4jOperations` object, items are persisted through the `save(Object)` method or deleted through the `delete(Object)` method, per the `ItemWriter`'s configuration. | +|PropertyExtractingDelegatingItemWriter| Extends `AbstractMethodInvokingDelegator`, creating arguments on the fly. Arguments are created by retrieving&#13;
the values from the fields in the item to be processed (through a `SpringBeanWrapper`), based on an injected array of field&#13;
names. | +| RepositoryItemWriter | Given a Spring Data `CrudRepository` implementation,
items are saved through the method specified in the configuration. | +| StaxEventItemWriter | Uses a `Marshaller` implementation to
convert each item to XML and then writes it to an XML file using
StAX. | +| JsonFileItemWriter | Uses a `JsonObjectMarshaller` implementation to
convert each item to JSON and then writes it to a JSON file. \ No newline at end of file diff --git a/docs/en/spring-batch/common-patterns.md b/docs/en/spring-batch/common-patterns.md new file mode 100644 index 0000000000000000000000000000000000000000..12e0f17d23d4ff2b75bf2c0f548da8879c09bee7 --- /dev/null +++ b/docs/en/spring-batch/common-patterns.md @@ -0,0 +1,703 @@ +# Common Batch Patterns + +## Common Batch Patterns + +Some batch jobs can be assembled purely from off-the-shelf components in Spring Batch. +For instance, the `ItemReader` and `ItemWriter` implementations can be configured to +cover a wide range of scenarios. However, for the majority of cases, custom code must be +written. The main API entry points for application developers are the `Tasklet`, the `ItemReader`, the `ItemWriter`, and the various listener interfaces. Most simple batch +jobs can use off-the-shelf input from a Spring Batch `ItemReader`, but it is often the +case that there are custom concerns in the processing and writing that require developers +to implement an `ItemWriter` or `ItemProcessor`. + +In this chapter, we provide a few examples of common patterns in custom business logic. +These examples primarily feature the listener interfaces. It should be noted that an `ItemReader` or `ItemWriter` can implement a listener interface as well, if appropriate. + +### Logging Item Processing and Failures + +A common use case is the need for special handling of errors in a step, item by item, +perhaps logging to a special channel or inserting a record into a database. A +chunk-oriented `Step` (created from the step factory beans) lets users implement this use +case with a simple `ItemReadListener` for errors on `read` and an `ItemWriteListener` for +errors on `write`. The following code snippet illustrates a listener that logs both read +and write failures: + +``` +public class ItemFailureLoggerListener extends ItemListenerSupport { + + private static Log logger = LogFactory.getLog("item.error"); + + public void onReadError(Exception ex) { + logger.error("Encountered error on read", ex); + } + + public void onWriteError(Exception ex, List items) { + logger.error("Encountered error on write", ex); + } +} +``` + +Once you have implemented this listener, it must be registered with a step. + +The following example shows how to register a listener with a step in XML: + +XML Configuration + +``` + +... + + + + + + +``` + +The following example shows how to register a listener with a step in Java: + +Java Configuration + +``` +@Bean +public Step simpleStep() { + return this.stepBuilderFactory.get("simpleStep") + ... + .listener(new ItemFailureLoggerListener()) + .build(); +} +``` + +| |If your listener does anything in an `onError()` method, it must be inside&#13;
a transaction that is going to be rolled back. If you need to use a transactional
resource, such as a database, inside an `onError()` method, consider adding a declarative
transaction to that method (see Spring Core Reference Guide for details), and giving its
propagation attribute a value of `REQUIRES_NEW`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### Stopping a Job Manually for Business Reasons + +Spring Batch provides a `stop()` method through the `JobOperator` interface, but this is +really for use by the operator rather than the application programmer. Sometimes, it is +more convenient or makes more sense to stop a job execution from within the business +logic. + +The simplest thing to do is to throw a `RuntimeException` (one that is neither retried +indefinitely nor skipped). For example, a custom exception type could be used, as shown +in the following example: + +``` +public class PoisonPillItemProcessor<T> implements ItemProcessor<T, T> { + + @Override + public T process(T item) throws Exception { + if (isPoisonPill(item)) { + throw new PoisonPillException("Poison pill detected: " + item); + } + return item; + } +} +``` + +Another simple way to stop a step from executing is to return `null` from the `ItemReader`, as shown in the following example: + +``` +public class EarlyCompletionItemReader<T> implements ItemReader<T> { + + private ItemReader<T> delegate; + + public void setDelegate(ItemReader<T> delegate) { ... } + + public T read() throws Exception { + T item = delegate.read(); + if (isEndItem(item)) { + return null; // end the step here + } + return item; + } + +} +``` + +The previous example actually relies on the fact that there is a default implementation +of the `CompletionPolicy` strategy that signals a complete batch when the item to be +processed is `null`. A more sophisticated completion policy could be implemented and +injected into the `Step` through the `SimpleStepFactoryBean`. + +The following example shows how to inject a completion policy into a step in XML: + +XML Configuration + +``` + + + + + + + +``` + +The following example shows how to inject a completion policy into a step in Java: + +Java Configuration + +``` +@Bean +public Step simpleStep() { + return this.stepBuilderFactory.get("simpleStep") + .chunk(new SpecialCompletionPolicy()) + .reader(reader()) + .writer(writer()) + .build(); +} +``` + +An alternative is to set a flag in the `StepExecution`, which is checked by the `Step` implementations in the framework in between item processing. To implement this +alternative, we need access to the current `StepExecution`, and this can be achieved by +implementing a `StepListener` and registering it with the `Step`. The following example +shows a listener that sets the flag: + +``` +public class CustomItemWriter extends ItemListenerSupport implements StepListener { + + private StepExecution stepExecution; + + public void beforeStep(StepExecution stepExecution) { + this.stepExecution = stepExecution; + } + + public void afterRead(Object item) { + if (isPoisonPill(item)) { + stepExecution.setTerminateOnly(); + } + } + +} +``` + +When the flag is set, the default behavior is for the step to throw a `JobInterruptedException`. This behavior can be controlled through the `StepInterruptionPolicy`. However, the only choice is to throw or not throw an exception, +so this is always an abnormal ending to a job. &#13;
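+
+The `PoisonPillException` used in the processor shown earlier in this section is not a class that ships with Spring Batch; any unchecked exception that is neither retried indefinitely nor skipped has the same effect. A minimal sketch of such an application-defined exception might look like the following:
+
+```
+// Hypothetical application-defined exception. Any RuntimeException that is
+// neither retried nor skipped stops the job in the same way.
+public class PoisonPillException extends RuntimeException {
+
+    public PoisonPillException(String message) {
+        super(message);
+    }
+}
+```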
+ +### Adding a Footer Record + +Often, when writing to flat files, a “footer” record must be appended to the end of the +file, after all processing has been completed. This can be achieved using the `FlatFileFooterCallback` interface provided by Spring Batch. The `FlatFileFooterCallback` (and its counterpart, the `FlatFileHeaderCallback`) are optional properties of the `FlatFileItemWriter` and can be added to an item writer. + +The following example shows how to use the `FlatFileHeaderCallback` and the `FlatFileFooterCallback` in XML: + +XML Configuration + +``` + + + + + + +``` + +The following example shows how to use the `FlatFileHeaderCallback` and the `FlatFileFooterCallback` in Java: + +Java Configuration + +``` +@Bean +public FlatFileItemWriter itemWriter(Resource outputResource) { + return new FlatFileItemWriterBuilder() + .name("itemWriter") + .resource(outputResource) + .lineAggregator(lineAggregator()) + .headerCallback(headerCallback()) + .footerCallback(footerCallback()) + .build(); +} +``` + +The footer callback interface has just one method that is called when the footer must be +written, as shown in the following interface definition: + +``` +public interface FlatFileFooterCallback { + + void writeFooter(Writer writer) throws IOException; + +} +``` + +#### Writing a Summary Footer + +A common requirement involving footer records is to aggregate information during the +output process and to append this information to the end of the file. This footer often +serves as a summarization of the file or provides a checksum. + +For example, if a batch job is writing `Trade` records to a flat file, and there is a +requirement that the total amount from all the `Trades` is placed in a footer, then the +following `ItemWriter` implementation can be used: + +``` +public class TradeItemWriter implements ItemWriter<Trade>, + FlatFileFooterCallback { + + private ItemWriter<Trade> delegate; + + private BigDecimal totalAmount = BigDecimal.ZERO; + + public void write(List<Trade> items) throws Exception { + BigDecimal chunkTotal = BigDecimal.ZERO; + for (Trade trade : items) { + chunkTotal = chunkTotal.add(trade.getAmount()); + } + + delegate.write(items); + + // After successfully writing all items + totalAmount = totalAmount.add(chunkTotal); + } + + public void writeFooter(Writer writer) throws IOException { + writer.write("Total Amount Processed: " + totalAmount); + } + + public void setDelegate(ItemWriter<Trade> delegate) {...} +} +``` + +This `TradeItemWriter` stores a `totalAmount` value that is increased with the `amount` from each `Trade` item written. After the last `Trade` is processed, the framework calls `writeFooter`, which puts the `totalAmount` into the file. Note that the `write` method +makes use of a temporary variable, `chunkTotal`, that stores the total of the `Trade` amounts in the chunk. This is done to ensure that, if a skip occurs in the `write` method, the `totalAmount` is left unchanged. It is only at the end of the `write` method, once we are guaranteed that no exceptions are thrown, that we update the `totalAmount`. + +In order for the `writeFooter` method to be called, the `TradeItemWriter` (which +implements `FlatFileFooterCallback`) must be wired into the `FlatFileItemWriter` as the `footerCallback`. &#13;
+ +The following example shows how to wire the `TradeItemWriter` in XML: + +XML Configuration + +``` + + + + + + + + + +``` + +The following example shows how to wire the `TradeItemWriter` in Java: + +Java Configuration + +``` +@Bean +public TradeItemWriter tradeItemWriter() { + TradeItemWriter itemWriter = new TradeItemWriter(); + + itemWriter.setDelegate(flatFileItemWriter(null)); + + return itemWriter; +} + +@Bean +public FlatFileItemWriter flatFileItemWriter(Resource outputResource) { + return new FlatFileItemWriterBuilder() + .name("itemWriter") + .resource(outputResource) + .lineAggregator(lineAggregator()) + .footerCallback(tradeItemWriter()) + .build(); +} +``` + +The way that the `TradeItemWriter` has been written so far functions correctly only if +the `Step` is not restartable. This is because the class is stateful (since it stores the `totalAmount`), but the `totalAmount` is not persisted to the database. Therefore, it +cannot be retrieved in the event of a restart. In order to make this class restartable, +the `ItemStream` interface should be implemented along with the methods `open` and `update`, as shown in the following example: + +``` +public void open(ExecutionContext executionContext) { + if (executionContext.containsKey("total.amount")) { + totalAmount = (BigDecimal) executionContext.get("total.amount"); + } +} + +public void update(ExecutionContext executionContext) { + executionContext.put("total.amount", totalAmount); +} +``` + +The `update` method stores the most current version of `totalAmount` to the `ExecutionContext` just before that object is persisted to the database. The `open` method +retrieves any existing `totalAmount` from the `ExecutionContext` and uses it as the +starting point for processing, allowing the `TradeItemWriter` to pick up on restart where +it left off the previous time the `Step` was run. + +### Driving Query Based ItemReaders + +In the [chapter on readers and writers](readersAndWriters.html), database input using +paging was discussed. Many database vendors, such as DB2, have extremely pessimistic +locking strategies that can cause issues if the table being read also needs to be used by +other portions of the online application. Furthermore, opening cursors over extremely +large datasets can cause issues on databases from certain vendors. Therefore, many +projects prefer to use a 'Driving Query' approach to reading in data. This approach works +by iterating over keys, rather than the entire object that needs to be returned, as the +following image illustrates: + +![Driving Query Job](https://docs.spring.io/spring-batch/docs/current/reference/html/images/drivingQueryExample.png) + +Figure 1. Driving Query Job + +As you can see, the example shown in the preceding image uses the same 'FOO' table as was +used in the cursor-based example. However, rather than selecting the entire row, only the +IDs were selected in the SQL statement. So, rather than a `Foo` object being returned +from `read`, an `Integer` is returned. This number can then be used to query for the +'details', which is a complete `Foo` object, as shown in the following image: + +![Driving Query Example](https://docs.spring.io/spring-batch/docs/current/reference/html/images/drivingQueryJob.png) + +Figure 2. Driving Query Example + +An `ItemProcessor` should be used to transform the key obtained from the driving query +into a full `Foo` object. An existing DAO can be used to query for the full object based +on the key. &#13;
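+
+To make the pattern concrete, the following sketch pairs a paging reader that selects only the keys with an `ItemProcessor` that loads the full object. It is illustrative rather than prescriptive: `Foo`, `FooDao`, and its `findById` method are hypothetical, and the table and column names are assumed from the figures above:
+
+```
+@Bean
+public JdbcPagingItemReader<Integer> fooKeyReader(DataSource dataSource) {
+    // Driving query: page over the IDs only, never the full rows
+    return new JdbcPagingItemReaderBuilder<Integer>()
+            .name("fooKeyReader")
+            .dataSource(dataSource)
+            .selectClause("SELECT ID")
+            .fromClause("FROM FOO")
+            .sortKeys(Collections.singletonMap("ID", Order.ASCENDING))
+            .rowMapper((resultSet, rowNum) -> resultSet.getInt("ID"))
+            .pageSize(100)
+            .build();
+}
+
+@Bean
+public ItemProcessor<Integer, Foo> fooLoader(FooDao fooDao) {
+    // Hypothetical DAO lookup that turns each key into a complete Foo
+    return fooDao::findById;
+}
+```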
+ +### Multi-Line Records + +While it is usually the case with flat files that each record is confined to a single +line, it is common that a file might have records spanning multiple lines with multiple +formats. The following excerpt from a file shows an example of such an arrangement: + +``` +HEA;0013100345;2007-02-15 +NCU;Smith;Peter;;T;20014539;F +BAD;;Oak Street 31/A;;Small Town;00235;IL;US +FOT;2;2;267.34 +``` + +Everything between the line starting with 'HEA' and the line starting with 'FOT' is +considered one record. There are a few considerations that must be made in order to +handle this situation correctly: + +* Instead of reading one record at a time, the `ItemReader` must read every line of the + multi-line record as a group, so that it can be passed to the `ItemWriter` intact. + +* Each line type may need to be tokenized differently. + +Because a single record spans multiple lines and because we may not know how many lines +there are, the `ItemReader` must be careful to always read an entire record. In order to +do this, a custom `ItemReader` should be implemented as a wrapper for the`FlatFileItemReader`. + +The following example shows how to implement a custom `ItemReader` in XML: + +XML Configuration + +``` + + + + + + + + + + + + + +``` + +The following example shows how to implement a custom `ItemReader` in Java: + +Java Configuration + +``` +@Bean +public MultiLineTradeItemReader itemReader() { + MultiLineTradeItemReader itemReader = new MultiLineTradeItemReader(); + + itemReader.setDelegate(flatFileItemReader()); + + return itemReader; +} + +@Bean +public FlatFileItemReader flatFileItemReader() { + FlatFileItemReader reader = new FlatFileItemReaderBuilder<>() + .name("flatFileItemReader") + .resource(new ClassPathResource("data/iosample/input/multiLine.txt")) + .lineTokenizer(orderFileTokenizer()) + .fieldSetMapper(orderFieldSetMapper()) + .build(); + return reader; +} +``` + +To ensure that each line is tokenized properly, which is especially important for +fixed-length input, the `PatternMatchingCompositeLineTokenizer` can be used on the +delegate `FlatFileItemReader`. See[`FlatFileItemReader` in the Readers and +Writers chapter](readersAndWriters.html#flatFileItemReader) for more details. The delegate reader then uses a`PassThroughFieldSetMapper` to deliver a `FieldSet` for each line back to the wrapping`ItemReader`. + +The following example shows how to ensure that each line is properly tokenized in XML: + +XML Content + +``` + + + + + + + + + + +``` + +The following example shows how to ensure that each line is properly tokenized in Java: + +Java Content + +``` +@Bean +public PatternMatchingCompositeLineTokenizer orderFileTokenizer() { + PatternMatchingCompositeLineTokenizer tokenizer = + new PatternMatchingCompositeLineTokenizer(); + + Map tokenizers = new HashMap<>(4); + + tokenizers.put("HEA*", headerRecordTokenizer()); + tokenizers.put("FOT*", footerRecordTokenizer()); + tokenizers.put("NCU*", customerLineTokenizer()); + tokenizers.put("BAD*", billingAddressLineTokenizer()); + + tokenizer.setTokenizers(tokenizers); + + return tokenizer; +} +``` + +This wrapper has to be able to recognize the end of a record so that it can continually +call `read()` on its delegate until the end is reached. For each line that is read, the +wrapper should build up the item to be returned. Once the footer is reached, the item can +be returned for delivery to the `ItemProcessor` and `ItemWriter`, as shown in the +following example: + +``` +private FlatFileItemReader

delegate; + +public Trade read() throws Exception { + Trade t = null; + + for (FieldSet line = null; (line = this.delegate.read()) != null;) { + String prefix = line.readString(0); + if (prefix.equals("HEA")) { + t = new Trade(); // Record must start with header + } + else if (prefix.equals("NCU")) { + Assert.notNull(t, "No header was found."); + t.setLast(line.readString(1)); + t.setFirst(line.readString(2)); + ... + } + else if (prefix.equals("BAD")) { + Assert.notNull(t, "No header was found."); + t.setCity(line.readString(4)); + t.setState(line.readString(6)); + ... + } + else if (prefix.equals("FOT")) { + return t; // Record must end with footer + } + } + Assert.isNull(t, "No 'END' was found."); + return null; +} +``` + +### Executing System Commands + +Many batch jobs require that an external command be called from within the batch job. +Such a process could be kicked off separately by the scheduler, but the advantage of +common metadata about the run would be lost. Furthermore, a multi-step job would also +need to be split up into multiple jobs as well. + +Because the need is so common, Spring Batch provides a `Tasklet` implementation for +calling system commands. + +The following example shows how to call an external command in XML: + +XML Configuration + +``` + + + + + +``` + +The following example shows how to call an external command in Java: + +Java Configuration + +``` +@Bean +public SystemCommandTasklet tasklet() { + SystemCommandTasklet tasklet = new SystemCommandTasklet(); + + tasklet.setCommand("echo hello"); + tasklet.setTimeout(5000); + + return tasklet; +} +``` + +### Handling Step Completion When No Input is Found + +In many batch scenarios, finding no rows in a database or file to process is not +exceptional. The `Step` is simply considered to have found no work and completes with 0 +items read. All of the `ItemReader` implementations provided out of the box in Spring +Batch default to this approach. This can lead to some confusion if nothing is written out +even when input is present (which usually happens if a file was misnamed or some similar +issue arises). For this reason, the metadata itself should be inspected to determine how +much work the framework found to be processed. However, what if finding no input is +considered exceptional? In this case, programmatically checking the metadata for no items +processed and causing failure is the best solution. Because this is a common use case, +Spring Batch provides a listener with exactly this functionality, as shown in +the class definition for `NoWorkFoundStepExecutionListener`: + +``` +public class NoWorkFoundStepExecutionListener extends StepExecutionListenerSupport { + + public ExitStatus afterStep(StepExecution stepExecution) { + if (stepExecution.getReadCount() == 0) { + return ExitStatus.FAILED; + } + return null; + } + +} +``` + +The preceding `StepExecutionListener` inspects the `readCount` property of the`StepExecution` during the 'afterStep' phase to determine if no items were read. If that +is the case, an exit code `FAILED` is returned, indicating that the `Step` should fail. +Otherwise, `null` is returned, which does not affect the status of the `Step`. + +### Passing Data to Future Steps + +It is often useful to pass information from one step to another. This can be done through +the `ExecutionContext`. The catch is that there are two `ExecutionContexts`: one at the`Step` level and one at the `Job` level. 
The `Step` `ExecutionContext` remains only as +long as the step, while the `Job` `ExecutionContext` remains through the whole `Job`. On +the other hand, the `Step` `ExecutionContext` is updated every time the `Step` commits a +chunk, while the `Job` `ExecutionContext` is updated only at the end of each `Step`. + +The consequence of this separation is that all data must be placed in the `Step` `ExecutionContext` while the `Step` is executing. Doing so ensures that the data is +stored properly while the `Step` runs. If data is stored to the `Job` `ExecutionContext`, +then it is not persisted during `Step` execution. If the `Step` fails, that data is lost. + +``` +public class SavingItemWriter implements ItemWriter { + private StepExecution stepExecution; + + public void write(List items) throws Exception { + // ... + + ExecutionContext stepContext = this.stepExecution.getExecutionContext(); + stepContext.put("someKey", someObject); + } + + @BeforeStep + public void saveStepExecution(StepExecution stepExecution) { + this.stepExecution = stepExecution; + } +} +``` + +To make the data available to future `Steps`, it must be “promoted” to the `Job` `ExecutionContext` after the step has finished. Spring Batch provides the `ExecutionContextPromotionListener` for this purpose. The listener must be configured +with the keys related to the data in the `ExecutionContext` that must be promoted. It can +also, optionally, be configured with a list of exit code patterns for which the promotion +should occur (`COMPLETED` is the default). As with all listeners, it must be registered +on the `Step`. + +The following example shows how to promote a step to the `Job` `ExecutionContext` in XML: + +XML Configuration + +``` + + + + + + + + + + + + ... + + + + + + + someKey + + + +``` + +The following example shows how to promote a step to the `Job` `ExecutionContext` in Java: + +Java Configuration + +``` +@Bean +public Job job1() { + return this.jobBuilderFactory.get("job1") + .start(step1()) + .next(step2()) + .build(); +} + +@Bean +public Step step1() { + return this.stepBuilderFactory.get("step1") + .chunk(10) + .reader(reader()) + .writer(savingWriter()) + .listener(promotionListener()) + .build(); +} + +@Bean +public ExecutionContextPromotionListener promotionListener() { + ExecutionContextPromotionListener listener = new ExecutionContextPromotionListener(); + + listener.setKeys(new String[] {"someKey"}); + + return listener; +} +``` + +Finally, the saved values must be retrieved from the `Job` `ExecutionContext`, as shown +in the following example: + +``` +public class RetrievingItemWriter implements ItemWriter { + private Object someObject; + + public void write(List items) throws Exception { + // ... + } + + @BeforeStep + public void retrieveInterstepData(StepExecution stepExecution) { + JobExecution jobExecution = stepExecution.getJobExecution(); + ExecutionContext jobContext = jobExecution.getExecutionContext(); + this.someObject = jobContext.get("someKey"); + } +} +``` \ No newline at end of file diff --git a/docs/en/spring-batch/domain.md b/docs/en/spring-batch/domain.md new file mode 100644 index 0000000000000000000000000000000000000000..a424d19cf8a333c5ec076481396d71aa0e5b39d8 --- /dev/null +++ b/docs/en/spring-batch/domain.md @@ -0,0 +1,434 @@ +# The Domain Language of Batch + +## The Domain Language of Batch + +To any experienced batch architect, the overall concepts of batch processing used in +Spring Batch should be familiar and comfortable. &#13;
There are "Jobs" and "Steps" and +developer-supplied processing units called `ItemReader` and `ItemWriter`. However, +because of the Spring patterns, operations, templates, callbacks, and idioms, there are +opportunities for the following: + +* Significant improvement in adherence to a clear separation of concerns. + +* Clearly delineated architectural layers and services provided as interfaces. + +* Simple and default implementations that allow for quick adoption and ease of use + out-of-the-box. + +* Significantly enhanced extensibility. + +The following diagram is a simplified version of the batch reference architecture that +has been used for decades. It provides an overview of the components that make up the +domain language of batch processing. This architecture framework is a blueprint that has +been proven through decades of implementations on the last several generations of +platforms (COBOL/Mainframe, C/Unix, and now Java/anywhere). JCL and COBOL developers +are likely to be as comfortable with the concepts as C, C#, and Java developers. Spring +Batch provides a physical implementation of the layers, components, and technical +services commonly found in the robust, maintainable systems that are used to address the +creation of simple to complex batch applications, with the infrastructure and extensions +to address very complex processing needs. + +![Figure 2.1: Batch Stereotypes](https://docs.spring.io/spring-batch/docs/current/reference/html/images/spring-batch-reference-model.png) + +Figure 1. Batch Stereotypes + +The preceding diagram highlights the key concepts that make up the domain language of +Spring Batch. A Job has one to many steps, each of which has exactly one `ItemReader`, +one `ItemProcessor`, and one `ItemWriter`. A job needs to be launched (with`JobLauncher`), and metadata about the currently running process needs to be stored (in`JobRepository`). + +### Job + +This section describes stereotypes relating to the concept of a batch job. A `Job` is an +entity that encapsulates an entire batch process. As is common with other Spring +projects, a `Job` is wired together with either an XML configuration file or Java-based +configuration. This configuration may be referred to as the "job configuration". However,`Job` is just the top of an overall hierarchy, as shown in the following diagram: + +![Job Hierarchy](https://docs.spring.io/spring-batch/docs/current/reference/html/images/job-heirarchy.png) + +Figure 2. Job Hierarchy + +In Spring Batch, a `Job` is simply a container for `Step` instances. It combines multiple +steps that belong logically together in a flow and allows for configuration of properties +global to all steps, such as restartability. The job configuration contains: + +* The simple name of the job. + +* Definition and ordering of `Step` instances. + +* Whether or not the job is restartable. + +For those who use Java configuration, Spring Batch provides a default implementation of +the Job interface in the form of the `SimpleJob` class, which creates some standard +functionality on top of `Job`. 
When using java based configuration, a collection of +builders is made available for the instantiation of a `Job`, as shown in the following +example: + +``` +@Bean +public Job footballJob() { + return this.jobBuilderFactory.get("footballJob") + .start(playerLoad()) + .next(gameLoad()) + .next(playerSummarization()) + .build(); +} +``` + +For those who use XML configuration, Spring Batch provides a default implementation of the`Job` interface in the form of the `SimpleJob` class, which creates some standard +functionality on top of `Job`. However, the batch namespace abstracts away the need to +instantiate it directly. Instead, the `` element can be used, as shown in the +following example: + +``` + + + + + +``` + +#### JobInstance + +A `JobInstance` refers to the concept of a logical job run. Consider a batch job that +should be run once at the end of the day, such as the 'EndOfDay' `Job` from the preceding +diagram. There is one 'EndOfDay' job, but each individual run of the `Job` must be +tracked separately. In the case of this job, there is one logical `JobInstance` per day. +For example, there is a January 1st run, a January 2nd run, and so on. If the January 1st +run fails the first time and is run again the next day, it is still the January 1st run. +(Usually, this corresponds with the data it is processing as well, meaning the January +1st run processes data for January 1st). Therefore, each `JobInstance` can have multiple +executions (`JobExecution` is discussed in more detail later in this chapter), and only +one `JobInstance` corresponding to a particular `Job` and identifying `JobParameters` can +run at a given time. + +The definition of a `JobInstance` has absolutely no bearing on the data to be loaded. +It is entirely up to the `ItemReader` implementation to determine how data is loaded. For +example, in the EndOfDay scenario, there may be a column on the data that indicates the +'effective date' or 'schedule date' to which the data belongs. So, the January 1st run +would load only data from the 1st, and the January 2nd run would use only data from the +2nd. Because this determination is likely to be a business decision, it is left up to the`ItemReader` to decide. However, using the same `JobInstance` determines whether or not +the 'state' (that is, the `ExecutionContext`, which is discussed later in this chapter) +from previous executions is used. Using a new `JobInstance` means 'start from the +beginning', and using an existing instance generally means 'start from where you left +off'. + +#### JobParameters + +Having discussed `JobInstance` and how it differs from Job, the natural question to ask +is: "How is one `JobInstance` distinguished from another?" The answer is:`JobParameters`. A `JobParameters` object holds a set of parameters used to start a batch +job. They can be used for identification or even as reference data during the run, as +shown in the following image: + +![Job Parameters](https://docs.spring.io/spring-batch/docs/current/reference/html/images/job-stereotypes-parameters.png) + +Figure 3. Job Parameters + +In the preceding example, where there are two instances, one for January 1st, and another +for January 2nd, there is really only one `Job`, but it has two `JobParameter` objects: +one that was started with a job parameter of 01-01-2017 and another that was started with +a parameter of 01-02-2017. Thus, the contract can be defined as: `JobInstance` = `Job`+ identifying `JobParameters`. 
This allows a developer to effectively control how a `JobInstance` is defined, since they control what parameters are passed in. + +| |Not all job parameters are required to contribute to the identification of a `JobInstance`. By default, they do so. However, the framework also allows the submission&#13;
of a `Job` with parameters that do not contribute to the identity of a `JobInstance`.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### JobExecution + +A `JobExecution` refers to the technical concept of a single attempt to run a Job. An +execution may end in failure or success, but the `JobInstance` corresponding to a given +execution is not considered to be complete unless the execution completes successfully. +Using the EndOfDay `Job` described previously as an example, consider a `JobInstance` for +01-01-2017 that failed the first time it was run. If it is run again with the same +identifying job parameters as the first run (01-01-2017), a new `JobExecution` is +created. However, there is still only one `JobInstance`. + +A `Job` defines what a job is and how it is to be executed, and a `JobInstance` is a +purely organizational object to group executions together, primarily to enable correct +restart semantics. A `JobExecution`, however, is the primary storage mechanism for what +actually happened during a run and contains many more properties that must be controlled +and persisted, as shown in the following table: + +| Property | Definition | +|-----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Status | A `BatchStatus` object that indicates the status of the execution. While running, it is`BatchStatus#STARTED`. If it fails, it is `BatchStatus#FAILED`. If it finishes
successfully, it is `BatchStatus#COMPLETED`. | +| startTime | A `java.util.Date` representing the current system time when the execution was started.&#13;
This field is empty if the job has yet to start. | +| endTime | A `java.util.Date` representing the current system time when the execution finished,
regardless of whether or not it was successful. The field is empty if the job has yet to
finish. | +| exitStatus | The `ExitStatus`, indicating the result of the run. It is most important, because it
contains an exit code that is returned to the caller. See chapter 5 for more details. The
field is empty if the job has yet to finish. | +| createTime |A `java.util.Date` representing the current system time when the `JobExecution` was
first persisted. The job may not have been started yet (and thus has no start time), but
it always has a createTime, which is required by the framework for managing job-level `ExecutionContexts`. | +| lastUpdated | A `java.util.Date` representing the last time a `JobExecution` was persisted. This field&#13;
is empty if the job has yet to start. | +|executionContext | The "property bag" containing any user data that needs to be persisted between
executions. | +|failureExceptions| The list of exceptions encountered during the execution of a `Job`. These can be useful
if more than one exception is encountered during the failure of a `Job`. | + +These properties are important because they are persisted and can be used to completely +determine the status of an execution. For example, if the EndOfDay job for 01-01 is +executed at 9:00 PM and fails at 9:30, the following entries are made in the batch +metadata tables: + +|JOB\_INST\_ID| JOB\_NAME | +|-------------|-----------| +| 1 |EndOfDayJob| + +|JOB\_EXECUTION\_ID|TYPE\_CD| KEY\_NAME |DATE\_VAL |IDENTIFYING| +|------------------|--------|-------------|----------|-----------| +| 1 | DATE |schedule.Date|2017-01-01| TRUE | + +|JOB\_EXEC\_ID|JOB\_INST\_ID| START\_TIME | END\_TIME |STATUS| +|-------------|-------------|----------------|----------------|------| +| 1 | 1 |2017-01-01 21:00|2017-01-01 21:30|FAILED| + +| |Column names may have been abbreviated or removed for the sake of clarity and
formatting.| +|---|---------------------------------------------------------------------------------------------| + +Now that the job has failed, assume that it took the entire night for the problem to be +determined, so that the 'batch window' is now closed. Further assuming that the window +starts at 9:00 PM, the job is kicked off again for 01-01, starting where it left off and +completing successfully at 9:30. Because it is now the next day, the 01-02 job must be +run as well, and it is kicked off just afterwards at 9:31 and completes in its normal one +hour time at 10:30. There is no requirement that one `JobInstance` be kicked off after +another, unless there is potential for the two jobs to attempt to access the same data, +causing issues with locking at the database level. It is entirely up to the scheduler to +determine when a `Job` should be run. Since they are separate `JobInstances`, Spring +Batch makes no attempt to stop them from being run concurrently. (Attempting to run the +same `JobInstance` while another is already running results in a`JobExecutionAlreadyRunningException` being thrown). There should now be an extra entry +in both the `JobInstance` and `JobParameters` tables and two extra entries in the`JobExecution` table, as shown in the following tables: + +|JOB\_INST\_ID| JOB\_NAME | +|-------------|-----------| +| 1 |EndOfDayJob| +| 2 |EndOfDayJob| + +|JOB\_EXECUTION\_ID|TYPE\_CD| KEY\_NAME | DATE\_VAL |IDENTIFYING| +|------------------|--------|-------------|-------------------|-----------| +| 1 | DATE |schedule.Date|2017-01-01 00:00:00| TRUE | +| 2 | DATE |schedule.Date|2017-01-01 00:00:00| TRUE | +| 3 | DATE |schedule.Date|2017-01-02 00:00:00| TRUE | + +|JOB\_EXEC\_ID|JOB\_INST\_ID| START\_TIME | END\_TIME | STATUS | +|-------------|-------------|----------------|----------------|---------| +| 1 | 1 |2017-01-01 21:00|2017-01-01 21:30| FAILED | +| 2 | 1 |2017-01-02 21:00|2017-01-02 21:30|COMPLETED| +| 3 | 2 |2017-01-02 21:31|2017-01-02 22:29|COMPLETED| + +| |Column names may have been abbreviated or removed for the sake of clarity and
formatting.| +|---|---------------------------------------------------------------------------------------------| + +### Step + +A `Step` is a domain object that encapsulates an independent, sequential phase of a batch +job. Therefore, every Job is composed entirely of one or more steps. A `Step` contains +all of the information necessary to define and control the actual batch processing. This +is a necessarily vague description because the contents of any given `Step` are at the +discretion of the developer writing a `Job`. A `Step` can be as simple or complex as the +developer desires. A simple `Step` might load data from a file into the database, +requiring little or no code (depending upon the implementations used). A more complex`Step` may have complicated business rules that are applied as part of the processing. As +with a `Job`, a `Step` has an individual `StepExecution` that correlates with a unique`JobExecution`, as shown in the following image: + +![Figure 2.1: Job Hierarchy With Steps](https://docs.spring.io/spring-batch/docs/current/reference/html/images/jobHeirarchyWithSteps.png) + +Figure 4. Job Hierarchy With Steps + +#### StepExecution + +A `StepExecution` represents a single attempt to execute a `Step`. A new `StepExecution`is created each time a `Step` is run, similar to `JobExecution`. However, if a step fails +to execute because the step before it fails, no execution is persisted for it. A`StepExecution` is created only when its `Step` is actually started. + +`Step` executions are represented by objects of the `StepExecution` class. Each execution +contains a reference to its corresponding step and `JobExecution` and transaction related +data, such as commit and rollback counts and start and end times. Additionally, each step +execution contains an `ExecutionContext`, which contains any data a developer needs to +have persisted across batch runs, such as statistics or state information needed to +restart. The following table lists the properties for `StepExecution`: + +| Property | Definition | +|----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Status |A `BatchStatus` object that indicates the status of the execution. While running, the
status is `BatchStatus.STARTED`. If it fails, the status is `BatchStatus.FAILED`. If it
finishes successfully, the status is `BatchStatus.COMPLETED`.| +| startTime | A `java.util.Date` representing the current system time when the execution was started.
This field is empty if the step has yet to start. | +| endTime | A `java.util.Date` representing the current system time when the execution finished,
regardless of whether or not it was successful. This field is empty if the step has yet to
exit. | +| exitStatus | The `ExitStatus` indicating the result of the execution. It is most important, because
it contains an exit code that is returned to the caller. See chapter 5 for more details.
This field is empty if the step has yet to exit. | +|executionContext| The "property bag" containing any user data that needs to be persisted between&#13;
executions. | +| readCount | The number of items that have been successfully read. | +| writeCount | The number of items that have been successfully written. | +| commitCount | The number of transactions that have been committed for this execution. | +| rollbackCount | The number of times the business transaction controlled by the `Step` has been rolled
back. | +| readSkipCount | The number of times `read` has failed, resulting in a skipped item. | +|processSkipCount| The number of times `process` has failed, resulting in a skipped item. | +| filterCount | The number of items that have been 'filtered' by the `ItemProcessor`. | +| writeSkipCount | The number of times `write` has failed, resulting in a skipped item. | + +### ExecutionContext + +An `ExecutionContext` represents a collection of key/value pairs that are persisted and +controlled by the framework in order to allow developers a place to store persistent +state that is scoped to a `StepExecution` object or a `JobExecution` object. For those +familiar with Quartz, it is very similar to JobDataMap. The best usage example is to +facilitate restart. Using flat file input as an example, while processing individual +lines, the framework periodically persists the `ExecutionContext` at commit points. Doing +so allows the `ItemReader` to store its state in case a fatal error occurs during the run +or even if the power goes out. All that is needed is to put the current number of lines +read into the context, as shown in the following example, and the framework will do the +rest: + +``` +executionContext.putLong(getKey(LINES_READ_COUNT), reader.getPosition()); +``` + +Using the EndOfDay example from the `Job` Stereotypes section as an example, assume there +is one step, 'loadData', that loads a file into the database. After the first failed run, +the metadata tables would look like the following example: + +|JOB\_INST\_ID| JOB\_NAME | +|-------------|-----------| +| 1 |EndOfDayJob| + +|JOB\_INST\_ID|TYPE\_CD| KEY\_NAME |DATE\_VAL | +|-------------|--------|-------------|----------| +| 1 | DATE |schedule.Date|2017-01-01| + +|JOB\_EXEC\_ID|JOB\_INST\_ID| START\_TIME | END\_TIME |STATUS| +|-------------|-------------|----------------|----------------|------| +| 1 | 1 |2017-01-01 21:00|2017-01-01 21:30|FAILED| + +|STEP\_EXEC\_ID|JOB\_EXEC\_ID|STEP\_NAME| START\_TIME | END\_TIME |STATUS| +|--------------|-------------|----------|----------------|----------------|------| +| 1 | 1 | loadData |2017-01-01 21:00|2017-01-01 21:30|FAILED| + +|STEP\_EXEC\_ID| SHORT\_CONTEXT | +|--------------|-------------------| +| 1 |{piece.count=40321}| + +In the preceding case, the `Step` ran for 30 minutes and processed 40,321 'pieces', which +would represent lines in a file in this scenario. This value is updated just before each +commit by the framework and can contain multiple rows corresponding to entries within the`ExecutionContext`. Being notified before a commit requires one of the various`StepListener` implementations (or an `ItemStream`), which are discussed in more detail +later in this guide. As with the previous example, it is assumed that the `Job` is +restarted the next day. When it is restarted, the values from the `ExecutionContext` of +the last run are reconstituted from the database. When the `ItemReader` is opened, it can +check to see if it has any stored state in the context and initialize itself from there, +as shown in the following example: + +``` +if (executionContext.containsKey(getKey(LINES_READ_COUNT))) { + log.debug("Initializing for restart. 
Restart data is: " + executionContext); + + long lineCount = executionContext.getLong(getKey(LINES_READ_COUNT)); + + LineReader reader = getReader(); + + Object record = ""; + while (reader.getPosition() < lineCount && record != null) { + record = readLine(); + } +} +``` + +In this case, after the above code runs, the current line is 40,322, allowing the `Step`to start again from where it left off. The `ExecutionContext` can also be used for +statistics that need to be persisted about the run itself. For example, if a flat file +contains orders for processing that exist across multiple lines, it may be necessary to +store how many orders have been processed (which is much different from the number of +lines read), so that an email can be sent at the end of the `Step` with the total number +of orders processed in the body. The framework handles storing this for the developer, in +order to correctly scope it with an individual `JobInstance`. It can be very difficult to +know whether an existing `ExecutionContext` should be used or not. For example, using the +'EndOfDay' example from above, when the 01-01 run starts again for the second time, the +framework recognizes that it is the same `JobInstance` and on an individual `Step` basis, +pulls the `ExecutionContext` out of the database, and hands it (as part of the`StepExecution`) to the `Step` itself. Conversely, for the 01-02 run, the framework +recognizes that it is a different instance, so an empty context must be handed to the`Step`. There are many of these types of determinations that the framework makes for the +developer, to ensure the state is given to them at the correct time. It is also important +to note that exactly one `ExecutionContext` exists per `StepExecution` at any given time. +Clients of the `ExecutionContext` should be careful, because this creates a shared +keyspace. As a result, care should be taken when putting values in to ensure no data is +overwritten. However, the `Step` stores absolutely no data in the context, so there is no +way to adversely affect the framework. + +It is also important to note that there is at least one `ExecutionContext` per`JobExecution` and one for every `StepExecution`. For example, consider the following +code snippet: + +``` +ExecutionContext ecStep = stepExecution.getExecutionContext(); +ExecutionContext ecJob = jobExecution.getExecutionContext(); +//ecStep does not equal ecJob +``` + +As noted in the comment, `ecStep` does not equal `ecJob`. They are two different`ExecutionContexts`. The one scoped to the `Step` is saved at every commit point in the`Step`, whereas the one scoped to the Job is saved in between every `Step` execution. + +### JobRepository + +`JobRepository` is the persistence mechanism for all of the Stereotypes mentioned above. +It provides CRUD operations for `JobLauncher`, `Job`, and `Step` implementations. When a`Job` is first launched, a `JobExecution` is obtained from the repository, and, during +the course of execution, `StepExecution` and `JobExecution` implementations are persisted +by passing them to the repository. + +The Spring Batch XML namespace provides support for configuring a `JobRepository` instance +with the `` tag, as shown in the following example: + +``` + +``` + +When using Java configuration, the `@EnableBatchProcessing` annotation provides a`JobRepository` as one of the components automatically configured out of the box. 
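+
+For example, a configuration class as minimal as the following sketch is enough to have a `JobRepository` (along with a `JobLauncher` and the `JobBuilderFactory` and `StepBuilderFactory` builders used throughout this guide) created automatically:
+
+```
+@Configuration
+@EnableBatchProcessing
+public class BatchConfiguration {
+    // No explicit JobRepository bean is required: @EnableBatchProcessing
+    // contributes jobRepository, jobLauncher, jobBuilderFactory, and
+    // stepBuilderFactory beans to the application context.
+}
+```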
+ +### JobLauncher + +`JobLauncher` represents a simple interface for launching a `Job` with a given set of`JobParameters`, as shown in the following example: + +``` +public interface JobLauncher { + +public JobExecution run(Job job, JobParameters jobParameters) + throws JobExecutionAlreadyRunningException, JobRestartException, + JobInstanceAlreadyCompleteException, JobParametersInvalidException; +} +``` + +It is expected that implementations obtain a valid `JobExecution` from the`JobRepository` and execute the `Job`. + +### Item Reader + +`ItemReader` is an abstraction that represents the retrieval of input for a `Step`, one +item at a time. When the `ItemReader` has exhausted the items it can provide, it +indicates this by returning `null`. More details about the `ItemReader` interface and its +various implementations can be found in[Readers And Writers](readersAndWriters.html#readersAndWriters). + +### Item Writer + +`ItemWriter` is an abstraction that represents the output of a `Step`, one batch or chunk +of items at a time. Generally, an `ItemWriter` has no knowledge of the input it should +receive next and knows only the item that was passed in its current invocation. More +details about the `ItemWriter` interface and its various implementations can be found in[Readers And Writers](readersAndWriters.html#readersAndWriters). + +### Item Processor + +`ItemProcessor` is an abstraction that represents the business processing of an item. +While the `ItemReader` reads one item, and the `ItemWriter` writes them, the`ItemProcessor` provides an access point to transform or apply other business processing. +If, while processing the item, it is determined that the item is not valid, returning`null` indicates that the item should not be written out. More details about the`ItemProcessor` interface can be found in[Readers And Writers](readersAndWriters.html#readersAndWriters). + +### Batch Namespace + +Many of the domain concepts listed previously need to be configured in a Spring`ApplicationContext`. While there are implementations of the interfaces above that can be +used in a standard bean definition, a namespace has been provided for ease of +configuration, as shown in the following example: + +``` + + + + + + + + + + + +``` + +As long as the batch namespace has been declared, any of its elements can be used. More +information on configuring a Job can be found in [Configuring and +Running a Job](job.html#configureJob). More information on configuring a `Step` can be found in[Configuring a Step](step.html#configureStep). \ No newline at end of file diff --git a/docs/en/spring-batch/glossary.md b/docs/en/spring-batch/glossary.md new file mode 100644 index 0000000000000000000000000000000000000000..6f43da1bc4055539a564f5309dd9fc1d8749f638 --- /dev/null +++ b/docs/en/spring-batch/glossary.md @@ -0,0 +1,122 @@ +# Glossary + +## Appendix A: Glossary + +### Spring Batch Glossary + +Batch + +An accumulation of business transactions over time. + +Batch Application Style + +Term used to designate batch as an application style in its own right, similar to +online, Web, or SOA. It has standard elements of input, validation, transformation of +information to business model, business processing, and output. In addition, it +requires monitoring at a macro level. + +Batch Processing + +The handling of a batch of many business transactions that have accumulated over a +period of time (such as an hour, a day, a week, a month, or a year). 
It is the +application of a process or set of processes to many data entities or objects in a +repetitive and predictable fashion with either no manual element or a separate manual +element for error processing. + +Batch Window + +The time frame within which a batch job must complete. This can be constrained by other +systems coming online, other dependent jobs needing to execute, or other factors +specific to the batch environment. + +Step + +The main batch task or unit of work. It initializes the business logic and controls the +transaction environment, based on commit interval setting and other factors. + +Tasklet + +A component created by an application developer to process the business logic for a +Step. + +Batch Job Type + +Job types describe application of jobs for particular types of processing. Common areas +are interface processing (typically flat files), forms processing (either for online +PDF generation or print formats), and report processing. + +Driving Query + +A driving query identifies the set of work for a job to do. The job then breaks that +work into individual units of work. For instance, a driving query might be to identify +all financial transactions that have a status of "pending transmission" and send them +to a partner system. The driving query returns a set of record IDs to process. Each +record ID then becomes a unit of work. A driving query may involve a join (if the +criteria for selection falls across two or more tables) or it may work with a single +table. + +Item + +An item represents the smallest amount of complete data for processing. In the simplest +terms, this might be a line in a file, a row in a database table, or a particular +element in an XML file. + +Logical Unit of Work (LUW) + +A batch job iterates through a driving query (or other input source, such as a file) to +perform the set of work that the job must accomplish. Each iteration of work performed +is a unit of work. + +Commit Interval + +A set of LUWs processed within a single transaction. + +Partitioning + +Splitting a job into multiple threads where each thread is responsible for a subset of +the overall data to be processed. The threads of execution may be within the same JVM +or they may span JVMs in a clustered environment that supports workload balancing. + +Staging Table + +A table that holds temporary data while it is being processed. + +Restartable + +A job that can be executed again and assumes the same identity as when run initially. +In other words, it has the same job instance ID. + +Rerunnable + +A job that is restartable and manages its own state in terms of the previous run’s +record processing. An example of a rerunnable step is one based on a driving query. If +the driving query can be formed so that it limits the processed rows when the job is +restarted, then it is re-runnable. This is managed by the application logic. Often, a +condition is added to the `where` statement to limit the rows returned by the driving +query with logic resembling "and processedFlag != true". + +Repeat + +One of the most basic units of batch processing, it defines the repeated calling of a +portion of code until it is finished and while there is no error. Typically, a batch +process would be repeatable as long as there is input. + +Retry + +Simplifies the execution of operations with retry semantics most frequently associated +with handling transactional output exceptions. &#13;
Retry is slightly different from repeat: rather than continually calling a block of code, retry is stateful and continually calls the same block of code with the same input, until it either succeeds or some type of retry limit has been exceeded. It is generally useful only when a subsequent invocation of the operation might succeed because something in the environment has improved.

Recover

Recover operations handle an exception in such a way that a repeat process is able to continue.

Skip

Skip is a recovery strategy often used on file input sources as the strategy for ignoring bad input records that failed validation.
\ No newline at end of file
diff --git a/docs/en/spring-batch/job.md b/docs/en/spring-batch/job.md
new file mode 100644
index 0000000000000000000000000000000000000000..ebc75c5b814d7c61153d09f216a370c387888796
--- /dev/null
+++ b/docs/en/spring-batch/job.md
@@ -0,0 +1,1357 @@

# Configuring and Running a Job

## Configuring and Running a Job

In the [domain section](domain.html#domainLanguageOfBatch), the overall architecture design was discussed, using the following diagram as a guide:

![Figure 2.1: Batch Stereotypes](https://docs.spring.io/spring-batch/docs/current/reference/html/images/spring-batch-reference-model.png)

Figure 1. Batch Stereotypes

While the `Job` object may seem like a simple container for steps, there are many configuration options of which a developer must be aware. Furthermore, there are many considerations for how a `Job` will be run and how its meta-data will be stored during that run. This chapter explains the various configuration options and runtime concerns of a `Job`.

### Configuring a Job

There are multiple implementations of the [`Job`](#configureJob) interface. However, the builders abstract away the differences in configuration.

```
@Bean
public Job footballJob() {
    return this.jobBuilderFactory.get("footballJob")
                     .start(playerLoad())
                     .next(gameLoad())
                     .next(playerSummarization())
                     .build();
}
```

A `Job` (and typically any `Step` within it) requires a `JobRepository`. The configuration of the `JobRepository` is handled via the [`BatchConfigurer`](#javaConfig).

The above example illustrates a `Job` that consists of three `Step` instances. The job-related builders can also contain other elements that help with parallelization (`Split`), declarative flow control (`Decision`), and externalization of flow definitions (`Flow`).

Whether you use Java or XML, there are multiple implementations of the [`Job`](#configureJob) interface. However, the namespace abstracts away the differences in configuration. It has only three required dependencies: a name, a `JobRepository`, and a list of `Step` instances.

```
<job id="footballJob">
    <step id="playerload"          parent="s1" next="gameLoad"/>
    <step id="gameLoad"            parent="s2" next="playerSummarization"/>
    <step id="playerSummarization" parent="s3"/>
</job>
```

The examples here use a parent bean definition to create the steps. See the section on [step configuration](step.html#configureStep) for more options when declaring specific step details inline. The XML namespace defaults to referencing a repository with an id of 'jobRepository', which is a sensible default. However, this can be overridden explicitly:

```
<job id="footballJob" job-repository="specialRepository">
    <step id="playerload"          parent="s1" next="gameLoad"/>
    <step id="gameLoad"            parent="s2" next="playerSummarization"/>
    <step id="playerSummarization" parent="s3"/>
</job>
```

In addition to steps, a job configuration can contain other elements that help with parallelization (`<split>`), declarative flow control (`<decision>`), and externalization of flow definitions (`<flow>`).

#### Restartability

One key issue when executing a batch job concerns the behavior of a `Job` when it is restarted.
The launching of a `Job` is considered to be a 'restart' if a `JobExecution` already exists for the particular `JobInstance`. Ideally, all jobs should be able to start up where they left off, but there are scenarios where this is not possible. *It is entirely up to the developer to ensure that a new `JobInstance` is created in this scenario.* However, Spring Batch does provide some help. If a `Job` should never be restarted, but should always be run as part of a new `JobInstance`, then the restartable property may be set to 'false'.

The following example shows how to set the `restartable` field to `false` in XML:

XML Configuration

```
<job id="footballJob" restartable="false">
    ...
</job>
```

The following example shows how to set the `restartable` field to `false` in Java:

Java Configuration

```
@Bean
public Job footballJob() {
    return this.jobBuilderFactory.get("footballJob")
                     .preventRestart()
                     ...
                     .build();
}
```

To phrase it another way, setting restartable to false means "this `Job` does not support being started again". Restarting a `Job` that is not restartable causes a `JobRestartException` to be thrown.

```
Job job = new SimpleJob();
job.setRestartable(false);

JobParameters jobParameters = new JobParameters();

JobExecution firstExecution = jobRepository.createJobExecution(job, jobParameters);
jobRepository.saveOrUpdate(firstExecution);

try {
    jobRepository.createJobExecution(job, jobParameters);
    fail();
}
catch (JobRestartException e) {
    // expected
}
```

This snippet of JUnit code shows that attempting to create a `JobExecution` for a non-restartable job causes no issues the first time. However, the second attempt throws a `JobRestartException`.

#### Intercepting Job Execution

During the course of the execution of a Job, it may be useful to be notified of various events in its lifecycle so that custom code may be executed. The `SimpleJob` allows for this by calling a `JobExecutionListener` at the appropriate time:

```
public interface JobExecutionListener {

    void beforeJob(JobExecution jobExecution);

    void afterJob(JobExecution jobExecution);

}
```

Listeners can be added to a `SimpleJob` by setting listeners on the job.

The following example shows how to add a listener element to an XML job definition:

XML Configuration

```
<job id="footballJob">
    <step id="playerload"          parent="s1" next="gameLoad"/>
    <step id="gameLoad"            parent="s2" next="playerSummarization"/>
    <step id="playerSummarization" parent="s3"/>
    <listeners>
        <listener ref="sampleListener"/>
    </listeners>
</job>
```

The following example shows how to add a listener method to a Java job definition:

Java Configuration

```
@Bean
public Job footballJob() {
    return this.jobBuilderFactory.get("footballJob")
                     .listener(sampleListener())
                     ...
                     .build();
}
```

It should be noted that the `afterJob` method is called regardless of the success or failure of the `Job`. If success or failure needs to be determined, it can be obtained from the `JobExecution`, as follows:

```
public void afterJob(JobExecution jobExecution){
    if (jobExecution.getStatus() == BatchStatus.COMPLETED ) {
        //job success
    }
    else if (jobExecution.getStatus() == BatchStatus.FAILED) {
        //job failure
    }
}
```

The annotations corresponding to this interface are:

* `@BeforeJob`

* `@AfterJob`
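
For illustration, a minimal annotation-based listener might look like the following sketch (not from the reference documentation; the class name matches the `sampleListener` bean referenced above, but the body is invented):

```
import org.springframework.batch.core.BatchStatus;
import org.springframework.batch.core.JobExecution;
import org.springframework.batch.core.annotation.AfterJob;
import org.springframework.batch.core.annotation.BeforeJob;

public class SampleListener {

    @BeforeJob
    public void beforeJob(JobExecution jobExecution) {
        // runs before the first Step executes
        System.out.println("Starting job " + jobExecution.getJobInstance().getJobName());
    }

    @AfterJob
    public void afterJob(JobExecution jobExecution) {
        // runs whether the Job succeeded or failed
        if (jobExecution.getStatus() == BatchStatus.FAILED) {
            System.err.println("Job failed with exit status " + jobExecution.getExitStatus());
        }
    }
}
```
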

#### Inheriting from a Parent Job

If a group of Jobs share similar, but not identical, configurations, it may be helpful to define a "parent" `Job` from which the concrete Jobs may inherit properties. Similar to class inheritance in Java, the "child" `Job` combines its elements and attributes with the parent's.

In the following example, "baseJob" is an abstract `Job` definition that defines only a list of listeners. The `Job` "job1" is a concrete definition that inherits the list of listeners from "baseJob" and merges it with its own list of listeners to produce a `Job` with two listeners and one `Step`, "step1".

```
<job id="baseJob" abstract="true">
    <listeners>
        <listener ref="listenerOne"/>
    </listeners>
</job>

<job id="job1" parent="baseJob">
    <step id="step1" parent="standaloneStep"/>

    <listeners merge="true">
        <listener ref="listenerTwo"/>
    </listeners>
</job>
```

Please see the section on [Inheriting from a Parent Step](step.html#inheritingFromParentStep) for more detailed information.

#### JobParametersValidator

A job declared in the XML namespace or using any subclass of `AbstractJob` can optionally declare a validator for the job parameters at runtime. This is useful when, for instance, you need to assert that a job is started with all its mandatory parameters. There is a `DefaultJobParametersValidator` that can be used to constrain combinations of simple mandatory and optional parameters; for more complex constraints, you can implement the interface yourself.

The configuration of a validator is supported through the XML namespace as a child element of the job, as shown in the following example:

```
<job id="job1">
    <step id="step1" parent="standaloneStep"/>
    <validator ref="parametersValidator"/>
</job>
```

The validator can be specified as a reference (as shown earlier) or as a nested bean definition in the beans namespace.

The configuration of a validator is supported through the Java builders, as shown in the following example:

```
@Bean
public Job job1() {
    return this.jobBuilderFactory.get("job1")
                     .validator(parametersValidator())
                     ...
                     .build();
}
```
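
For illustration only, the `parametersValidator` bean used above could be a `DefaultJobParametersValidator` (from `org.springframework.batch.core.job`) declared as in the following sketch; the parameter names `run.date` and `vendor.id` are invented for the example:

```
@Bean
public DefaultJobParametersValidator parametersValidator() {
    DefaultJobParametersValidator validator = new DefaultJobParametersValidator();
    // the job fails fast unless 'run.date' is supplied; 'vendor.id' may be omitted
    validator.setRequiredKeys(new String[] {"run.date"});
    validator.setOptionalKeys(new String[] {"vendor.id"});
    return validator;
}
```
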

### Java Config

Spring 3 brought the ability to configure applications with Java instead of XML. As of Spring Batch 2.2.0, batch jobs can be configured using the same Java config. There are two components for the Java based configuration: the `@EnableBatchProcessing` annotation and two builders.

`@EnableBatchProcessing` works similarly to the other @Enable\* annotations in the Spring family. In this case, `@EnableBatchProcessing` provides a base configuration for building batch jobs. Within this base configuration, an instance of `StepScope` is created, in addition to a number of beans made available to be autowired:

* `JobRepository`: bean name "jobRepository"

* `JobLauncher`: bean name "jobLauncher"

* `JobRegistry`: bean name "jobRegistry"

* `PlatformTransactionManager`: bean name "transactionManager"

* `JobBuilderFactory`: bean name "jobBuilders"

* `StepBuilderFactory`: bean name "stepBuilders"

The core interface for this configuration is the `BatchConfigurer`. The default implementation provides the beans mentioned above and requires a `DataSource` to be provided as a bean within the context. This data source is used by the `JobRepository`. You can customize any of these beans by creating a custom implementation of the `BatchConfigurer` interface. Typically, extending the `DefaultBatchConfigurer` (which is provided if a `BatchConfigurer` is not found) and overriding the required getter is sufficient. However, implementing your own from scratch may be required. The following example shows how to provide a custom transaction manager:

```
@Bean
public BatchConfigurer batchConfigurer(DataSource dataSource) {
	return new DefaultBatchConfigurer(dataSource) {
		@Override
		public PlatformTransactionManager getTransactionManager() {
			return new MyTransactionManager();
		}
	};
}
```

| |Only one configuration class needs to have the `@EnableBatchProcessing` annotation. Once you have a class annotated with it, you will have all of the above available.|
|---|---|

With the base configuration in place, a user can use the provided builder factories to configure a job. The following example shows a two-step job configured with the `JobBuilderFactory` and the `StepBuilderFactory`:

```
@Configuration
@EnableBatchProcessing
@Import(DataSourceConfiguration.class)
public class AppConfig {

    @Autowired
    private JobBuilderFactory jobs;

    @Autowired
    private StepBuilderFactory steps;

    @Bean
    public Job job(@Qualifier("step1") Step step1, @Qualifier("step2") Step step2) {
        return jobs.get("myJob").start(step1).next(step2).build();
    }

    @Bean
    protected Step step1(ItemReader<String> reader,
                         ItemProcessor<String, String> processor,
                         ItemWriter<String> writer) {
        return steps.get("step1")
            .<String, String>chunk(10)
            .reader(reader)
            .processor(processor)
            .writer(writer)
            .build();
    }

    @Bean
    protected Step step2(Tasklet tasklet) {
        return steps.get("step2")
            .tasklet(tasklet)
            .build();
    }
}
```

### Configuring a JobRepository

When using `@EnableBatchProcessing`, a `JobRepository` is provided out of the box for you. This section addresses configuring your own.

As described earlier, the [`JobRepository`](#configureJob) is used for basic CRUD operations of the various persisted domain objects within Spring Batch, such as `JobExecution` and `StepExecution`. It is required by many of the major framework features, such as the `JobLauncher`, `Job`, and `Step`.

The batch namespace abstracts away many of the implementation details of the `JobRepository` implementations and their collaborators. However, there are still a few configuration options available, as shown in the following example:

XML Configuration

```
<job-repository id="jobRepository"
    data-source="dataSource"
    transaction-manager="transactionManager"
    isolation-level-for-create="SERIALIZABLE"
    table-prefix="BATCH_"
    max-varchar-length="1000"/>
```

None of the configuration options listed above are required except the `id`. If they are not set, the defaults shown above will be used. They are shown above for awareness purposes. The `max-varchar-length` defaults to 2500, which is the length of the long `VARCHAR` columns in the [sample schema scripts](schema-appendix.html#metaDataSchemaOverview).

When using Java configuration, a `JobRepository` is provided for you. A JDBC based one is provided out of the box if a `DataSource` is provided, or the `Map` based one if not. However, you can customize the configuration of the `JobRepository` through an implementation of the `BatchConfigurer` interface.

Java Configuration

```
...
// This would reside in your BatchConfigurer implementation
@Override
protected JobRepository createJobRepository() throws Exception {
    JobRepositoryFactoryBean factory = new JobRepositoryFactoryBean();
    factory.setDataSource(dataSource);
    factory.setTransactionManager(transactionManager);
    factory.setIsolationLevelForCreate("ISOLATION_SERIALIZABLE");
    factory.setTablePrefix("BATCH_");
    factory.setMaxVarCharLength(1000);
    return factory.getObject();
}
...
```

None of the configuration options listed above are required except the dataSource and transactionManager. If they are not set, the defaults shown above will be used. They are shown above for awareness purposes.
The max varchar length defaults to 2500, which is the length of the long `VARCHAR` columns in the [sample schema scripts](schema-appendix.html#metaDataSchemaOverview).

#### Transaction Configuration for the JobRepository

If the namespace or the provided `FactoryBean` is used, transactional advice is automatically created around the repository. This is to ensure that the batch meta-data, including state that is necessary for restarts after a failure, is persisted correctly. The behavior of the framework is not well defined if the repository methods are not transactional. The isolation level in the `create*` method attributes is specified separately to ensure that, when jobs are launched, if two processes try to launch the same job at the same time, only one succeeds. The default isolation level for that method is `SERIALIZABLE`, which is quite aggressive. `READ_COMMITTED` would work just as well. `READ_UNCOMMITTED` would be fine if two processes are not likely to collide in this way. However, since a call to the `create*` method is quite short, it is unlikely that `SERIALIZABLE` causes problems, as long as the database platform supports it. However, this can be overridden.

The following example shows how to override the isolation level in XML:

XML Configuration

```
<job-repository id="jobRepository"
                isolation-level-for-create="REPEATABLE_READ" />
```

The following example shows how to override the isolation level in Java:

Java Configuration

```
// This would reside in your BatchConfigurer implementation
@Override
protected JobRepository createJobRepository() throws Exception {
    JobRepositoryFactoryBean factory = new JobRepositoryFactoryBean();
    factory.setDataSource(dataSource);
    factory.setTransactionManager(transactionManager);
    factory.setIsolationLevelForCreate("ISOLATION_REPEATABLE_READ");
    return factory.getObject();
}
```

If the namespace or factory beans are not used, then it is also essential to configure the transactional behavior of the repository using AOP.

The following example shows how to configure the transactional behavior of the repository in XML:

XML Configuration

```
<aop:config>
    <aop:advisor
        pointcut="execution(* org.springframework.batch.core..*Repository+.*(..))"
        advice-ref="txAdvice" />
</aop:config>

<tx:advice id="txAdvice" transaction-manager="transactionManager">
    <tx:attributes>
        <tx:method name="*" />
    </tx:attributes>
</tx:advice>
```

The preceding fragment can be used nearly as is, with almost no changes. Remember also to include the appropriate namespace declarations and to make sure spring-tx and spring-aop (or the whole of Spring) are on the classpath.

The following example shows how to configure the transactional behavior of the repository in Java:

Java Configuration

```
@Bean
public TransactionProxyFactoryBean baseProxy() {
	TransactionProxyFactoryBean transactionProxyFactoryBean = new TransactionProxyFactoryBean();
	Properties transactionAttributes = new Properties();
	transactionAttributes.setProperty("*", "PROPAGATION_REQUIRED");
	transactionProxyFactoryBean.setTransactionAttributes(transactionAttributes);
	transactionProxyFactoryBean.setTarget(jobRepository());
	transactionProxyFactoryBean.setTransactionManager(transactionManager());
	return transactionProxyFactoryBean;
}
```

#### Changing the Table Prefix

Another modifiable property of the `JobRepository` is the table prefix of the meta-data tables. By default, they are all prefaced with `BATCH_`. `BATCH_JOB_EXECUTION` and `BATCH_STEP_EXECUTION` are two examples. However, there are potential reasons to modify this prefix.
If the schema name needs to be prepended to the table names, or if more than one set of meta-data tables is needed within the same schema, then the table prefix needs to be changed.

The following example shows how to change the table prefix in XML:

XML Configuration

```
<job-repository id="jobRepository"
                table-prefix="SYSTEM.TEST_" />
```

The following example shows how to change the table prefix in Java:

Java Configuration

```
// This would reside in your BatchConfigurer implementation
@Override
protected JobRepository createJobRepository() throws Exception {
    JobRepositoryFactoryBean factory = new JobRepositoryFactoryBean();
    factory.setDataSource(dataSource);
    factory.setTransactionManager(transactionManager);
    factory.setTablePrefix("SYSTEM.TEST_");
    return factory.getObject();
}
```

Given the preceding changes, every query to the meta-data tables is prefixed with `SYSTEM.TEST_`. `BATCH_JOB_EXECUTION` is referred to as `SYSTEM.TEST_JOB_EXECUTION`.

| |Only the table prefix is configurable. The table and column names are not.|
|---|---|

#### In-Memory Repository

There are scenarios in which you may not want to persist your domain objects to the database. One reason may be speed; storing domain objects at each commit point takes extra time. Another reason may be that you just don't need to persist status for a particular job. For this reason, Spring Batch provides an in-memory `Map` version of the job repository.

The following example shows the inclusion of `MapJobRepositoryFactoryBean` in XML:

XML Configuration

```
<bean id="jobRepository"
    class="org.springframework.batch.core.repository.support.MapJobRepositoryFactoryBean">
    <property name="transactionManager" ref="transactionManager"/>
</bean>
```

The following example shows the inclusion of `MapJobRepositoryFactoryBean` in Java:

Java Configuration

```
// This would reside in your BatchConfigurer implementation
@Override
protected JobRepository createJobRepository() throws Exception {
    MapJobRepositoryFactoryBean factory = new MapJobRepositoryFactoryBean();
    factory.setTransactionManager(transactionManager);
    return factory.getObject();
}
```

Note that the in-memory repository is volatile and so does not allow restart between JVM instances. It also cannot guarantee that two job instances with the same parameters are not launched simultaneously, and it is not suitable for use in a multi-threaded Job or a locally partitioned `Step`. So use the database version of the repository wherever you need those features.

However, it does require a transaction manager to be defined, because there are rollback semantics within the repository, and because the business logic might still be transactional (such as RDBMS access). For testing purposes, many people find the `ResourcelessTransactionManager` useful.

| |The `MapJobRepositoryFactoryBean` and related classes have been deprecated in v4 and are scheduled
for removal in v5. If you want to use an in-memory job repository, you can use an embedded database
like H2, Apache Derby or HSQLDB. There are several ways to create an embedded database and use it in
your Spring Batch application. One way to do that is by using the APIs from [Spring JDBC](https://docs.spring.io/spring-framework/docs/current/reference/html/data-access.html#jdbc-embedded-database-support):

```
@Bean
public DataSource dataSource() {
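	// Build an in-memory H2 database and initialize it with the Spring Batch meta-data schema scripts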
	return new EmbeddedDatabaseBuilder()
			.setType(EmbeddedDatabaseType.H2)
			.addScript("/org/springframework/batch/core/schema-drop-h2.sql")
			.addScript("/org/springframework/batch/core/schema-h2.sql")
			.build();
}
```

Once you have defined your embedded datasource as a bean in your application context, it should be picked
up automatically if you use `@EnableBatchProcessing`. Otherwise you can configure it manually using the
JDBC based `JobRepositoryFactoryBean` as shown in the [Configuring a JobRepository section](#configuringJobRepository).|
|---|---|

#### Non-standard Database Types in a Repository

If you are using a database platform that is not in the list of supported platforms, you may be able to use one of the supported types, if the SQL variant is close enough. To do this, you can use the raw `JobRepositoryFactoryBean` instead of the namespace shortcut and use it to set the database type to the closest match.

The following example shows how to use `JobRepositoryFactoryBean` to set the database type to the closest match in XML:

XML Configuration

```
<bean id="jobRepository" class="org...JobRepositoryFactoryBean">
    <property name="databaseType" value="db2"/>
    <property name="dataSource" ref="dataSource"/>
</bean>
```

The following example shows how to use `JobRepositoryFactoryBean` to set the database type to the closest match in Java:

Java Configuration

```
// This would reside in your BatchConfigurer implementation
@Override
protected JobRepository createJobRepository() throws Exception {
    JobRepositoryFactoryBean factory = new JobRepositoryFactoryBean();
    factory.setDataSource(dataSource);
    factory.setDatabaseType("db2");
    factory.setTransactionManager(transactionManager);
    return factory.getObject();
}
```

(The `JobRepositoryFactoryBean` tries to auto-detect the database type from the `DataSource` if it is not specified.) The major differences between platforms are mainly accounted for by the strategy for incrementing primary keys, so it might often be necessary to override the `incrementerFactory` as well (using one of the standard implementations from the Spring Framework).

If even that does not work, or you are not using an RDBMS, then the only option may be to implement the various `Dao` interfaces that the `SimpleJobRepository` depends on and wire one up manually in the normal Spring way.

### Configuring a JobLauncher

When using `@EnableBatchProcessing`, a `JobLauncher` is provided out of the box for you. This section addresses configuring your own.

The most basic implementation of the `JobLauncher` interface is the `SimpleJobLauncher`. Its only required dependency is a `JobRepository`, in order to obtain an execution.

The following example shows a `SimpleJobLauncher` in XML:

XML Configuration

```
<bean id="jobLauncher"
      class="org.springframework.batch.core.launch.support.SimpleJobLauncher">
    <property name="jobRepository" ref="jobRepository" />
</bean>
```

The following example shows a `SimpleJobLauncher` in Java:

Java Configuration

```
...
// This would reside in your BatchConfigurer implementation
@Override
protected JobLauncher createJobLauncher() throws Exception {
	SimpleJobLauncher jobLauncher = new SimpleJobLauncher();
	jobLauncher.setJobRepository(jobRepository);
	jobLauncher.afterPropertiesSet();
	return jobLauncher;
}
...
```

Once a [JobExecution](domain.html#domainLanguageOfBatch) is obtained, it is passed to the execute method of `Job`, ultimately returning the `JobExecution` to the caller, as shown in the following image:

![Job Launcher Sequence](https://docs.spring.io/spring-batch/docs/current/reference/html/images/job-launcher-sequence-sync.png)

Figure 2. Job Launcher Sequence

The sequence is straightforward and works well when launched from a scheduler. However, issues arise when trying to launch from an HTTP request. In this scenario, the launching needs to be done asynchronously so that the `SimpleJobLauncher` returns immediately to its caller. This is because it is not good practice to keep an HTTP request open for the amount of time needed by long-running processes such as batch jobs. The following image shows an example sequence:

![Async Job Launcher Sequence](https://docs.spring.io/spring-batch/docs/current/reference/html/images/job-launcher-sequence-async.png)

Figure 3. Asynchronous Job Launcher Sequence

The `SimpleJobLauncher` can be configured to allow for this scenario by configuring a `TaskExecutor`.

The following XML example shows a `SimpleJobLauncher` configured to return immediately:

XML Configuration

```
<bean id="jobLauncher"
      class="org.springframework.batch.core.launch.support.SimpleJobLauncher">
    <property name="jobRepository" ref="jobRepository" />
    <property name="taskExecutor">
        <bean class="org.springframework.core.task.SimpleAsyncTaskExecutor" />
    </property>
</bean>
```

The following Java example shows a `SimpleJobLauncher` configured to return immediately:

Java Configuration

```
@Bean
public JobLauncher jobLauncher() {
	SimpleJobLauncher jobLauncher = new SimpleJobLauncher();
	jobLauncher.setJobRepository(jobRepository());
	jobLauncher.setTaskExecutor(new SimpleAsyncTaskExecutor());
	jobLauncher.afterPropertiesSet();
	return jobLauncher;
}
```

Any implementation of the Spring `TaskExecutor` interface can be used to control how jobs are asynchronously executed.

### Running a Job

At a minimum, launching a batch job requires two things: the `Job` to be launched and a `JobLauncher`. Both can be contained within the same context or different contexts. For example, if launching jobs from the command line, a new JVM is instantiated for each `Job`, and thus every job has its own `JobLauncher`. However, if running from within a web container within the scope of an `HttpRequest`, there is usually one `JobLauncher`, configured for asynchronous job launching, that multiple requests invoke to launch their jobs.

#### Running Jobs from the Command Line

For users that want to run their jobs from an enterprise scheduler, the command line is the primary interface. This is because most schedulers (with the exception of Quartz, unless using `NativeJob`) work directly with operating system processes, primarily kicked off with shell scripts. There are many ways to launch a Java process besides a shell script, such as Perl, Ruby, or even 'build tools' such as Ant or Maven. However, because most people are familiar with shell scripts, this example focuses on them.
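
To make the following discussion concrete, here is a hedged sketch (not from the reference documentation) of what such an entry point has to do by hand; the context file name and job name are invented, and the `CommandLineJobRunner` described next automates exactly these steps:

```
import org.springframework.batch.core.BatchStatus;
import org.springframework.batch.core.Job;
import org.springframework.batch.core.JobExecution;
import org.springframework.batch.core.JobParameters;
import org.springframework.batch.core.JobParametersBuilder;
import org.springframework.batch.core.launch.JobLauncher;
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;

public class ManualJobMain {

    public static void main(String[] args) throws Exception {
        // 1. Load the ApplicationContext that defines the Job and the JobLauncher
        ApplicationContext context = new ClassPathXmlApplicationContext("endOfDayJob.xml");

        // 2. Turn command line arguments into JobParameters
        JobParameters jobParameters = new JobParametersBuilder()
                .addString("schedule.date", args.length > 0 ? args[0] : "2007/05/05")
                .toJobParameters();

        // 3. Locate the Job and the JobLauncher in the context
        Job job = context.getBean("endOfDay", Job.class);
        JobLauncher jobLauncher = context.getBean(JobLauncher.class);

        // 4. Launch and translate the result into a process exit code for the scheduler
        JobExecution execution = jobLauncher.run(job, jobParameters);
        System.exit(execution.getStatus() == BatchStatus.COMPLETED ? 0 : 1);
    }
}
```
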

##### The CommandLineJobRunner

Because the script launching the job must kick off a Java Virtual Machine, there needs to be a class with a main method to act as the primary entry point. Spring Batch provides an implementation that serves just this purpose: `CommandLineJobRunner`. It is important to note that this is just one way to bootstrap your application; there are many ways to launch a Java process, and this class should in no way be viewed as definitive. The `CommandLineJobRunner` performs four tasks:

* Load the appropriate `ApplicationContext`

* Parse command line arguments into `JobParameters`

* Locate the appropriate job based on arguments

* Use the `JobLauncher` provided in the application context to launch the job

All of these tasks are accomplished using only the arguments passed in. The following are required arguments:
create an `ApplicationContext`. This file
should contain everything needed to run the complete
Job| +|-------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +|jobName| The name of the job to be run. | + +These arguments must be passed in with the path first and the name second. All arguments +after these are considered to be job parameters, are turned into a JobParameters object, +and must be in the format of 'name=value'. + +The following example shows a date passed as a job parameter to a job defied in XML: + +``` +key/value pairs to identifying job parameters. However, it is possible to explicitly specify
which job parameters are identifying and which are not by prefixing them with `+` or `-` respectively.

In the following example, `schedule.date` is an identifying job parameter while `vendor.id` is not:

```
+schedule.date(date)=2007/05/05 -vendor.id=123
```

This behaviour can be overridden by using a custom `JobParametersConverter`.|
|---|---|

In most cases, you would want to use a manifest to declare your main class in a jar, but, for simplicity, the class was used directly. This example uses the same 'EndOfDay' example from [domainLanguageOfBatch](domain.html#domainLanguageOfBatch). The first argument is 'endOfDayJob.xml', which is the Spring ApplicationContext containing the `Job`. The second argument, 'endOfDay', represents the job name. The final argument, 'schedule.date(date)=2007/05/05', is converted into a `JobParameters` object.

The following example shows a sample configuration for `endOfDay` in XML:

```
<job id="endOfDay">
    <step id="step1" parent="simpleStep" />
</job>

<!-- Launcher details removed for clarity -->
<beans:bean id="jobLauncher"
            class="org.springframework.batch.core.launch.support.SimpleJobLauncher" />
```

In most cases, you would want to use a manifest to declare your main class in a jar, but, for simplicity, the class was used directly. This example uses the same 'EndOfDay' example from [domainLanguageOfBatch](domain.html#domainLanguageOfBatch). The first argument is 'io.spring.EndOfDayJobConfiguration', which is the fully qualified class name of the configuration class containing the Job. The second argument, 'endOfDay', represents the job name. The final argument, 'schedule.date(date)=2007/05/05', is converted into a `JobParameters` object.

The following example shows a sample configuration for `endOfDay` in Java:

```
@Configuration
@EnableBatchProcessing
public class EndOfDayJobConfiguration {

    @Autowired
    private JobBuilderFactory jobBuilderFactory;

    @Autowired
    private StepBuilderFactory stepBuilderFactory;

    @Bean
    public Job endOfDay() {
        return this.jobBuilderFactory.get("endOfDay")
    				.start(step1())
    				.build();
    }

    @Bean
    public Step step1() {
        return this.stepBuilderFactory.get("step1")
    				.tasklet((contribution, chunkContext) -> null)
    				.build();
    }
}
```

The preceding example is overly simplistic, since there are many more requirements to run a batch job in Spring Batch in general, but it serves to show the two main requirements of the `CommandLineJobRunner`: `Job` and `JobLauncher`.
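
When launching programmatically rather than through the `CommandLineJobRunner`, the same identifying/non-identifying distinction described in the note above is available on `JobParametersBuilder`. A hedged sketch with invented values:

```
import org.springframework.batch.core.JobParameters;
import org.springframework.batch.core.JobParametersBuilder;

public class IdentifyingParametersExample {

    public static JobParameters build() {
        return new JobParametersBuilder()
                // identifying: contributes to the JobInstance identity (like '+' on the command line)
                .addString("schedule.date", "2007/05/05", true)
                // non-identifying: recorded but ignored for identity (like '-' on the command line)
                .addString("vendor.id", "123", false)
                .toJobParameters();
    }
}
```
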

##### ExitCodes

When launching a batch job from the command-line, an enterprise scheduler is often used. Most schedulers are fairly dumb and work only at the process level. This means that they only know about some operating system process, such as a shell script, that they are invoking. In this scenario, the only way to communicate back to the scheduler about the success or failure of a job is through return codes. A return code is a number that is returned to a scheduler by the process to indicate the result of the run. In the simplest case, 0 is success and 1 is failure. However, there may be more complex scenarios: if job A returns 4, kick off job B, and if it returns 5, kick off job C. This type of behavior is configured at the scheduler level, but it is important that a processing framework such as Spring Batch provide a way to return a numeric representation of the 'Exit Code' for a particular batch job. In Spring Batch, this is encapsulated within an `ExitStatus`, which is covered in more detail in Chapter 5. For the purposes of discussing exit codes, the only important thing to know is that an `ExitStatus` has an exit code property that is set by the framework (or the developer) and is returned as part of the `JobExecution` returned from the `JobLauncher`. The `CommandLineJobRunner` converts this string value to a number using the `ExitCodeMapper` interface:

```
public interface ExitCodeMapper {

    public int intValue(String exitCode);

}
```

The essential contract of an `ExitCodeMapper` is that, given a string exit code, a number representation will be returned. The default implementation used by the job runner is the `SimpleJvmExitCodeMapper`, which returns 0 for completion, 1 for generic errors, and 2 for any job runner errors such as not being able to find a `Job` in the provided context. If anything more complex than the three values above is needed, a custom implementation of the `ExitCodeMapper` interface must be supplied. Because the `CommandLineJobRunner` is the class that creates an `ApplicationContext` and thus cannot be 'wired together', any values that need to be overwritten must be autowired. This means that, if an implementation of `ExitCodeMapper` is found within the `BeanFactory`, it will be injected into the runner after the context is created. All that needs to be done to provide your own `ExitCodeMapper` is to declare the implementation as a root level bean and ensure that it is part of the `ApplicationContext` that is loaded by the runner.
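
For illustration only, a custom mapping might look like the following sketch (the extra exit statuses and the codes 4 and 5 echo the hypothetical job A/B/C scenario above and are invented):

```
import org.springframework.batch.core.launch.support.ExitCodeMapper;

public class ScheduleAwareExitCodeMapper implements ExitCodeMapper {

    @Override
    public int intValue(String exitCode) {
        // Translate framework exit codes into scheduler-specific return codes
        if ("COMPLETED".equals(exitCode)) {
            return 0;
        }
        if ("COMPLETED.KICK_OFF_B".equals(exitCode)) {
            return 4; // the scheduler reacts by starting job B
        }
        if ("COMPLETED.KICK_OFF_C".equals(exitCode)) {
            return 5; // the scheduler reacts by starting job C
        }
        return 1; // generic failure
    }
}
```
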

#### Running Jobs from within a Web Container

Historically, offline processing such as batch jobs has been launched from the command-line, as described above. However, there are many cases where launching from an `HttpRequest` is a better option. Many such use cases include reporting, ad-hoc job running, and web application support. Because a batch job is by definition long running, the most important concern is to launch the job asynchronously:

![Async Job Launcher Sequence from web container](https://docs.spring.io/spring-batch/docs/current/reference/html/images/launch-from-request.png)

Figure 4. Asynchronous Job Launcher Sequence From Web Container

The controller in this case is a Spring MVC controller (see the Spring Framework reference documentation for more information on Spring MVC). The controller launches a `Job` using a `JobLauncher` that has been configured to launch [asynchronously](#runningJobsFromWebContainer), which immediately returns a `JobExecution`. The `Job` will likely still be running; however, this nonblocking behaviour allows the controller to return immediately, which is required when handling an `HttpRequest`. An example is below:

```
@Controller
public class JobLauncherController {

    @Autowired
    JobLauncher jobLauncher;

    @Autowired
    Job job;

    @RequestMapping("/jobLauncher.html")
    public void handle() throws Exception{
        jobLauncher.run(job, new JobParameters());
    }
}
```

### Advanced Meta-Data Usage

So far, both the `JobLauncher` and `JobRepository` interfaces have been discussed. Together, they represent the simple launching of a job and basic CRUD operations of batch domain objects:

![Job Repository](https://docs.spring.io/spring-batch/docs/current/reference/html/images/job-repository.png)

Figure 5. Job Repository

A `JobLauncher` uses the `JobRepository` to create new `JobExecution` objects and run them. `Job` and `Step` implementations later use the same `JobRepository` for basic updates of the same executions during the running of a Job. The basic operations suffice for simple scenarios, but in a large batch environment with hundreds of batch jobs and complex scheduling requirements, more advanced access to the meta data is required:

![Job Repository Advanced](https://docs.spring.io/spring-batch/docs/current/reference/html/images/job-repository-advanced.png)

Figure 6. Advanced Job Repository Access

The `JobExplorer` and `JobOperator` interfaces, which are discussed below, add additional functionality for querying and controlling the meta data.

#### Querying the Repository

The most basic need before any advanced features is the ability to query the repository for existing executions. This functionality is provided by the `JobExplorer` interface:

```
public interface JobExplorer {

    List<JobInstance> getJobInstances(String jobName, int start, int count);

    JobExecution getJobExecution(Long executionId);

    StepExecution getStepExecution(Long jobExecutionId, Long stepExecutionId);

    JobInstance getJobInstance(Long instanceId);

    List<JobExecution> getJobExecutions(JobInstance jobInstance);

    Set<JobExecution> findRunningJobExecutions(String jobName);
}
```

As is evident from the method signatures above, `JobExplorer` is a read-only version of the `JobRepository`, and, like the `JobRepository`, it can be easily configured by using a factory bean.

The following example shows how to configure a `JobExplorer` in XML:

XML Configuration

```
<bean id="jobExplorer" class="org.spr...explore.support.JobExplorerFactoryBean"
      p:dataSource-ref="dataSource" />
```

The following example shows how to configure a `JobExplorer` in Java:

Java Configuration

```
...
// This would reside in your BatchConfigurer implementation
@Override
public JobExplorer getJobExplorer() throws Exception {
	JobExplorerFactoryBean factoryBean = new JobExplorerFactoryBean();
	factoryBean.setDataSource(this.dataSource);
	return factoryBean.getObject();
}
...
```

[Earlier in this chapter](#repositoryTablePrefix), we noted that the table prefix of the `JobRepository` can be modified to allow for different versions or schemas. Because the `JobExplorer` works with the same tables, it too needs the ability to set a prefix.

The following example shows how to set the table prefix for a `JobExplorer` in XML:

XML Configuration

```
<bean id="jobExplorer" class="org.spr...explore.support.JobExplorerFactoryBean"
      p:dataSource-ref="dataSource" p:tablePrefix="SYSTEM."/>
```

The following example shows how to set the table prefix for a `JobExplorer` in Java:

Java Configuration

```
...
// This would reside in your BatchConfigurer implementation
@Override
public JobExplorer getJobExplorer() throws Exception {
	JobExplorerFactoryBean factoryBean = new JobExplorerFactoryBean();
	factoryBean.setDataSource(this.dataSource);
	factoryBean.setTablePrefix("SYSTEM.");
	return factoryBean.getObject();
}
...
```
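
A hedged usage sketch of querying recent executions through the explorer (the class and the job name are invented for the example):

```
import java.util.List;

import org.springframework.batch.core.JobExecution;
import org.springframework.batch.core.JobInstance;
import org.springframework.batch.core.explore.JobExplorer;

public class LastRunReporter {

    private final JobExplorer jobExplorer;

    public LastRunReporter(JobExplorer jobExplorer) {
        this.jobExplorer = jobExplorer;
    }

    public void printLastRun(String jobName) {
        // fetch the most recent instance (page of size 1) and its executions
        List<JobInstance> instances = jobExplorer.getJobInstances(jobName, 0, 1);
        if (instances.isEmpty()) {
            System.out.println("No instances of " + jobName + " yet");
            return;
        }
        List<JobExecution> executions = jobExplorer.getJobExecutions(instances.get(0));
        for (JobExecution execution : executions) {
            System.out.println(jobName + " execution " + execution.getId()
                    + " finished with " + execution.getStatus());
        }
    }
}
```
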

#### JobRegistry

A `JobRegistry` (and its parent interface, `JobLocator`) is not mandatory, but it can be useful if you want to keep track of which jobs are available in the context. It is also useful for collecting jobs centrally in an application context when they have been created elsewhere (for example, in child contexts). Custom `JobRegistry` implementations can also be used to manipulate the names and other properties of the jobs that are registered. There is only one implementation provided by the framework, and this is based on a simple map from job name to job instance.

The following example shows how to include a `JobRegistry` for a job defined in XML:

```
<bean id="jobRegistry" class="org.springframework.batch.core.configuration.support.MapJobRegistry" />
```

When using `@EnableBatchProcessing`, a `JobRegistry` is provided out of the box for you. The following example shows how to configure your own:

```
...
// This is already provided via the @EnableBatchProcessing but can be customized via
// overriding the getter in the SimpleBatchConfiguration
@Override
@Bean
public JobRegistry jobRegistry() throws Exception {
	return new MapJobRegistry();
}
...
```

There are two ways to populate a `JobRegistry` automatically: using a bean post processor and using a registrar lifecycle component. These two mechanisms are described in the following sections.

##### JobRegistryBeanPostProcessor

This is a bean post-processor that can register all jobs as they are created.

The following example shows how to include the `JobRegistryBeanPostProcessor` for a job defined in XML:

XML Configuration

```
<bean id="jobRegistryBeanPostProcessor" class="org.spr...JobRegistryBeanPostProcessor">
    <property name="jobRegistry" ref="jobRegistry"/>
</bean>
```

The following example shows how to include the `JobRegistryBeanPostProcessor` for a job defined in Java:

Java Configuration

```
@Bean
public JobRegistryBeanPostProcessor jobRegistryBeanPostProcessor() {
	JobRegistryBeanPostProcessor postProcessor = new JobRegistryBeanPostProcessor();
	postProcessor.setJobRegistry(jobRegistry());
	return postProcessor;
}
```

Although it is not strictly necessary, the post-processor in the example has been given an id so that it can be included in child contexts (for example, as a parent bean definition) and cause all jobs created there to also be registered automatically.
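
As a quick hedged illustration of what the registry buys you (the class name is invented), a collaborator can later look jobs up by name:

```
import org.springframework.batch.core.Job;
import org.springframework.batch.core.configuration.JobRegistry;
import org.springframework.batch.core.launch.NoSuchJobException;

public class JobByNameLookup {

    private final JobRegistry jobRegistry;

    public JobByNameLookup(JobRegistry jobRegistry) {
        this.jobRegistry = jobRegistry;
    }

    public Job find(String name) throws NoSuchJobException {
        // throws NoSuchJobException if no job was registered under this name
        return jobRegistry.getJob(name);
    }
}
```
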

##### `AutomaticJobRegistrar`

This is a lifecycle component that creates child contexts and registers jobs from those contexts as they are created. One advantage of doing this is that, while the job names in the child contexts still have to be globally unique in the registry, their dependencies can have "natural" names. So, for example, you can create a set of XML configuration files that each have only one Job but that all have different definitions of an `ItemReader` with the same bean name, such as "reader". If all those files were imported into the same context, the reader definitions would clash and override one another, but with the automatic registrar this is avoided. This makes it easier to integrate jobs contributed from separate modules of an application.

The following example shows how to include the `AutomaticJobRegistrar` for a job defined in XML:

XML Configuration

```
<bean class="org.spr...AutomaticJobRegistrar">
    <property name="applicationContextFactories">
        <bean class="org.spr...ClasspathXmlApplicationContextsFactoryBean">
            <property name="resources" value="classpath*:/config/job*.xml" />
        </bean>
    </property>
    <property name="jobLoader">
        <bean class="org.spr...DefaultJobLoader">
            <property name="jobRegistry" ref="jobRegistry" />
        </bean>
    </property>
</bean>
```

The following example shows how to include the `AutomaticJobRegistrar` for a job defined in Java:

Java Configuration

```
@Bean
public AutomaticJobRegistrar registrar() {

	AutomaticJobRegistrar registrar = new AutomaticJobRegistrar();
	registrar.setJobLoader(jobLoader());
	registrar.setApplicationContextFactories(applicationContextFactories());
	registrar.afterPropertiesSet();
	return registrar;

}
```

The registrar has two mandatory properties: an array of `ApplicationContextFactory` (here created from a convenient factory bean) and a `JobLoader`. The `JobLoader` is responsible for managing the lifecycle of the child contexts and registering jobs in the `JobRegistry`.

The `ApplicationContextFactory` is responsible for creating the child context, and the most common usage would be, as above, to use a `ClassPathXmlApplicationContextFactory`. One of the features of this factory is that, by default, it copies some of the configuration down from the parent context to the child. So, for instance, you don't have to re-define the `PropertyPlaceholderConfigurer` or AOP configuration in the child, if it should be the same as the parent.

The `AutomaticJobRegistrar` can be used in conjunction with a `JobRegistryBeanPostProcessor` if desired (as long as the `DefaultJobLoader` is used as well). For instance, this might be desirable if there are jobs defined in the main parent context as well as in the child locations.

#### JobOperator

As previously discussed, the `JobRepository` provides CRUD operations on the meta-data, and the `JobExplorer` provides read-only operations on the meta-data. However, those operations are most useful when used together to perform common monitoring tasks such as stopping, restarting, or summarizing a Job, as is commonly done by batch operators. Spring Batch provides these types of operations via the `JobOperator` interface:

```
public interface JobOperator {

    List<Long> getExecutions(long instanceId) throws NoSuchJobInstanceException;

    List<Long> getJobInstances(String jobName, int start, int count)
          throws NoSuchJobException;

    Set<Long> getRunningExecutions(String jobName) throws NoSuchJobException;

    String getParameters(long executionId) throws NoSuchJobExecutionException;

    Long start(String jobName, String parameters)
          throws NoSuchJobException, JobInstanceAlreadyExistsException;

    Long restart(long executionId)
          throws JobInstanceAlreadyCompleteException, NoSuchJobExecutionException,
                 NoSuchJobException, JobRestartException;

    Long startNextInstance(String jobName)
          throws NoSuchJobException, JobParametersNotFoundException, JobRestartException,
                 JobExecutionAlreadyRunningException, JobInstanceAlreadyCompleteException;

    boolean stop(long executionId)
          throws NoSuchJobExecutionException, JobExecutionNotRunningException;

    String getSummary(long executionId) throws NoSuchJobExecutionException;

    Map<Long, String> getStepExecutionSummaries(long executionId)
          throws NoSuchJobExecutionException;

    Set<String> getJobNames();

}
```

The above operations represent methods from many different interfaces, such as `JobLauncher`, `JobRepository`, `JobExplorer`, and `JobRegistry`. For this reason, the provided implementation of `JobOperator`, `SimpleJobOperator`, has many dependencies.

The following example shows a typical bean definition for `SimpleJobOperator` in XML:

```
<bean id="jobOperator" class="org.spr...SimpleJobOperator">
    <property name="jobExplorer">
        <bean class="org.spr...JobExplorerFactoryBean">
            <property name="dataSource" ref="dataSource" />
        </bean>
    </property>
    <property name="jobRepository" ref="jobRepository" />
    <property name="jobRegistry" ref="jobRegistry" />
    <property name="jobLauncher" ref="jobLauncher" />
</bean>
```

The following example shows a typical bean definition for `SimpleJobOperator` in Java:

```
/**
 * All injected dependencies for this bean are provided by the @EnableBatchProcessing
 * infrastructure out of the box.
 */
@Bean
public SimpleJobOperator jobOperator(JobExplorer jobExplorer,
                                     JobRepository jobRepository,
                                     JobRegistry jobRegistry,
                                     JobLauncher jobLauncher) {

	SimpleJobOperator jobOperator = new SimpleJobOperator();

	jobOperator.setJobExplorer(jobExplorer);
	jobOperator.setJobRepository(jobRepository);
	jobOperator.setJobRegistry(jobRegistry);
	jobOperator.setJobLauncher(jobLauncher);

	return jobOperator;
}
```

| |If you set the table prefix on the job repository, don't forget to set it on the job explorer as well.|
|---|---|

#### JobParametersIncrementer

Most of the methods on `JobOperator` are self-explanatory, and more detailed explanations can be found in the [javadoc of the interface](https://docs.spring.io/spring-batch/docs/current/api/org/springframework/batch/core/launch/JobOperator.html). However, the `startNextInstance` method is worth noting. This method always starts a new instance of a Job. This can be extremely useful if there are serious issues in a `JobExecution` and the Job needs to be started over again from the beginning. Unlike `JobLauncher`, though, which requires a new `JobParameters` object that triggers a new `JobInstance` if the parameters are different from any previous set of parameters, the `startNextInstance` method uses the `JobParametersIncrementer` tied to the `Job` to force the `Job` to a new instance:

```
public interface JobParametersIncrementer {

    JobParameters getNext(JobParameters parameters);

}
```

The contract of `JobParametersIncrementer` is that, given a [JobParameters](#jobParameters) object, it returns the 'next' `JobParameters` object by incrementing any necessary values it may contain. This strategy is useful because the framework has no way of knowing what changes to the `JobParameters` make it the 'next' instance. For example, if the only value in `JobParameters` is a date and the next instance should be created, should that value be incremented by one day? Or one week (if the job is weekly, for instance)? The same can be said for any numerical values that help to identify the Job, as shown below:

```
public class SampleIncrementer implements JobParametersIncrementer {

    public JobParameters getNext(JobParameters parameters) {
        if (parameters == null || parameters.isEmpty()) {
            return new JobParametersBuilder().addLong("run.id", 1L).toJobParameters();
        }
        long id = parameters.getLong("run.id", 1L) + 1;
        return new JobParametersBuilder().addLong("run.id", id).toJobParameters();
    }
}
```

In this example, the value with a key of 'run.id' is used to discriminate between `JobInstances`. If the `JobParameters` passed in is null, it can be assumed that the `Job` has never been run before, and thus its initial state can be returned. However, if not, the old value is obtained, incremented by one, and returned.

For jobs defined in XML, an incrementer can be associated with the `Job` through the 'incrementer' attribute in the namespace, as follows:

```
<job id="footballJob" incrementer="sampleIncrementer">
    ...
</job>
```

For jobs defined in Java, an incrementer can be associated with a `Job` through the `incrementer` method provided in the builders, as follows:

```
@Bean
public Job footballJob() {
    return this.jobBuilderFactory.get("footballJob")
                     .incrementer(sampleIncrementer())
                     ...
                     .build();
}
```

#### Stopping a Job

One of the most common use cases of `JobOperator` is gracefully stopping a Job:

```
Set<Long> executions = jobOperator.getRunningExecutions("sampleJob");
jobOperator.stop(executions.iterator().next());
```

The shutdown is not immediate, since there is no way to force immediate shutdown, especially if the execution is currently in developer code that the framework has no control over, such as a business service. However, as soon as control is returned back to the framework, it sets the status of the current `StepExecution` to `BatchStatus.STOPPED`, saves it, and then does the same for the `JobExecution` before finishing.

#### Aborting a Job

A job execution that is `FAILED` can be restarted (if the `Job` is restartable). A job execution whose status is `ABANDONED` will not be restarted by the framework. The `ABANDONED` status is also used in step executions to mark them as skippable in a restarted job execution: if a job is executing and encounters a step that has been marked `ABANDONED` in the previous failed job execution, it moves on to the next step (as determined by the job flow definition and the step execution exit status).

If the process died (`"kill -9"` or server failure), the job is, of course, not running, but the `JobRepository` has no way of knowing, because no one told it before the process died. You have to tell it manually that you know that the execution either failed or should be considered aborted (change its status to `FAILED` or `ABANDONED`); it is a business decision, and there is no way to automate it. Only change the status to `FAILED` if it is not restartable, or if you know the restart data is valid. There is a utility in Spring Batch Admin, `JobService`, to abort a job execution.
\ No newline at end of file
diff --git a/docs/en/spring-batch/jsr-352.md b/docs/en/spring-batch/jsr-352.md
new file mode 100644
index 0000000000000000000000000000000000000000..6ccea008257b7304321dab4dc28e5e5192dd9ecc
--- /dev/null
+++ b/docs/en/spring-batch/jsr-352.md
@@ -0,0 +1,415 @@

# JSR-352 Support

## JSR-352 Support

As of Spring Batch 3.0, support for JSR-352 has been fully implemented. This section is not a replacement for the spec itself; instead, it intends to explain how the JSR-352 specific concepts apply to Spring Batch. Additional information on JSR-352 can be found via the JCP.

### General Notes about Spring Batch and JSR-352

Spring Batch and JSR-352 are structurally the same. They both have jobs that are made up of steps. They both have readers, processors, writers, and listeners. However, their interactions are subtly different. For example, the `org.springframework.batch.core.SkipListener#onSkipInWrite(S item, Throwable t)` within Spring Batch receives two parameters: the item that was skipped and the Exception that caused the skip. The JSR-352 version of the same method (`javax.batch.api.chunk.listener.SkipWriteListener#onSkipWriteItem(List<Object> items, Exception ex)`) also receives two parameters. However, the first one is a `List` of all the items within the current chunk, with the second being the `Exception` that caused the skip.
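
As a rough illustration of that difference, the following sketch (not from the reference documentation; class names and bodies are invented) contrasts the two callback shapes:

```
import java.util.List;

import javax.batch.api.chunk.listener.SkipWriteListener;

import org.springframework.batch.core.SkipListener;

// Spring Batch style: notified once per skipped item.
class MySpringSkipListener implements SkipListener<String, String> {

    public void onSkipInRead(Throwable t) { }

    public void onSkipInProcess(String item, Throwable t) { }

    public void onSkipInWrite(String item, Throwable t) {
        // receives only the single item that was skipped
        System.err.println("Skipped item: " + item);
    }
}

// JSR-352 style: notified with the whole chunk.
class MyJsrSkipListener implements SkipWriteListener {

    public void onSkipWriteItem(List<Object> items, Exception ex) throws Exception {
        // receives every item in the current chunk, not just the failed one
        System.err.println("Write skipped for a chunk of " + items.size() + " items");
    }
}
```
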

Because of these differences, it is important to note that there are two paths to execute a job within Spring Batch: either a traditional Spring Batch job or a JSR-352 based job. While the use of Spring Batch artifacts (readers, writers, and so on) will work within a job configured with JSR-352's JSL and executed with the `JsrJobOperator`, they will behave according to the rules of JSR-352. It is also important to note that batch artifacts that have been developed against the JSR-352 interfaces will not work within a traditional Spring Batch job.

### Setup

#### Application Contexts

All JSR-352 based jobs within Spring Batch consist of two application contexts: a parent context that contains beans related to the infrastructure of Spring Batch, such as the `JobRepository` and `PlatformTransactionManager`, and a child context that consists of the configuration of the job to be run. The parent context is defined via the `jsrBaseContext.xml` provided by the framework. This context may be overridden by setting the `JSR-352-BASE-CONTEXT` system property.

| |The base context is not processed by the JSR-352 processors for things like property injection, so
no components requiring that additional processing should be configured there.|
|---|---|

#### Launching a JSR-352 based job

JSR-352 requires a very simple path to executing a batch job. The following code is all that is needed to execute your first batch job:

```
JobOperator jobOperator = BatchRuntime.getJobOperator();
jobOperator.start("myJob", new Properties());
```

While that is convenient for developers, the devil is in the details. Spring Batch bootstraps a bit of infrastructure behind the scenes that a developer may want to override. The following is bootstrapped the first time `BatchRuntime.getJobOperator()` is called:

| *Bean Name* | *Default Configuration* | *Notes* |
|------------------------|-----------------------------------------------------------------------|-----------|
| dataSource | Apache DBCP BasicDataSource with configured values. | By default, HSQLDB is bootstrapped. |
| `transactionManager` | `org.springframework.jdbc.datasource.DataSourceTransactionManager` | References the dataSource bean defined above. |
|A Datasource initializer| | This is configured to execute the scripts configured via the `batch.drop.script` and `batch.schema.script` properties. By
default, the schema scripts for HSQLDB are executed. This behavior can be disabled by setting the `batch.data.source.init` property. |
| jobRepository | A JDBC based `SimpleJobRepository`. | This `JobRepository` uses the previously mentioned data source and transaction
manager. The schema's table prefix is configurable (defaults to BATCH\_) via the `batch.table.prefix` property. |
| jobLauncher | `org.springframework.batch.core.launch.support.SimpleJobLauncher` | Used to launch jobs. |
| batchJobOperator | `org.springframework.batch.core.launch.support.SimpleJobOperator` | The `JsrJobOperator` wraps this to provide most of its functionality. |
| jobExplorer |`org.springframework.batch.core.explore.support.JobExplorerFactoryBean`| Used to address lookup functionality provided by the `JsrJobOperator`. |
| jobParametersConverter | `org.springframework.batch.core.jsr.JsrJobParametersConverter` | JSR-352 specific implementation of the `JobParametersConverter`. |
| jobRegistry | `org.springframework.batch.core.configuration.support.MapJobRegistry` | Used by the `SimpleJobOperator`. |
| placeholderProperties |`org.springframework.beans.factory.config.PropertyPlaceholderConfigurer`|Loads the properties file `batch-${ENVIRONMENT:hsql}.properties` to configure
the properties mentioned above. ENVIRONMENT is a System property (defaults to `hsql`)
that can be used to specify any of the databases that Spring Batch currently
supports.| + +| |None of the above beans are optional for executing JSR-352 based jobs. All may be overridden to
provide customized functionality as needed.|
|---|-----------------------------------------------------------------------------------------------------------------------------------------------|

### Dependency Injection

JSR-352 is based heavily on the Spring Batch programming model. As such, while not explicitly requiring a
formal dependency injection implementation, DI of some kind is implied. Spring Batch supports all three
methods for loading batch artifacts defined by JSR-352:

* Implementation Specific Loader: Spring Batch is built upon Spring and so supports
  Spring dependency injection within JSR-352 batch jobs.

* Archive Loader: JSR-352 defines the existence of a `batch.xml` file that provides mappings
  between a logical name and a class name. This file must be found within the `/META-INF/` directory if it is used.

* Thread Context Class Loader: JSR-352 allows configurations to specify batch artifact
  implementations in their JSL by providing the fully qualified class name inline. Spring
  Batch supports this as well in JSR-352 configured jobs.

Using Spring dependency injection within a JSR-352 based batch job consists of
configuring batch artifacts as beans in a Spring application context. Once the beans
have been defined, a job can refer to them as it would any bean defined within the `batch.xml` file.

The following example shows how to use Spring dependency injection within a JSR-352 based
batch job in XML (a minimal reconstruction of the lost snippet; `com.example.FooBatchlet` stands in for the batchlet class):

XML Configuration

```
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xsi:schemaLocation="http://www.springframework.org/schema/beans
                           https://www.springframework.org/schema/beans/spring-beans.xsd">

    <!-- The batch artifact, defined as a regular Spring bean.
         The class name is illustrative. -->
    <bean id="fooBatchlet" class="com.example.FooBatchlet">
        <property name="prop" value="bar"/>
    </bean>
</beans>
```

The following example shows how to use Spring dependency injection within a JSR-352 based
batch job in Java (the JSL referencing the bean was lost in conversion; the job definition below is a minimal reconstruction):

Java Configuration

```
@Configuration
public class BatchConfiguration {

    @Bean
    public Batchlet fooBatchlet() {
        FooBatchlet batchlet = new FooBatchlet();
        batchlet.setProp("bar");
        return batchlet;
    }
}

<!-- JSL (fooJob.xml) referencing the bean above; reconstructed -->
<?xml version="1.0" encoding="UTF-8"?>
<job id="fooJob" xmlns="http://xmlns.jcp.org/xml/ns/javaee" version="1.0">
    <step id="step1">
        <batchlet ref="fooBatchlet"/>
    </step>
</job>
```

The assembly of Spring contexts (imports and so on) works with JSR-352 jobs just as it would with any other
Spring based application. The only difference with a JSR-352 based job is that the entry point for the
context definition is the job definition found in /META-INF/batch-jobs/.

To use the thread context class loader approach, all you need to do is provide the fully qualified class
name as the ref. It is important to note that, when using this approach or the `batch.xml` approach, the class
referenced requires a no-argument constructor, which is used to create the bean. The following reconstructed
snippet illustrates the inline approach (the class name is an example):

```
<job id="fooJob" xmlns="http://xmlns.jcp.org/xml/ns/javaee" version="1.0">
    <step id="step1">
        <batchlet ref="com.example.FooBatchlet"/>
    </step>
</job>
```

### Batch Properties

#### Property Support

JSR-352 allows for properties to be defined at the Job, Step, and batch artifact level by way of
configuration in the JSL. Batch properties are configured at each level in the following way
(a reconstructed example, matching the property referenced in the next section):

```
<properties>
    <property name="propertyName1" value="propertyValue1"/>
    <property name="propertyName2" value="propertyValue2"/>
</properties>
```

`Properties` may be configured on any batch artifact.

#### @BatchProperty annotation

`Properties` are referenced in batch artifacts by annotating class fields with the `@BatchProperty` and `@Inject` annotations (both annotations
are required by the spec). As defined by JSR-352, fields for properties must be String typed. Any type
conversion is up to the implementing developer to perform.

A `javax.batch.api.chunk.ItemReader` artifact could be configured with a
properties block such as the one described above and accessed as follows:

```
public class MyItemReader extends AbstractItemReader {
    @Inject
    @BatchProperty
    private String propertyName1;

    ...
}
```

The value of the field "propertyName1" will be "propertyValue1".

#### Property Substitution

Property substitution is provided by way of operators and simple conditional expressions. The general
usage is `#{operator['key']}`.

Supported operators:

* `jobParameters`: access job parameter values that the job was started/restarted with.

* `jobProperties`: access properties configured at the job level of the JSL.

* `systemProperties`: access named system properties.

* `partitionPlan`: access a named property from the partition plan of a partitioned step.

```
#{jobParameters['unresolving.prop']}?:#{systemProperties['file.separator']}
```

The left-hand side of the assignment is the expected value, and the right-hand side is the
default value. In the preceding
example, the result resolves to the value of the system property `file.separator`, as
`#{jobParameters['unresolving.prop']}` is assumed to not be resolvable. If neither
expression can be resolved, an empty String is returned. Multiple conditions can be
used, separated by a ';'.

### Processing Models

JSR-352 provides the same two basic processing models that Spring Batch does:

* Item based processing - Using a `javax.batch.api.chunk.ItemReader`, an optional `javax.batch.api.chunk.ItemProcessor`, and a `javax.batch.api.chunk.ItemWriter`.

* Task based processing - Using a `javax.batch.api.Batchlet` implementation. This processing model is the same as the `org.springframework.batch.core.step.tasklet.Tasklet` based processing
  currently available.

#### Item based processing

Item based processing in this context means that the chunk size is set by the number of items read by an `ItemReader`. To configure a step this way, specify the `item-count` (which defaults to 10) and, optionally, configure the `checkpoint-policy` as item (this is the default). The following reconstructed JSL sketch shows such a step (the artifact names are examples):

```
...
<step id="step1">
    <chunk checkpoint-policy="item" item-count="3">
        <reader ref="fooReader"/>
        <processor ref="fooProcessor"/>
        <writer ref="fooWriter"/>
    </chunk>
</step>
...
```

If item-based checkpointing is chosen, an additional attribute, `time-limit`, is supported.
This sets a time limit for how long the number of items specified has to be processed. If
the timeout is reached, the chunk completes with however many items have been read by
then, regardless of what the `item-count` is configured to be.

#### Custom checkpointing

JSR-352 calls the process around the commit interval within a step "checkpointing".
Item-based checkpointing is one approach, as mentioned above. However, this is not robust
enough in many cases. Because of this, the spec allows for the implementation of a custom
checkpointing algorithm by implementing the `javax.batch.api.chunk.CheckpointAlgorithm` interface. This is functionally equivalent to Spring Batch’s custom completion
policy. To use an implementation of `CheckpointAlgorithm`, configure your step with the
custom `checkpoint-policy` as shown below (a reconstructed sketch), where `fooCheckpointer` refers to an
implementation of `CheckpointAlgorithm`.

```
...
<step id="step1">
    <chunk checkpoint-policy="custom">
        <checkpoint-algorithm ref="fooCheckpointer"/>
        <reader ref="fooReader"/>
        <processor ref="fooProcessor"/>
        <writer ref="fooWriter"/>
    </chunk>
</step>
...
```
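
For reference, an implementation of `CheckpointAlgorithm` that checkpoints on elapsed time might look like the following sketch (the class name and the ten-second threshold are illustrative):

```
import javax.batch.api.chunk.CheckpointAlgorithm;

public class TimedCheckpointAlgorithm implements CheckpointAlgorithm {

    private long chunkStart;

    @Override
    public int checkpointTimeout() throws Exception {
        return 0; // 0 means use the container's default transaction timeout
    }

    @Override
    public void beginCheckpoint() throws Exception {
        chunkStart = System.currentTimeMillis(); // a new chunk is starting
    }

    @Override
    public boolean isReadyToCheckpoint() throws Exception {
        // Commit the current chunk once ten seconds have elapsed.
        return System.currentTimeMillis() - chunkStart >= 10_000;
    }

    @Override
    public void endCheckpoint() throws Exception {
        // Nothing to clean up in this example.
    }
}
```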

### Running a job

The entry point for executing a JSR-352 based job is the `javax.batch.operations.JobOperator`. Spring Batch provides its own implementation of
this interface (`org.springframework.batch.core.jsr.launch.JsrJobOperator`). This
implementation is loaded via the `javax.batch.runtime.BatchRuntime`. Launching a
JSR-352 based batch job is implemented as follows:

```
JobOperator jobOperator = BatchRuntime.getJobOperator();
long jobExecutionId = jobOperator.start("fooJob", new Properties());
```

The above code does the following:

* Bootstraps a base `ApplicationContext`: In order to provide batch functionality, the
  framework needs some infrastructure bootstrapped. This occurs once per JVM. The
  components that are bootstrapped are similar to those provided by `@EnableBatchProcessing`. Specific details can be found in the javadoc for the `JsrJobOperator`.

* Loads an `ApplicationContext` for the job requested: In the example
  above, the framework looks in /META-INF/batch-jobs for a file named fooJob.xml and loads a
  context that is a child of the shared context mentioned previously.

* Launches the job: The job defined within the context is executed asynchronously, and the
  ID of the resulting `JobExecution` is returned.

| |All JSR-352 based batch jobs are executed asynchronously.|
|---|---------------------------------------------------------|

When `JobOperator#start` is called using `SimpleJobOperator`, Spring Batch determines if
the call is an initial run or a retry of a previously executed run. Using the JSR-352
based `JobOperator#start(String jobXMLName, Properties jobParameters)`, the framework
always creates a new `JobInstance` (JSR-352 job parameters are non-identifying). In order to
restart a job, a call to `JobOperator#restart(long executionId, Properties restartParameters)` is required.

### Contexts

JSR-352 defines two context objects that are used to interact with the meta-data of a job or step from
within a batch artifact: `javax.batch.runtime.context.JobContext` and `javax.batch.runtime.context.StepContext`. Both of these are available in any step
level artifact (`Batchlet`, `ItemReader`, and so on), with the `JobContext` being available to job level artifacts as well
(`JobListener`, for example).

To obtain a reference to the `JobContext` or `StepContext` within the current scope, simply use the `@Inject` annotation:

```
@Inject
JobContext jobContext;
```

| |@Autowired for JSR-352 contexts

Using Spring’s @Autowired is not supported for the injection of these contexts.|
|---|----------------------------------------------------------------------------------------------------------------------|

In Spring Batch, the `JobContext` and `StepContext` wrap their
corresponding execution objects (`JobExecution` and `StepExecution`, respectively). Data stored through `StepContext#setPersistentUserData(Serializable data)` is stored in the
Spring Batch `StepExecution#executionContext`.

### Step Flow

Within a JSR-352 based job, the flow of steps works similarly to the way it does within Spring Batch.
However, there are a few subtle differences:

* Decisions are steps - In a regular Spring Batch job, a decision is a state that does not
  have an independent `StepExecution` or any of the rights and
  responsibilities that go along with being a full step. However, with JSR-352, a decision
  is a step just like any other and behaves just as any other step does (transactionality,
  it gets a `StepExecution`, and so on). This means that decisions are treated the
  same as any other step on restarts as well.

* `next` attribute and step transitions - In a regular job, these are
  allowed to appear together in the same step. JSR-352 allows them to both be used in the
  same step, with the next attribute taking precedence in evaluation.

* Transition element ordering - In a standard Spring Batch job, transition elements are
  sorted from most specific to least specific and evaluated in that order. JSR-352 jobs
  evaluate transition elements in the order they are specified in the XML.

### Scaling a JSR-352 batch job

Traditional Spring Batch jobs have four ways of scaling (the last two capable of being executed across
multiple JVMs):

* Split - Running multiple steps in parallel.

* Multiple threads - Executing a single step via multiple threads.

* Partitioning - Dividing the data up for parallel processing (manager/worker).

* Remote Chunking - Executing the processor piece of logic remotely.

JSR-352 provides two options for scaling batch jobs. Both options support only a single JVM:

* Split - Same as Spring Batch.

* Partitioning - Conceptually the same as Spring Batch, however it is implemented slightly differently.

#### Partitioning

Conceptually, partitioning in JSR-352 is the same as it is in Spring Batch. Meta-data is provided
to each worker to identify the input to be processed, with the workers reporting back to the manager the
results upon completion. However, there are some important differences:

* Partitioned `Batchlet` - This runs multiple instances of the
  configured `Batchlet` on multiple threads. Each instance has
  its own set of properties, as provided by the JSL or the `PartitionPlan`.

* `PartitionPlan` - With Spring Batch’s partitioning, an `ExecutionContext` is provided for each partition. With JSR-352, a
  single `javax.batch.api.partition.PartitionPlan` is provided with an
  array of `Properties` providing the meta-data for each partition.

* `PartitionMapper` - JSR-352 provides two ways to generate partition
  meta-data. One is via the JSL (partition properties). The second is via an implementation
  of the `javax.batch.api.partition.PartitionMapper` interface.
  Functionally, this interface is similar to the `org.springframework.batch.core.partition.support.Partitioner` interface provided by Spring Batch, in that it provides a way to programmatically generate
  meta-data for partitioning.
+ +* `StepExecutions` - In Spring Batch, partitioned steps are run as + manager/worker. Within JSR-352, the same configuration occurs. However, the worker steps do + not get official `StepExecutions`. Because of that, calls to`JsrJobOperator#getStepExecutions(long jobExecutionId)` will only + return the `StepExecution` for the manager. + +| |The child `StepExecutions` still exist in the job repository and are available
through the `JobExplorer`.|
|---|-------------------------------------------------------------------------------------------------------------|

* Compensating logic - Since Spring Batch implements the manager/worker logic of
  partitioning using steps, `StepExecutionListeners` can be used to
  handle compensating logic if something goes wrong. However, since the workers in JSR-352
  are not full steps, that mechanism is not available. Instead, JSR-352
  provides a collection of other components for providing compensating logic when
  errors occur and for dynamically setting the exit status. These components include the following:

| *Artifact Interface* | *Description* |
|----------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
|`javax.batch.api.partition.PartitionCollector`| Provides a way for worker steps to send information back to the
manager. There is one instance per worker thread. |
|`javax.batch.api.partition.PartitionAnalyzer` |Endpoint that receives the information collected by the `PartitionCollector`, as well as the resulting
statuses from a completed partition.|
| `javax.batch.api.partition.PartitionReducer` | Provides the ability to supply compensating logic for a partitioned
step. |

### Testing

Since all JSR-352 based jobs are executed asynchronously, it can be difficult to determine when a job has
completed. To help with testing, Spring Batch provides the `org.springframework.batch.test.JsrTestUtils` class. This utility class provides the
ability to start a job, restart a job, and wait for it to complete. Once the job completes, the
associated `JobExecution` is returned.
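
For example, a test might block until the job finishes (a minimal sketch; it assumes the static `runJob(String jobName, Properties properties, long timeout)` helper, with the timeout given in milliseconds):

```
import java.util.Properties;

import javax.batch.runtime.BatchStatus;
import javax.batch.runtime.JobExecution;

import org.junit.Test;

import org.springframework.batch.test.JsrTestUtils;

import static org.junit.Assert.assertEquals;

public class FooJobIntegrationTests {

    @Test
    public void testFooJobCompletes() throws Exception {
        // Start "fooJob" and wait up to 10 seconds for it to finish.
        JobExecution execution = JsrTestUtils.runJob("fooJob", new Properties(), 10000L);

        // The returned JobExecution reflects the final state of the run.
        assertEquals(BatchStatus.COMPLETED, execution.getBatchStatus());
    }
}
```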

\ No newline at end of file
diff --git a/docs/en/spring-batch/monitoring-and-metrics.md b/docs/en/spring-batch/monitoring-and-metrics.md
new file mode 100644
index 0000000000000000000000000000000000000000..86c682c6565b23981d300c690415b2dbb04a03a4
--- /dev/null
+++ b/docs/en/spring-batch/monitoring-and-metrics.md
@@ -0,0 +1,75 @@
# Monitoring and metrics

## Monitoring and metrics

Since version 4.2, Spring Batch provides support for batch monitoring and metrics
based on [Micrometer](https://micrometer.io/). This section describes
which metrics are provided out-of-the-box and how to contribute custom metrics.

### Built-in metrics

Metrics collection does not require any specific configuration. All metrics provided
by the framework are registered in [Micrometer’s global registry](https://micrometer.io/docs/concepts#_global_registry) under the `spring.batch` prefix. The following table explains all the metrics in detail:

| *Metric Name* | *Type* | *Description* | *Tags* |
|---------------------------|-----------------|---------------------------|---------------------------------|
| `spring.batch.job` | `TIMER` | Duration of job execution | `name`, `status` |
| `spring.batch.job.active` |`LONG_TASK_TIMER`| Currently active jobs | `name` |
| `spring.batch.step` | `TIMER` |Duration of step execution | `name`, `job.name`, `status` |
| `spring.batch.item.read` | `TIMER` | Duration of item reading |`job.name`, `step.name`, `status`|
|`spring.batch.item.process`| `TIMER` |Duration of item processing|`job.name`, `step.name`, `status`|
|`spring.batch.chunk.write` | `TIMER` | Duration of chunk writing |`job.name`, `step.name`, `status`|

| |The `status` tag can be either `SUCCESS` or `FAILURE`.|
|---|------------------------------------------------------|

### Custom metrics

If you want to use your own metrics in your custom components, we recommend using
Micrometer APIs directly. The following is an example of how to time a `Tasklet`:

```
import io.micrometer.core.instrument.Metrics;
import io.micrometer.core.instrument.Timer;

import org.springframework.batch.core.StepContribution;
import org.springframework.batch.core.scope.context.ChunkContext;
import org.springframework.batch.core.step.tasklet.Tasklet;
import org.springframework.batch.repeat.RepeatStatus;

public class MyTimedTasklet implements Tasklet {

    @Override
    public RepeatStatus execute(StepContribution contribution, ChunkContext chunkContext) {
        Timer.Sample sample = Timer.start(Metrics.globalRegistry);
        String status = "success";
        try {
            // do some work
        } catch (Exception e) {
            // handle exception
            status = "failure";
        } finally {
            sample.stop(Timer.builder("my.tasklet.timer")
                            .description("Duration of MyTimedTasklet")
                            .tag("status", status)
                            .register(Metrics.globalRegistry));
        }
        return RepeatStatus.FINISHED;
    }
}
```

### Disabling metrics

Metrics collection is a concern similar to logging. Disabling logs is typically
done by configuring the logging library, and this is no different for metrics.
There is no feature in Spring Batch to disable Micrometer’s metrics; this should
be done on Micrometer’s side. Since Spring Batch stores metrics in the global
registry of Micrometer with the `spring.batch` prefix, it is possible to configure
Micrometer to ignore or deny batch metrics with the following snippet:

```
Metrics.globalRegistry.config().meterFilter(MeterFilter.denyNameStartsWith("spring.batch"));
```

Please refer to Micrometer’s [reference documentation](http://micrometer.io/docs/concepts#_meter_filters) for more details.
\ No newline at end of file
diff --git a/docs/en/spring-batch/processor.md b/docs/en/spring-batch/processor.md
new file mode 100644
index 0000000000000000000000000000000000000000..a25e49301d882f1261e37ee8964b1b4a7f8168ac
--- /dev/null
+++ b/docs/en/spring-batch/processor.md
@@ -0,0 +1,347 @@
# Item processing

## Item processing

The [ItemReader and ItemWriter interfaces](readersAndWriters.html#readersAndWriters) are both very useful for their specific
tasks, but what if you want to insert business logic before writing? One option for both
reading and writing is to use the composite pattern: Create an `ItemWriter` that contains
another `ItemWriter` or an `ItemReader` that contains another `ItemReader`. The following
code shows an example:

```
public class CompositeItemWriter<T> implements ItemWriter<T> {

    ItemWriter<T> itemWriter;

    public CompositeItemWriter(ItemWriter<T> itemWriter) {
        this.itemWriter = itemWriter;
    }

    public void write(List<? extends T> items) throws Exception {
        //Add business logic here
        itemWriter.write(items);
    }

    public void setDelegate(ItemWriter<T> itemWriter){
        this.itemWriter = itemWriter;
    }
}
```

The preceding class contains another `ItemWriter` to which it delegates after having
provided some business logic. This pattern could easily be used for an `ItemReader` as
well, perhaps to obtain more reference data based upon the input that was provided by the
main `ItemReader`. It is also useful if you need to control the call to `write` yourself.
However, if you only want to 'transform' the item passed in for writing before it is
actually written, you need not `write` yourself. You can just modify the item. For this
scenario, Spring Batch provides the `ItemProcessor` interface, as shown in the following
interface definition:

```
public interface ItemProcessor<I, O> {

    O process(I item) throws Exception;
}
```

An `ItemProcessor` is simple. Given one object, transform it and return another. The
provided object may or may not be of the same type. The point is that business logic may
be applied within the process, and it is completely up to the developer to create that
logic. An `ItemProcessor` can be wired directly into a step. For example, assume an `ItemReader` provides a class of type `Foo` and that it needs to be converted to type `Bar` before being written out. The following example shows an `ItemProcessor` that performs
the conversion:

```
public class Foo {}

public class Bar {
    public Bar(Foo foo) {}
}

public class FooProcessor implements ItemProcessor<Foo, Bar> {
    public Bar process(Foo foo) throws Exception {
        //Perform simple transformation, convert a Foo to a Bar
        return new Bar(foo);
    }
}

public class BarWriter implements ItemWriter<Bar> {
    public void write(List<? extends Bar> bars) throws Exception {
        //write bars
    }
}
```

In the preceding example, there is a class `Foo`, a class `Bar`, and a class `FooProcessor` that adheres to the `ItemProcessor` interface.
The transformation is
simple, but any type of transformation could be done here. The `BarWriter` writes `Bar` objects, throwing an exception if any other type is provided. Similarly, the `FooProcessor` throws an exception if anything but a `Foo` is provided. The `FooProcessor` can then be injected into a `Step`, as shown in the following example (the XML snippet is a minimal reconstruction of the lost configuration):

XML Configuration

```
<job id="ioSampleJob">
    <step name="step1">
        <tasklet>
            <chunk reader="fooReader" processor="fooProcessor" writer="barWriter"
                   commit-interval="2"/>
        </tasklet>
    </step>
</job>
```

Java Configuration

```
@Bean
public Job ioSampleJob() {
    return this.jobBuilderFactory.get("ioSampleJob")
                .start(step1())
                .build();
}

@Bean
public Step step1() {
    return this.stepBuilderFactory.get("step1")
                .<Foo, Bar>chunk(2)
                .reader(fooReader())
                .processor(fooProcessor())
                .writer(barWriter())
                .build();
}
```

A difference between `ItemProcessor` and `ItemReader` or `ItemWriter` is that an `ItemProcessor` is optional for a `Step`.

### Chaining ItemProcessors

Performing a single transformation is useful in many scenarios, but what if you want to
'chain' together multiple `ItemProcessor` implementations? This can be accomplished using
the composite pattern mentioned previously. To update the previous single-transformation
example, `Foo` is transformed to `Bar`, which is transformed to `Foobar` and written out,
as shown in the following example:

```
public class Foo {}

public class Bar {
    public Bar(Foo foo) {}
}

public class Foobar {
    public Foobar(Bar bar) {}
}

public class FooProcessor implements ItemProcessor<Foo, Bar> {
    public Bar process(Foo foo) throws Exception {
        //Perform simple transformation, convert a Foo to a Bar
        return new Bar(foo);
    }
}

public class BarProcessor implements ItemProcessor<Bar, Foobar> {
    public Foobar process(Bar bar) throws Exception {
        return new Foobar(bar);
    }
}

public class FoobarWriter implements ItemWriter<Foobar> {
    public void write(List<? extends Foobar> items) throws Exception {
        //write items
    }
}
```

A `FooProcessor` and a `BarProcessor` can be 'chained' together to give the resultant `Foobar`, as shown in the following example:

```
CompositeItemProcessor<Foo, Foobar> compositeProcessor =
                                      new CompositeItemProcessor<Foo, Foobar>();
List itemProcessors = new ArrayList();
itemProcessors.add(new FooProcessor());
itemProcessors.add(new BarProcessor());
compositeProcessor.setDelegates(itemProcessors);
```

Just as with the previous example, the composite processor can be configured into the `Step` (the XML snippet is a minimal reconstruction; the `..` package prefixes are placeholders):

XML Configuration

```
<job id="ioSampleJob">
    <step name="step1">
        <tasklet>
            <chunk reader="fooReader" processor="compositeItemProcessor" writer="foobarWriter"
                   commit-interval="2"/>
        </tasklet>
    </step>
</job>

<bean id="compositeItemProcessor"
      class="org.springframework.batch.item.support.CompositeItemProcessor">
    <property name="delegates">
        <list>
            <bean class="..FooProcessor" />
            <bean class="..BarProcessor" />
        </list>
    </property>
</bean>
```

Java Configuration

```
@Bean
public Job ioSampleJob() {
    return this.jobBuilderFactory.get("ioSampleJob")
                .start(step1())
                .build();
}

@Bean
public Step step1() {
    return this.stepBuilderFactory.get("step1")
                .<Foo, Foobar>chunk(2)
                .reader(fooReader())
                .processor(compositeProcessor())
                .writer(foobarWriter())
                .build();
}

@Bean
public CompositeItemProcessor compositeProcessor() {
    List<ItemProcessor> delegates = new ArrayList<>(2);
    delegates.add(new FooProcessor());
    delegates.add(new BarProcessor());

    CompositeItemProcessor processor = new CompositeItemProcessor();

    processor.setDelegates(delegates);

    return processor;
}
```

### Filtering Records

One typical use for an item processor is to filter out records before they are passed to
the `ItemWriter`. Filtering is an action distinct from skipping. Skipping indicates that
a record is invalid, while filtering simply indicates that a record should not be
written.

For example, consider a batch job that reads a file containing three different types of
records: records to insert, records to update, and records to delete. If record deletion
is not supported by the system, we would not want to send any "delete" records to
the `ItemWriter`. But, since these records are not actually bad records, we would want to
filter them out rather than skip them. As a result, the `ItemWriter` would receive only
"insert" and "update" records.

To filter a record, you can return `null` from the `ItemProcessor`. The framework detects
that the result is `null` and avoids adding that item to the list of records delivered to
the `ItemWriter`. As usual, an exception thrown from the `ItemProcessor` results in a
skip.
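
Under the assumptions above, a filtering processor might look like the following sketch (the `OrderRecord` type and its `isDelete` accessor are illustrative):

```
public class DeleteFilteringItemProcessor implements ItemProcessor<OrderRecord, OrderRecord> {

    @Override
    public OrderRecord process(OrderRecord record) throws Exception {
        // Returning null filters the record: it is never passed to the
        // ItemWriter, and the step's filter count is incremented.
        if (record.isDelete()) {
            return null;
        }
        return record;
    }
}
```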

### Validating Input

In the [ItemReaders and ItemWriters](readersAndWriters.html#readersAndWriters) chapter, multiple approaches to parsing input have been
discussed. Each major implementation throws an exception if it is not 'well formed'. The `FixedLengthTokenizer` throws an exception if a range of data is missing. Similarly,
attempting to access an index in a `RowMapper` or `FieldSetMapper` that does not exist or
is in a different format than the one expected causes an exception to be thrown. All of
these types of exceptions are thrown before `read` returns. However, they do not address
the issue of whether or not the returned item is valid. For example, if one of the fields
is an age, it obviously cannot be negative. It may parse correctly, because it exists and
is a number, but it does not cause an exception. Since there are already a plethora of
validation frameworks, Spring Batch does not attempt to provide yet another. Rather, it
provides a simple interface, called `Validator`, that can be implemented by any number of
frameworks, as shown in the following interface definition:

```
public interface Validator<T> {

    void validate(T value) throws ValidationException;

}
```

The contract is that the `validate` method throws an exception if the object is invalid
and returns normally if it is valid. Spring Batch provides an out-of-the-box `ValidatingItemProcessor`, as shown in the following bean definition (the XML snippet is a minimal reconstruction of the lost configuration):

XML Configuration

```
<bean class="org.springframework.batch.item.validator.ValidatingItemProcessor">
    <property name="validator" ref="validator"/>
</bean>

<bean id="validator" class="org.springframework.batch.item.validator.SpringValidator">
    <property name="validator">
        <bean class="org.springframework.batch.sample.domain.trade.internal.validator.TradeValidator"/>
    </property>
</bean>
```

Java Configuration

```
@Bean
public ValidatingItemProcessor itemProcessor() {
    ValidatingItemProcessor processor = new ValidatingItemProcessor();

    processor.setValidator(validator());

    return processor;
}

@Bean
public SpringValidator validator() {
    SpringValidator validator = new SpringValidator();

    validator.setValidator(new TradeValidator());

    return validator;
}
```

You can also use the `BeanValidatingItemProcessor` to validate items annotated with
the Bean Validation API (JSR-303) annotations. For example, given the following type `Person`:

```
class Person {

    @NotEmpty
    private String name;

    public Person(String name) {
        this.name = name;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

}
```

you can validate items by declaring a `BeanValidatingItemProcessor` bean in your
application context and registering it as a processor in your chunk-oriented step:

```
@Bean
public BeanValidatingItemProcessor<Person> beanValidatingItemProcessor() throws Exception {
    BeanValidatingItemProcessor<Person> beanValidatingItemProcessor = new BeanValidatingItemProcessor<>();
    beanValidatingItemProcessor.setFilter(true);

    return beanValidatingItemProcessor;
}
```

### Fault Tolerance

When a chunk is rolled back, items that have been cached during reading may be
reprocessed.
If a step is configured to be fault tolerant (typically by using skip or
retry processing), any `ItemProcessor` used should be implemented in a way that is
idempotent. Typically, that consists of performing no changes on the input item and
updating only the instance that is the result.
\ No newline at end of file
diff --git a/docs/en/spring-batch/readersAndWriters.md b/docs/en/spring-batch/readersAndWriters.md
new file mode 100644
index 0000000000000000000000000000000000000000..9d7cea907527e6ba3b545ee92acd63df1e2088aa
--- /dev/null
+++ b/docs/en/spring-batch/readersAndWriters.md
@@ -0,0 +1,2760 @@
# ItemReaders and ItemWriters

## ItemReaders and ItemWriters

All batch processing can be described in its most simple form as reading in large amounts
of data, performing some type of calculation or transformation, and writing the result
out. Spring Batch provides three key interfaces to help perform bulk reading and writing: `ItemReader`, `ItemProcessor`, and `ItemWriter`.

### `ItemReader`

Although a simple concept, an `ItemReader` is the means for providing data from many
different types of input. The most general examples include:

* Flat File: Flat-file item readers read lines of data from a flat file that typically
  describes records with fields of data defined by fixed positions in the file or delimited
  by some special character (such as a comma).

* XML: XML `ItemReaders` process XML independently of technologies used for parsing,
  mapping, and validating objects. Input data allows for the validation of an XML file
  against an XSD schema.

* Database: A database resource is accessed to return resultsets which can be mapped to
  objects for processing. The default SQL `ItemReader` implementations invoke a `RowMapper` to return objects, keep track of the current row if restart is required, store basic
  statistics, and provide some transaction enhancements that are explained later.

There are many more possibilities, but we focus on the basic ones for this chapter. A
complete list of all available `ItemReader` implementations can be found in [Appendix A](appendix.html#listOfReadersAndWriters).

`ItemReader` is a basic interface for generic
input operations, as shown in the following interface definition:

```
public interface ItemReader<T> {

    T read() throws Exception, UnexpectedInputException, ParseException, NonTransientResourceException;

}
```

The `read` method defines the most essential contract of the `ItemReader`. Calling it
returns one item or `null` if no more items are left. An item might represent a line in a
file, a row in a database, or an element in an XML file. It is generally expected that
these are mapped to a usable domain object (such as `Trade`, `Foo`, or others), but there
is no requirement in the contract to do so.

It is expected that implementations of the `ItemReader` interface are forward only.
However, if the underlying resource is transactional (such as a JMS queue), then calling `read` may return the same logical item on subsequent calls in a rollback scenario. It is
also worth noting that a lack of items to process by an `ItemReader` does not cause an
exception to be thrown. For example, a database `ItemReader` that is configured with a
query that returns 0 results returns `null` on the first invocation of `read`.
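
To illustrate the contract, a trivial reader over an in-memory list might look like the following sketch (this is not one of the framework's implementations; the class name is illustrative):

```
import java.util.Iterator;
import java.util.List;

public class InMemoryStringReader implements ItemReader<String> {

    private final Iterator<String> items;

    public InMemoryStringReader(List<String> items) {
        this.items = items.iterator();
    }

    @Override
    public String read() {
        // Return the next item, or null to signal that the input is exhausted.
        return items.hasNext() ? items.next() : null;
    }
}
```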

### `ItemWriter`

`ItemWriter` is similar in functionality to an `ItemReader` but with inverse operations.
Resources still need to be located, opened, and closed, but they differ in that an `ItemWriter` writes out, rather than reading in. In the case of databases or queues,
these operations may be inserts, updates, or sends. The format of the serialization of
the output is specific to each batch job.

As with `ItemReader`, `ItemWriter` is a fairly generic interface, as shown in the following interface definition:

```
public interface ItemWriter<T> {

    void write(List<? extends T> items) throws Exception;

}
```

As with `read` on `ItemReader`, `write` provides the basic contract of `ItemWriter`. It
attempts to write out the list of items passed in as long as it is open. Because it is
generally expected that items are 'batched' together into a chunk and then output, the
interface accepts a list of items, rather than an item by itself. After writing out the
list, any flushing that may be necessary can be performed before returning from the write
method. For example, if writing to a Hibernate DAO, multiple calls to write can be made,
one for each item. The writer can then call `flush` on the Hibernate session before
returning.

### `ItemStream`

Both `ItemReaders` and `ItemWriters` serve their individual purposes well, but there is a
common concern among both of them that necessitates another interface. In general, as
part of the scope of a batch job, readers and writers need to be opened and closed and
require a mechanism for persisting state. The `ItemStream` interface serves that purpose,
as shown in the following example:

```
public interface ItemStream {

    void open(ExecutionContext executionContext) throws ItemStreamException;

    void update(ExecutionContext executionContext) throws ItemStreamException;

    void close() throws ItemStreamException;
}
```

Before describing each method, we should mention the `ExecutionContext`. Clients of an `ItemReader` that also implement `ItemStream` should call `open` before any calls to `read`, in order to open any resources such as files or to obtain connections. A similar
restriction applies to an `ItemWriter` that implements `ItemStream`. As mentioned in
Chapter 2, if expected data is found in the `ExecutionContext`, it may be used to start
the `ItemReader` or `ItemWriter` at a location other than its initial state. Conversely, `close` is called to ensure that any resources allocated during open are released safely. `update` is called primarily to ensure that any state currently being held is loaded into
the provided `ExecutionContext`. This method is called before committing, to ensure that
the current state is persisted in the database before commit.

In the special case where the client of an `ItemStream` is a `Step` (from the Spring
Batch Core), an `ExecutionContext` is created for each `StepExecution` to allow users to
store the state of a particular execution, with the expectation that it is returned if
the same `JobInstance` is started again. For those familiar with Quartz, the semantics
are very similar to a Quartz `JobDataMap`.

### The Delegate Pattern and Registering with the Step

Note that the `CompositeItemWriter` is an example of the delegation pattern, which is
common in Spring Batch. The delegates themselves might implement callback interfaces,
such as `StepListener`. If they do and if they are being used in conjunction with Spring
Batch Core as part of a `Step` in a `Job`, then they almost certainly need to be
registered manually with the `Step`.
A reader, writer, or processor that is directly
wired into the `Step` gets registered automatically if it implements `ItemStream` or a `StepListener` interface. However, because the delegates are not known to the `Step`,
they need to be injected as listeners or streams (or both, if appropriate).

The following example shows how to inject a delegate as a stream in XML (a minimal reconstruction of the lost snippet; the `...` package prefixes are placeholders):

XML Configuration

```
<job id="ioSampleJob">
    <step name="step1">
        <tasklet>
            <chunk reader="fooReader" processor="fooProcessor" writer="compositeItemWriter"
                   commit-interval="2">
                <streams>
                    <stream ref="barWriter"/>
                </streams>
            </chunk>
        </tasklet>
    </step>
</job>

<bean id="compositeItemWriter" class="...CustomCompositeItemWriter">
    <property name="delegate" ref="barWriter"/>
</bean>

<bean id="barWriter" class="...BarWriter"/>
```

The following example shows how to inject a delegate as a stream in Java:

Java Configuration

```
@Bean
public Job ioSampleJob() {
    return this.jobBuilderFactory.get("ioSampleJob")
                .start(step1())
                .build();
}

@Bean
public Step step1() {
    return this.stepBuilderFactory.get("step1")
                .chunk(2)
                .reader(fooReader())
                .processor(fooProcessor())
                .writer(compositeItemWriter())
                .stream(barWriter())
                .build();
}

@Bean
public CustomCompositeItemWriter compositeItemWriter() {

    CustomCompositeItemWriter writer = new CustomCompositeItemWriter();

    writer.setDelegate(barWriter());

    return writer;
}

@Bean
public BarWriter barWriter() {
    return new BarWriter();
}
```

### Flat Files

One of the most common mechanisms for interchanging bulk data has always been the flat
file. Unlike XML, which has an agreed upon standard for defining how it is structured
(XSD), anyone reading a flat file must understand ahead of time exactly how the file is
structured. In general, all flat files fall into two types: delimited and fixed length.
Delimited files are those in which fields are separated by a delimiter, such as a comma.
Fixed length files have fields that are a set length.

#### The `FieldSet`

When working with flat files in Spring Batch, regardless of whether it is for input or
output, one of the most important classes is the `FieldSet`. Many architectures and
libraries contain abstractions for helping you read in from a file, but they usually
return a `String` or an array of `String` objects. This really only gets you halfway
there. A `FieldSet` is Spring Batch’s abstraction for enabling the binding of fields from
a file resource. It allows developers to work with file input in much the same way as
they would work with database input. A `FieldSet` is conceptually similar to a JDBC `ResultSet`. A `FieldSet` requires only one argument: a `String` array of tokens.
Optionally, you can also configure the names of the fields so that the fields may be
accessed either by index or name, as patterned after `ResultSet`, as shown in the following
example:

```
String[] tokens = new String[]{"foo", "1", "true"};
FieldSet fs = new DefaultFieldSet(tokens);
String name = fs.readString(0);
int value = fs.readInt(1);
boolean booleanValue = fs.readBoolean(2);
```

There are many more options on the `FieldSet` interface, such as `Date`, `long`, `BigDecimal`, and so on. The biggest advantage of the `FieldSet` is that it provides
consistent parsing of flat file input. Rather than each batch job parsing differently in
potentially unexpected ways, it can be consistent, both when handling errors caused by a
format exception, or when doing simple data conversions.

#### `FlatFileItemReader`

A flat file is any type of file that contains at most two-dimensional (tabular) data.
Reading flat files in the Spring Batch framework is facilitated by the class called `FlatFileItemReader`, which provides basic functionality for reading and parsing flat
files.
The two most important required dependencies of `FlatFileItemReader` are`Resource` and `LineMapper`. The `LineMapper` interface is explored more in the next +sections. The resource property represents a Spring Core `Resource`. Documentation +explaining how to create beans of this type can be found in[Spring +Framework, Chapter 5. Resources](https://docs.spring.io/spring/docs/current/spring-framework-reference/core.html#resources). Therefore, this guide does not go into the details of +creating `Resource` objects beyond showing the following simple example: + +``` +Resource resource = new FileSystemResource("resources/trades.csv"); +``` + +In complex batch environments, the directory structures are often managed by the Enterprise Application Integration (EAI) +infrastructure, where drop zones for external interfaces are established for moving files +from FTP locations to batch processing locations and vice versa. File moving utilities +are beyond the scope of the Spring Batch architecture, but it is not unusual for batch +job streams to include file moving utilities as steps in the job stream. The batch +architecture only needs to know how to locate the files to be processed. Spring Batch +begins the process of feeding the data into the pipe from this starting point. However,[Spring Integration](https://projects.spring.io/spring-integration/) provides many +of these types of services. + +The other properties in `FlatFileItemReader` let you further specify how your data is +interpreted, as described in the following table: + +| Property | Type | Description | +|---------------------|---------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------| +| comments | String[] | Specifies line prefixes that indicate comment rows. | +| encoding | String | Specifies what text encoding to use. The default is the value of `Charset.defaultCharset()`. | +| lineMapper | `LineMapper` | Converts a `String` to an `Object` representing the item. | +| linesToSkip | int | Number of lines to ignore at the top of the file. | +|recordSeparatorPolicy|RecordSeparatorPolicy| Used to determine where the line endings are
and do things like continue over a line ending if inside a quoted string. | +| resource | `Resource` | The resource from which to read. | +|skippedLinesCallback | LineCallbackHandler |Interface that passes the raw line content of
the lines in the file to be skipped. If `linesToSkip` is set to 2, then this interface is
called twice.| +| strict | boolean |In strict mode, the reader throws an exception on `ExecutionContext` if
the input resource does not exist. Otherwise, it logs the problem and continues. |

##### `LineMapper`

As with `RowMapper`, which takes a low-level construct such as `ResultSet` and returns
an `Object`, flat file processing requires the same construct to convert a `String` line
into an `Object`, as shown in the following interface definition:

```
public interface LineMapper<T> {

    T mapLine(String line, int lineNumber) throws Exception;

}
```

The basic contract is that, given the current line and the line number with which it is
associated, the mapper should return a resulting domain object. This is similar to `RowMapper`, in that each line is associated with its line number, just as each row in a `ResultSet` is tied to its row number. This allows the line number to be tied to the
resulting domain object for identity comparison or for more informative logging. However,
unlike `RowMapper`, the `LineMapper` is given a raw line which, as discussed above, only
gets you halfway there. The line must be tokenized into a `FieldSet`, which can then be
mapped to an object, as described later in this document.

##### `LineTokenizer`

An abstraction for turning a line of input into a `FieldSet` is necessary because there
can be many formats of flat file data that need to be converted to a `FieldSet`. In
Spring Batch, this interface is the `LineTokenizer`:

```
public interface LineTokenizer {

    FieldSet tokenize(String line);

}
```

The contract of a `LineTokenizer` is such that, given a line of input (in theory the `String` could encompass more than one line), a `FieldSet` representing the line is
returned. This `FieldSet` can then be passed to a `FieldSetMapper`. Spring Batch contains
the following `LineTokenizer` implementations:

* `DelimitedLineTokenizer`: Used for files where fields in a record are separated by a
  delimiter. The most common delimiter is a comma, but pipes or semicolons are often used
  as well.

* `FixedLengthTokenizer`: Used for files where fields in a record are each a "fixed
  width". The width of each field must be defined for each record type.

* `PatternMatchingCompositeLineTokenizer`: Determines which `LineTokenizer` among a list of
  tokenizers should be used on a particular line by checking against a pattern.

##### `FieldSetMapper`

The `FieldSetMapper` interface defines a single method, `mapFieldSet`, which takes a `FieldSet` object and maps its contents to an object. This object may be a custom DTO, a
domain object, or an array, depending on the needs of the job. The `FieldSetMapper` is
used in conjunction with the `LineTokenizer` to translate a line of data from a resource
into an object of the desired type, as shown in the following interface definition:

```
public interface FieldSetMapper<T> {

    T mapFieldSet(FieldSet fieldSet) throws BindException;

}
```

The pattern used is the same as the `RowMapper` used by `JdbcTemplate`.

##### `DefaultLineMapper`

Now that the basic interfaces for reading in flat files have been defined, it becomes
clear that three basic steps are required:

1. Read one line from the file.

2. Pass the `String` line into the `LineTokenizer#tokenize()` method to retrieve a `FieldSet`.

3. Pass the `FieldSet` returned from tokenizing to a `FieldSetMapper`, returning the
   result from the `ItemReader#read()` method.

The two interfaces described above represent two separate tasks: converting a line into a `FieldSet` and mapping a `FieldSet` to a domain object.
Because the input of a `LineTokenizer` matches the input of the `LineMapper` (a line) and the output of a `FieldSetMapper` matches the output of the `LineMapper`, a default implementation that
uses both a `LineTokenizer` and a `FieldSetMapper` is provided. The `DefaultLineMapper`,
shown in the following class definition, represents the behavior most users need:

```
public class DefaultLineMapper<T> implements LineMapper<T>, InitializingBean {

    private LineTokenizer tokenizer;

    private FieldSetMapper<T> fieldSetMapper;

    public T mapLine(String line, int lineNumber) throws Exception {
        return fieldSetMapper.mapFieldSet(tokenizer.tokenize(line));
    }

    public void setLineTokenizer(LineTokenizer tokenizer) {
        this.tokenizer = tokenizer;
    }

    public void setFieldSetMapper(FieldSetMapper<T> fieldSetMapper) {
        this.fieldSetMapper = fieldSetMapper;
    }
}
```

The above functionality is provided in a default implementation, rather than being built
into the reader itself (as was done in previous versions of the framework), to allow users
greater flexibility in controlling the parsing process, especially if access to the raw
line is needed.

##### Simple Delimited File Reading Example

The following example illustrates how to read a flat file with an actual domain scenario.
This particular batch job reads in football players from the following file:

```
ID,lastName,firstName,position,birthYear,debutYear
"AbduKa00,Abdul-Jabbar,Karim,rb,1974,1996",
"AbduRa00,Abdullah,Rabih,rb,1975,1999",
"AberWa00,Abercrombie,Walter,rb,1959,1982",
"AbraDa00,Abramowicz,Danny,wr,1945,1967",
"AdamBo00,Adams,Bob,te,1946,1969",
"AdamCh00,Adams,Charlie,wr,1979,2003"
```

The contents of this file are mapped to the following `Player` domain object:

```
public class Player implements Serializable {

    private String ID;
    private String lastName;
    private String firstName;
    private String position;
    private int birthYear;
    private int debutYear;

    public String toString() {
        return "PLAYER:ID=" + ID + ",Last Name=" + lastName +
            ",First Name=" + firstName + ",Position=" + position +
            ",Birth Year=" + birthYear + ",DebutYear=" +
            debutYear;
    }

    // setters and getters...
}
```

To map a `FieldSet` into a `Player` object, a `FieldSetMapper` that returns players needs
to be defined, as shown in the following example:

```
protected static class PlayerFieldSetMapper implements FieldSetMapper<Player> {
    public Player mapFieldSet(FieldSet fieldSet) {
        Player player = new Player();

        player.setID(fieldSet.readString(0));
        player.setLastName(fieldSet.readString(1));
        player.setFirstName(fieldSet.readString(2));
        player.setPosition(fieldSet.readString(3));
        player.setBirthYear(fieldSet.readInt(4));
        player.setDebutYear(fieldSet.readInt(5));

        return player;
    }
}
```

The file can then be read by correctly constructing a `FlatFileItemReader` and calling `read`, as shown in the following example:

```
FlatFileItemReader<Player> itemReader = new FlatFileItemReader<>();
itemReader.setResource(new FileSystemResource("resources/players.csv"));
DefaultLineMapper<Player> lineMapper = new DefaultLineMapper<>();
//DelimitedLineTokenizer defaults to comma as its delimiter
lineMapper.setLineTokenizer(new DelimitedLineTokenizer());
lineMapper.setFieldSetMapper(new PlayerFieldSetMapper());
itemReader.setLineMapper(lineMapper);
itemReader.open(new ExecutionContext());
Player player = itemReader.read();
```

Each call to `read` returns a new `Player` object from each line in the file.
When the end of the file is
reached, `null` is returned.

##### Mapping Fields by Name

There is one additional piece of functionality that is allowed by both `DelimitedLineTokenizer` and `FixedLengthTokenizer` and that is similar in function to a
JDBC `ResultSet`. The names of the fields can be injected into either of these `LineTokenizer` implementations to increase the readability of the mapping function.
First, the column names of all fields in the flat file are injected into the tokenizer,
as shown in the following example:

```
tokenizer.setNames(new String[] {"ID", "lastName", "firstName", "position", "birthYear", "debutYear"});
```

A `FieldSetMapper` can use this information as follows:

```
public class PlayerMapper implements FieldSetMapper<Player> {
    public Player mapFieldSet(FieldSet fs) {

        if (fs == null) {
            return null;
        }

        Player player = new Player();
        player.setID(fs.readString("ID"));
        player.setLastName(fs.readString("lastName"));
        player.setFirstName(fs.readString("firstName"));
        player.setPosition(fs.readString("position"));
        player.setDebutYear(fs.readInt("debutYear"));
        player.setBirthYear(fs.readInt("birthYear"));

        return player;
    }
}
```

##### Automapping FieldSets to Domain Objects

For many, having to write a specific `FieldSetMapper` is equally as cumbersome as writing
a specific `RowMapper` for a `JdbcTemplate`. Spring Batch makes this easier by providing
a `FieldSetMapper` that automatically maps fields by matching a field name with a setter
on the object using the JavaBean specification.

Again using the football example, the `BeanWrapperFieldSetMapper` configuration looks like
the following snippet in XML (a minimal reconstruction of the lost snippet):

XML Configuration

```
<bean id="fieldSetMapper"
      class="org.springframework.batch.item.file.mapping.BeanWrapperFieldSetMapper">
    <property name="prototypeBeanName" value="player"/>
</bean>

<bean id="player"
      class="org.springframework.batch.sample.domain.Player"
      scope="prototype"/>
```

Again using the football example, the `BeanWrapperFieldSetMapper` configuration looks like
the following snippet in Java:

Java Configuration

```
@Bean
public FieldSetMapper fieldSetMapper() {
    BeanWrapperFieldSetMapper fieldSetMapper = new BeanWrapperFieldSetMapper();

    fieldSetMapper.setPrototypeBeanName("player");

    return fieldSetMapper;
}

@Bean
@Scope("prototype")
public Player player() {
    return new Player();
}
```

For each entry in the `FieldSet`, the mapper looks for a corresponding setter on a new
instance of the `Player` object (for this reason, prototype scope is required) in the
same way the Spring container looks for setters matching a property name. Each available
field in the `FieldSet` is mapped, and the resultant `Player` object is returned, with no
code required.

##### Fixed Length File Formats

So far, only delimited files have been discussed in much detail. However, they represent
only half of the file reading picture. Many organizations that use flat files use fixed
length formats. An example fixed length file follows:

```
UK21341EAH4121131.11customer1
UK21341EAH4221232.11customer2
UK21341EAH4321333.11customer3
UK21341EAH4421434.11customer4
UK21341EAH4521535.11customer5
```

While this looks like one large field, it actually represents 4 distinct fields:

1. ISIN: Unique identifier for the item being ordered - 12 characters long.

2. Quantity: Number of the item being ordered - 3 characters long.

3. Price: Price of the item - 5 characters long.

4. Customer: ID of the customer ordering the item - 9 characters long.

When configuring the `FixedLengthTokenizer`, each of these lengths must be provided
in the form of ranges.

The following example shows how to define ranges for the `FixedLengthTokenizer` in
XML (a minimal reconstruction of the lost snippet):

XML Configuration

```
<bean id="fixedLengthLineTokenizer"
      class="org.springframework.batch.item.file.transform.FixedLengthTokenizer">
    <property name="names" value="ISIN,Quantity,Price,Customer"/>
    <property name="columns" value="1-12, 13-15, 16-20, 21-29"/>
</bean>
```

Because the `FixedLengthTokenizer` uses the same `LineTokenizer` interface as
discussed earlier, it returns the same `FieldSet` as if a delimiter had been used. This
allows the same approaches to be used in handling its output, such as using the `BeanWrapperFieldSetMapper`.

| |Supporting the preceding syntax for ranges requires that a specialized property editor, `RangeArrayPropertyEditor`, be configured in the `ApplicationContext`. However, this bean
is automatically declared in an `ApplicationContext` where the batch namespace is used.|
|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|

The following example shows how to define ranges for the `FixedLengthTokenizer` in
Java:

Java Configuration

```
@Bean
public FixedLengthTokenizer fixedLengthTokenizer() {
    FixedLengthTokenizer tokenizer = new FixedLengthTokenizer();

    tokenizer.setNames("ISIN", "Quantity", "Price", "Customer");
    tokenizer.setColumns(new Range(1, 12),
                        new Range(13, 15),
                        new Range(16, 20),
                        new Range(21, 29));

    return tokenizer;
}
```

Because the `FixedLengthTokenizer` uses the same `LineTokenizer` interface as
discussed above, it returns the same `FieldSet` as if a delimiter had been used. This
lets the same approaches be used in handling its output, such as using the `BeanWrapperFieldSetMapper`.

##### Multiple Record Types within a Single File

All of the file reading examples up to this point have made a key assumption for
simplicity’s sake: all of the records in a file have the same format. However, this may
not always be the case. It is very common that a file might have records with different
formats that need to be tokenized differently and mapped to different objects. The
following excerpt from a file illustrates this:

```
USER;Smith;Peter;;T;20014539;F
LINEA;1044391041ABC037.49G201XX1383.12H
LINEB;2134776319DEF422.99M005LI
```

In this file we have three types of records, "USER", "LINEA", and "LINEB". A "USER" line
corresponds to a `User` object. "LINEA" and "LINEB" both correspond to `Line` objects,
though a "LINEA" has more information than a "LINEB".

The `ItemReader` reads each line individually, but we must specify different `LineTokenizer` and `FieldSetMapper` objects so that the `ItemWriter` receives the
correct items. The `PatternMatchingCompositeLineMapper` makes this easy by allowing maps
of patterns to `LineTokenizers` and patterns to `FieldSetMappers` to be configured.

The following example shows how to configure a `PatternMatchingCompositeLineMapper` in
XML (a minimal reconstruction of the lost snippet):

XML Configuration

```
<bean id="orderFileLineMapper"
      class="org.springframework.batch.item.file.mapping.PatternMatchingCompositeLineMapper">
    <property name="tokenizers">
        <map>
            <entry key="USER*" value-ref="userTokenizer"/>
            <entry key="LINEA*" value-ref="lineATokenizer"/>
            <entry key="LINEB*" value-ref="lineBTokenizer"/>
        </map>
    </property>
    <property name="fieldSetMappers">
        <map>
            <entry key="USER*" value-ref="userFieldSetMapper"/>
            <entry key="LINE*" value-ref="lineFieldSetMapper"/>
        </map>
    </property>
</bean>
```

Java Configuration

```
@Bean
public PatternMatchingCompositeLineMapper orderFileLineMapper() {
    PatternMatchingCompositeLineMapper lineMapper =
        new PatternMatchingCompositeLineMapper();

    Map<String, LineTokenizer> tokenizers = new HashMap<>(3);
    tokenizers.put("USER*", userTokenizer());
    tokenizers.put("LINEA*", lineATokenizer());
    tokenizers.put("LINEB*", lineBTokenizer());

    lineMapper.setTokenizers(tokenizers);

    Map<String, FieldSetMapper> mappers = new HashMap<>(2);
    mappers.put("USER*", userFieldSetMapper());
    mappers.put("LINE*", lineFieldSetMapper());

    lineMapper.setFieldSetMappers(mappers);

    return lineMapper;
}
```

In this example, "LINEA" and "LINEB" have separate `LineTokenizer` instances, but they both use
the same `FieldSetMapper`.

The `PatternMatchingCompositeLineMapper` uses the `PatternMatcher#match` method
in order to select the correct delegate for each line. The `PatternMatcher` allows for
two wildcard characters with special meaning: the question mark ("?") matches exactly one
character, while the asterisk ("\*") matches zero or more characters.
Note that, in the
preceding configuration, all patterns end with an asterisk, making them effectively
prefixes to lines. The `PatternMatcher` always matches the most specific pattern
possible, regardless of the order in the configuration. So, if "LINE\*" and "LINEA\*" were
both listed as patterns, "LINEA" would match pattern "LINEA\*", while "LINEB" would match
pattern "LINE\*". Additionally, a single asterisk ("\*") can serve as a default by matching
any line not matched by any other pattern.

The following example shows how to match a line not matched by any other pattern in XML
(a minimal reconstruction of the lost snippet):

XML Configuration

```
<entry key="*" value-ref="defaultLineTokenizer"/>
```

The following example shows how to match a line not matched by any other pattern in Java:

Java Configuration

```
...
tokenizers.put("*", defaultLineTokenizer());
...
```

There is also a `PatternMatchingCompositeLineTokenizer` that can be used for tokenization
alone.

It is also common for a flat file to contain records that each span multiple lines. To
handle this situation, a more complex strategy is required. A demonstration of this
common pattern can be found in the `multiLineRecords` sample.

##### Exception Handling in Flat Files

There are many scenarios when tokenizing a line may cause exceptions to be thrown. Many
flat files are imperfect and contain incorrectly formatted records. Many users choose to
skip these erroneous lines while logging the issue, the original line, and the line
number. These logs can later be inspected manually or by another batch job. For this
reason, Spring Batch provides a hierarchy of exceptions for handling parse exceptions: `FlatFileParseException` and `FlatFileFormatException`. `FlatFileParseException` is
thrown by the `FlatFileItemReader` when any errors are encountered while trying to read a
file. `FlatFileFormatException` is thrown by implementations of the `LineTokenizer` interface and indicates a more specific error encountered while tokenizing.

###### `IncorrectTokenCountException`

Both `DelimitedLineTokenizer` and `FixedLengthTokenizer` have the ability to specify
column names that can be used for creating a `FieldSet`. However, if the number of column
names does not match the number of columns found while tokenizing a line, the `FieldSet` cannot be created, and an `IncorrectTokenCountException` is thrown, which contains the
number of tokens encountered and the number expected, as shown in the following example:

```
tokenizer.setNames(new String[] {"A", "B", "C", "D"});

try {
    tokenizer.tokenize("a,b,c");
}
catch (IncorrectTokenCountException e) {
    assertEquals(4, e.getExpectedCount());
    assertEquals(3, e.getActualCount());
}
```

Because the tokenizer was configured with 4 column names but only 3 tokens were found in
the file, an `IncorrectTokenCountException` was thrown.

###### `IncorrectLineLengthException`

Files formatted in a fixed-length format have additional requirements when parsing
because, unlike a delimited format, each column must strictly adhere to its predefined
width.
If the total line length does not equal the end of the widest configured range, an
+exception is thrown, as shown in the following example:
+
+```
+tokenizer.setColumns(new Range[] { new Range(1, 5),
+                                   new Range(6, 10),
+                                   new Range(11, 15) });
+try {
+    tokenizer.tokenize("12345");
+    fail("Expected IncorrectLineLengthException");
+}
+catch (IncorrectLineLengthException ex) {
+    assertEquals(15, ex.getExpectedLength());
+    assertEquals(5, ex.getActualLength());
+}
+```
+
+The configured ranges for the preceding tokenizer are 1-5, 6-10, and 11-15. Consequently,
+the expected total length of the line is 15. However, in the preceding example, a line of
+length 5 was passed in, causing an `IncorrectLineLengthException` to be thrown. Throwing an
+exception here, rather than only mapping the first column, lets the processing of the
+line fail earlier and with more information than it would contain if it failed while
+trying to read column 2 in a `FieldSetMapper`. However, there are scenarios where the
+length of the line is not always constant. For this reason, validation of line length can
+be turned off through the 'strict' property, as shown in the following example:
+
+```
+tokenizer.setColumns(new Range[] { new Range(1, 5), new Range(6, 10) });
+tokenizer.setStrict(false);
+FieldSet tokens = tokenizer.tokenize("12345");
+assertEquals("12345", tokens.readString(0));
+assertEquals("", tokens.readString(1));
+```
+
+The preceding example is almost identical to the one before it, except that
+`tokenizer.setStrict(false)` was called. This setting tells the tokenizer not to enforce
+line lengths when tokenizing the line. A `FieldSet` is now correctly created and
+returned. However, it contains only empty tokens for the remaining values.
+
+#### `FlatFileItemWriter`
+
+Writing out to flat files has the same problems and issues that reading in from a file
+must overcome. A step must be able to write either delimited or fixed-length formats in a
+transactional manner.
+
+##### `LineAggregator`
+
+Just as the `LineTokenizer` interface is necessary to take a line of input and turn it
+into a `FieldSet`, file writing must have a way to aggregate multiple fields into a
+single string for writing to a file. In Spring Batch, this is the `LineAggregator`, shown
+in the following interface definition:
+
+```
+public interface LineAggregator<T> {
+
+    public String aggregate(T item);
+
+}
+```
+
+The `LineAggregator` is the logical opposite of `LineTokenizer`. `LineTokenizer` takes a
+`String` and returns a `FieldSet`, whereas `LineAggregator` takes an `item` and returns a
+`String`.
+
+###### `PassThroughLineAggregator`
+
+The most basic implementation of the `LineAggregator` interface is the
+`PassThroughLineAggregator`, which assumes that the object is already a string or that
+its string representation is acceptable for writing, as shown in the following code:
+
+```
+public class PassThroughLineAggregator<T> implements LineAggregator<T> {
+
+    public String aggregate(T item) {
+        return item.toString();
+    }
+}
+```
+
+The preceding implementation is useful if direct control over creating the string is
+required but the advantages of a `FlatFileItemWriter`, such as transaction and restart
+support, are necessary.
+
+##### Simplified File Writing Example
+
+Now that the `LineAggregator` interface and its most basic implementation,
+`PassThroughLineAggregator`, have been defined, the basic flow of writing can be
+explained:
+
+1. The object to be written is passed to the `LineAggregator` in order to obtain a
+`String`.
+
+2.
The returned `String` is written to the configured file.
+
+The following excerpt from the `FlatFileItemWriter` expresses this in code:
+
+```
+public void write(T item) throws Exception {
+    write(lineAggregator.aggregate(item) + LINE_SEPARATOR);
+}
+```
+
+In XML, a simple example of configuration might look like the following:
+
+XML Configuration
+
+```
+<bean id="itemWriter" class="org.springframework.batch.item.file.FlatFileItemWriter">
+    <property name="resource" ref="outputResource" />
+    <property name="lineAggregator">
+        <bean class="org.springframework.batch.item.file.transform.PassThroughLineAggregator"/>
+    </property>
+</bean>
+```
+
+In Java, a simple example of configuration might look like the following:
+
+Java Configuration
+
+```
+@Bean
+public FlatFileItemWriter<Foo> itemWriter() {
+    return new FlatFileItemWriterBuilder<Foo>()
+               .name("itemWriter")
+               .resource(new FileSystemResource("target/test-outputs/output.txt"))
+               .lineAggregator(new PassThroughLineAggregator<>())
+               .build();
+}
+```
+
+##### `FieldExtractor`
+
+The preceding example may be useful for the most basic uses of writing to a file.
+However, most users of the `FlatFileItemWriter` have a domain object that needs to be
+written out and, thus, must be converted into a line. In file reading, the following was
+required:
+
+1. Read one line from the file.
+
+2. Pass the line into the `LineTokenizer#tokenize()` method, in order to retrieve a
+`FieldSet`.
+
+3. Pass the `FieldSet` returned from tokenizing to a `FieldSetMapper`, returning the
+   result from the `ItemReader#read()` method.
+
+File writing has similar but inverse steps:
+
+1. Pass the item to be written to the writer.
+
+2. Convert the fields on the item into an array.
+
+3. Aggregate the resulting array into a line.
+
+Because there is no way for the framework to know which fields from the object need to
+be written out, a `FieldExtractor` must be written to accomplish the task of turning the
+item into an array, as shown in the following interface definition:
+
+```
+public interface FieldExtractor<T> {
+
+    Object[] extract(T item);
+
+}
+```
+
+Implementations of the `FieldExtractor` interface should create an array from the fields
+of the provided object, which can then be written out with a delimiter between the
+elements or as part of a fixed-width line.
+
+###### `PassThroughFieldExtractor`
+
+There are many cases where a collection, such as an array, `Collection`, or `FieldSet`,
+needs to be written out. "Extracting" an array from one of these collection types is very
+straightforward: convert the collection to an array. Therefore, the
+`PassThroughFieldExtractor` should be used in this scenario. It should be noted that, if
+the object passed in is not a type of collection, then the `PassThroughFieldExtractor`
+returns an array containing solely the item to be extracted.
+
+###### `BeanWrapperFieldExtractor`
+
+As with the `BeanWrapperFieldSetMapper` described in the file reading section, it is
+often preferable to configure how to convert a domain object to an object array, rather
+than writing the conversion yourself. The `BeanWrapperFieldExtractor` provides this
+functionality, as shown in the following example:
+
+```
+BeanWrapperFieldExtractor<Name> extractor = new BeanWrapperFieldExtractor<>();
+extractor.setNames(new String[] { "first", "last", "born" });
+
+String first = "Alan";
+String last = "Turing";
+int born = 1912;
+
+Name n = new Name(first, last, born);
+Object[] values = extractor.extract(n);
+
+assertEquals(first, values[0]);
+assertEquals(last, values[1]);
+assertEquals(born, values[2]);
+```
+
+This extractor implementation has only one required property: the names of the fields to
+map.
Just as the `BeanWrapperFieldSetMapper` needs field names to map fields on the`FieldSet` to setters on the provided object, the `BeanWrapperFieldExtractor` needs names +to map to getters for creating an object array. It is worth noting that the order of the +names determines the order of the fields within the array. + +##### Delimited File Writing Example + +The most basic flat file format is one in which all fields are separated by a delimiter. +This can be accomplished using a `DelimitedLineAggregator`. The following example writes +out a simple domain object that represents a credit to a customer account: + +``` +public class CustomerCredit { + + private int id; + private String name; + private BigDecimal credit; + + //getters and setters removed for clarity +} +``` + +Because a domain object is being used, an implementation of the `FieldExtractor`interface must be provided, along with the delimiter to use. + +The following example shows how to use the `FieldExtractor` with a delimiter in XML: + +XML Configuration + +``` + + + + + + + + + + + + + +``` + +The following example shows how to use the `FieldExtractor` with a delimiter in Java: + +Java Configuration + +``` +@Bean +public FlatFileItemWriter itemWriter(Resource outputResource) throws Exception { + BeanWrapperFieldExtractor fieldExtractor = new BeanWrapperFieldExtractor<>(); + fieldExtractor.setNames(new String[] {"name", "credit"}); + fieldExtractor.afterPropertiesSet(); + + DelimitedLineAggregator lineAggregator = new DelimitedLineAggregator<>(); + lineAggregator.setDelimiter(","); + lineAggregator.setFieldExtractor(fieldExtractor); + + return new FlatFileItemWriterBuilder() + .name("customerCreditWriter") + .resource(outputResource) + .lineAggregator(lineAggregator) + .build(); +} +``` + +In the previous example, the `BeanWrapperFieldExtractor` described earlier in this +chapter is used to turn the name and credit fields within `CustomerCredit` into an object +array, which is then written out with commas between each field. + +It is also possible to use the `FlatFileItemWriterBuilder.DelimitedBuilder` to +automatically create the `BeanWrapperFieldExtractor` and `DelimitedLineAggregator`as shown in the following example: + +Java Configuration + +``` +@Bean +public FlatFileItemWriter itemWriter(Resource outputResource) throws Exception { + return new FlatFileItemWriterBuilder() + .name("customerCreditWriter") + .resource(outputResource) + .delimited() + .delimiter("|") + .names(new String[] {"name", "credit"}) + .build(); +} +``` + +##### Fixed Width File Writing Example + +Delimited is not the only type of flat file format. Many prefer to use a set width for +each column to delineate between fields, which is usually referred to as 'fixed width'. +Spring Batch supports this in file writing with the `FormatterLineAggregator`. 
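+
+Before looking at the configuration, it may help to preview what a fixed-width line
+looks like. The `FormatterLineAggregator` delegates to `java.util.Formatter` (discussed
+later in this section), so plain `String.format` shows the resulting layout. The format
+string matches the example below, but the field values here are invented for
+illustration:
+
+```
+// "%-9s" left-justifies the name within 9 characters, and "%-2.0f" renders
+// the credit with no decimal places in a minimum of 2 characters.
+String line = String.format("%-9s%-2.0f", "Smith", 90.0);
+// line is now "Smith    90"
+```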
+ +Using the same `CustomerCredit` domain object described above, it can be configured as +follows in XML: + +XML Configuration + +``` + + + + + + + + + + + + + +``` + +Using the same `CustomerCredit` domain object described above, it can be configured as +follows in Java: + +Java Configuration + +``` +@Bean +public FlatFileItemWriter itemWriter(Resource outputResource) throws Exception { + BeanWrapperFieldExtractor fieldExtractor = new BeanWrapperFieldExtractor<>(); + fieldExtractor.setNames(new String[] {"name", "credit"}); + fieldExtractor.afterPropertiesSet(); + + FormatterLineAggregator lineAggregator = new FormatterLineAggregator<>(); + lineAggregator.setFormat("%-9s%-2.0f"); + lineAggregator.setFieldExtractor(fieldExtractor); + + return new FlatFileItemWriterBuilder() + .name("customerCreditWriter") + .resource(outputResource) + .lineAggregator(lineAggregator) + .build(); +} +``` + +Most of the preceding example should look familiar. However, the value of the format +property is new. + +The following example shows the format property in XML: + +``` + +``` + +The following example shows the format property in Java: + +``` +... +FormatterLineAggregator lineAggregator = new FormatterLineAggregator<>(); +lineAggregator.setFormat("%-9s%-2.0f"); +... +``` + +The underlying implementation is built using the same`Formatter` added as part of Java 5. The Java`Formatter` is based on the`printf` functionality of the C programming +language. Most details on how to configure a formatter can be found in +the Javadoc of [Formatter](https://docs.oracle.com/javase/8/docs/api/java/util/Formatter.html). + +It is also possible to use the `FlatFileItemWriterBuilder.FormattedBuilder` to +automatically create the `BeanWrapperFieldExtractor` and `FormatterLineAggregator`as shown in following example: + +Java Configuration + +``` +@Bean +public FlatFileItemWriter itemWriter(Resource outputResource) throws Exception { + return new FlatFileItemWriterBuilder() + .name("customerCreditWriter") + .resource(outputResource) + .formatted() + .format("%-9s%-2.0f") + .names(new String[] {"name", "credit"}) + .build(); +} +``` + +##### Handling File Creation + +`FlatFileItemReader` has a very simple relationship with file resources. When the reader +is initialized, it opens the file (if it exists), and throws an exception if it does not. +File writing isn’t quite so simple. At first glance, it seems like a similar +straightforward contract should exist for `FlatFileItemWriter`: If the file already +exists, throw an exception, and, if it does not, create it and start writing. However, +potentially restarting a `Job` can cause issues. In normal restart scenarios, the +contract is reversed: If the file exists, start writing to it from the last known good +position, and, if it does not, throw an exception. However, what happens if the file name +for this job is always the same? In this case, you would want to delete the file if it +exists, unless it’s a restart. Because of this possibility, the `FlatFileItemWriter`contains the property, `shouldDeleteIfExists`. Setting this property to true causes an +existing file with the same name to be deleted when the writer is opened. + +### XML Item Readers and Writers + +Spring Batch provides transactional infrastructure for both reading XML records and +mapping them to Java objects as well as writing Java objects as XML records. + +| |Constraints on streaming XML
The StAX API is used for I/O, as other standard XML parsing APIs do not fit batch
processing requirements (DOM loads the whole input into memory at once and SAX controls
the parsing process by allowing the user to provide only callbacks).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +We need to consider how XML input and output works in Spring Batch. First, there are a +few concepts that vary from file reading and writing but are common across Spring Batch +XML processing. With XML processing, instead of lines of records (`FieldSet` instances) that need +to be tokenized, it is assumed an XML resource is a collection of 'fragments' +corresponding to individual records, as shown in the following image: + +![XML Input](https://docs.spring.io/spring-batch/docs/current/reference/html/images/xmlinput.png) + +Figure 1. XML Input + +The 'trade' tag is defined as the 'root element' in the scenario above. Everything +between '\' and '\' is considered one 'fragment'. Spring Batch +uses Object/XML Mapping (OXM) to bind fragments to objects. However, Spring Batch is not +tied to any particular XML binding technology. Typical use is to delegate to[Spring OXM](https://docs.spring.io/spring/docs/current/spring-framework-reference/data-access.html#oxm), which +provides uniform abstraction for the most popular OXM technologies. The dependency on +Spring OXM is optional and you can choose to implement Spring Batch specific interfaces +if desired. The relationship to the technologies that OXM supports is shown in the +following image: + +![OXM Binding](https://docs.spring.io/spring-batch/docs/current/reference/html/images/oxm-fragments.png) + +Figure 2. OXM Binding + +With an introduction to OXM and how one can use XML fragments to represent records, we +can now more closely examine readers and writers. + +#### `StaxEventItemReader` + +The `StaxEventItemReader` configuration provides a typical setup for the processing of +records from an XML input stream. First, consider the following set of XML records that +the `StaxEventItemReader` can process: + +``` + + + + XYZ0001 + 5 + 11.39 + Customer1 + + + XYZ0002 + 2 + 72.99 + Customer2c + + + XYZ0003 + 9 + 99.99 + Customer3 + + +``` + +To be able to process the XML records, the following is needed: + +* Root Element Name: The name of the root element of the fragment that constitutes the + object to be mapped. The example configuration demonstrates this with the value of trade. + +* Resource: A Spring Resource that represents the file to read. + +* `Unmarshaller`: An unmarshalling facility provided by Spring OXM for mapping the XML + fragment to an object. 
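+
+The examples that follow bind each fragment to a `Trade` object. That class is not shown
+in this chapter, so the following is only a minimal sketch, with fields inferred from
+the XML elements above and the alias types used below:
+
+```
+public class Trade {
+
+    private String isin;       // <isin> element
+    private long quantity;     // <quantity> element
+    private BigDecimal price;  // <price> element
+    private String customer;   // <customer> element
+
+    // getters and setters omitted for clarity
+}
+```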
+ +The following example shows how to define a `StaxEventItemReader` that works with a root +element named `trade`, a resource of `data/iosample/input/input.xml`, and an unmarshaller +called `tradeMarshaller` in XML: + +XML Configuration + +``` + + + + + +``` + +The following example shows how to define a `StaxEventItemReader` that works with a root +element named `trade`, a resource of `data/iosample/input/input.xml`, and an unmarshaller +called `tradeMarshaller` in Java: + +Java Configuration + +``` +@Bean +public StaxEventItemReader itemReader() { + return new StaxEventItemReaderBuilder() + .name("itemReader") + .resource(new FileSystemResource("org/springframework/batch/item/xml/domain/trades.xml")) + .addFragmentRootElements("trade") + .unmarshaller(tradeMarshaller()) + .build(); + +} +``` + +Note that, in this example, we have chosen to use an `XStreamMarshaller`, which accepts +an alias passed in as a map with the first key and value being the name of the fragment +(that is, a root element) and the object type to bind. Then, similar to a `FieldSet`, the +names of the other elements that map to fields within the object type are described as +key/value pairs in the map. In the configuration file, we can use a Spring configuration +utility to describe the required alias. + +The following example shows how to describe the alias in XML: + +XML Configuration + +``` + + + + + + + + + + + +``` + +The following example shows how to describe the alias in Java: + +Java Configuration + +``` +@Bean +public XStreamMarshaller tradeMarshaller() { + Map aliases = new HashMap<>(); + aliases.put("trade", Trade.class); + aliases.put("price", BigDecimal.class); + aliases.put("isin", String.class); + aliases.put("customer", String.class); + aliases.put("quantity", Long.class); + + XStreamMarshaller marshaller = new XStreamMarshaller(); + + marshaller.setAliases(aliases); + + return marshaller; +} +``` + +On input, the reader reads the XML resource until it recognizes that a new fragment is +about to start. By default, the reader matches the element name to recognize that a new +fragment is about to start. The reader creates a standalone XML document from the +fragment and passes the document to a deserializer (typically a wrapper around a Spring +OXM `Unmarshaller`) to map the XML to a Java object. + +In summary, this procedure is analogous to the following Java code, which uses the +injection provided by the Spring configuration: + +``` +StaxEventItemReader xmlStaxEventItemReader = new StaxEventItemReader<>(); +Resource resource = new ByteArrayResource(xmlResource.getBytes()); + +Map aliases = new HashMap(); +aliases.put("trade","org.springframework.batch.sample.domain.trade.Trade"); +aliases.put("price","java.math.BigDecimal"); +aliases.put("customer","java.lang.String"); +aliases.put("isin","java.lang.String"); +aliases.put("quantity","java.lang.Long"); +XStreamMarshaller unmarshaller = new XStreamMarshaller(); +unmarshaller.setAliases(aliases); +xmlStaxEventItemReader.setUnmarshaller(unmarshaller); +xmlStaxEventItemReader.setResource(resource); +xmlStaxEventItemReader.setFragmentRootElementName("trade"); +xmlStaxEventItemReader.open(new ExecutionContext()); + +boolean hasNext = true; + +Trade trade = null; + +while (hasNext) { + trade = xmlStaxEventItemReader.read(); + if (trade == null) { + hasNext = false; + } + else { + System.out.println(trade); + } +} +``` + +#### `StaxEventItemWriter` + +Output works symmetrically to input. 
The `StaxEventItemWriter` needs a `Resource`, a
+marshaller, and a `rootTagName`. A Java object is passed to a marshaller (typically a
+standard Spring OXM `Marshaller`), which writes to a `Resource` by using a custom event
+writer that filters the `StartDocument` and `EndDocument` events produced for each
+fragment by the OXM tools.
+
+The following XML example uses the `MarshallingEventWriterSerializer`:
+
+XML Configuration
+
+```
+<bean id="itemWriter" class="org.springframework.batch.item.xml.StaxEventItemWriter">
+    <property name="resource" ref="outputResource" />
+    <property name="marshaller" ref="tradeMarshaller" />
+    <property name="rootTagName" value="trade" />
+    <property name="overwriteOutput" value="true" />
+</bean>
+```
+
+The following Java example uses the `MarshallingEventWriterSerializer`:
+
+Java Configuration
+
+```
+@Bean
+public StaxEventItemWriter<Trade> itemWriter(Resource outputResource) {
+    return new StaxEventItemWriterBuilder<Trade>()
+            .name("tradesWriter")
+            .marshaller(tradeMarshaller())
+            .resource(outputResource)
+            .rootTagName("trade")
+            .overwriteOutput(true)
+            .build();
+}
+```
+
+The preceding configuration sets up the three required properties and sets the optional
+`overwriteOutput=true` attribute, mentioned earlier in this chapter, for specifying whether
+an existing file can be overwritten.
+
+The following XML example uses the same marshaller as the one used in the reading example
+shown earlier in the chapter:
+
+XML Configuration
+
+```
+<bean id="customerCreditMarshaller"
+      class="org.springframework.oxm.xstream.XStreamMarshaller">
+    <property name="aliases">
+        <util:map id="aliases">
+            <entry key="trade" value="org.springframework.batch.sample.domain.trade.Trade" />
+            <entry key="price" value="java.math.BigDecimal" />
+            <entry key="isin" value="java.lang.String" />
+            <entry key="customer" value="java.lang.String" />
+            <entry key="quantity" value="java.lang.Long" />
+        </util:map>
+    </property>
+</bean>
+```
+
+The following Java example uses the same marshaller as the one used in the reading example
+shown earlier in the chapter:
+
+Java Configuration
+
+```
+@Bean
+public XStreamMarshaller customerCreditMarshaller() {
+    XStreamMarshaller marshaller = new XStreamMarshaller();
+
+    Map<String, Class> aliases = new HashMap<>();
+    aliases.put("trade", Trade.class);
+    aliases.put("price", BigDecimal.class);
+    aliases.put("isin", String.class);
+    aliases.put("customer", String.class);
+    aliases.put("quantity", Long.class);
+
+    marshaller.setAliases(aliases);
+
+    return marshaller;
+}
+```
+
+To summarize with a Java example, the following code illustrates all of the points
+discussed, demonstrating the programmatic setup of the required properties:
+
+```
+FileSystemResource resource = new FileSystemResource("data/outputFile.xml");
+
+Map<String, String> aliases = new HashMap<>();
+aliases.put("trade","org.springframework.batch.sample.domain.trade.Trade");
+aliases.put("price","java.math.BigDecimal");
+aliases.put("customer","java.lang.String");
+aliases.put("isin","java.lang.String");
+aliases.put("quantity","java.lang.Long");
+XStreamMarshaller marshaller = new XStreamMarshaller();
+marshaller.setAliases(aliases);
+
+StaxEventItemWriter<Trade> staxItemWriter =
+    new StaxEventItemWriterBuilder<Trade>()
+            .name("tradesWriter")
+            .marshaller(marshaller)
+            .resource(resource)
+            .rootTagName("trade")
+            .overwriteOutput(true)
+            .build();
+
+staxItemWriter.afterPropertiesSet();
+
+ExecutionContext executionContext = new ExecutionContext();
+staxItemWriter.open(executionContext);
+Trade trade = new Trade();
+trade.setPrice(new BigDecimal("11.39"));
+trade.setIsin("XYZ0001");
+trade.setQuantity(5L);
+trade.setCustomer("Customer1");
+staxItemWriter.write(Arrays.asList(trade));
+```
+
+### JSON Item Readers And Writers
+
+Spring Batch provides support for reading and writing JSON resources in the following format:
+
+```
+[
+  {
+    "isin": "123",
+    "quantity": 1,
+    "price": 1.2,
+    "customer": "foo"
+  },
+  {
+    "isin": "456",
+    "quantity": 2,
+    "price": 1.4,
+    "customer": "bar"
+  }
+]
+```
+
+It is assumed that the JSON resource is an array of JSON objects corresponding to
+individual items. Spring Batch is not tied to any particular JSON library.
+ +#### `JsonItemReader` + +The `JsonItemReader` delegates JSON parsing and binding to implementations of the`org.springframework.batch.item.json.JsonObjectReader` interface. This interface +is intended to be implemented by using a streaming API to read JSON objects +in chunks. Two implementations are currently provided: + +* [Jackson](https://github.com/FasterXML/jackson) through the `org.springframework.batch.item.json.JacksonJsonObjectReader` + +* [Gson](https://github.com/google/gson) through the `org.springframework.batch.item.json.GsonJsonObjectReader` + +To be able to process JSON records, the following is needed: + +* `Resource`: A Spring Resource that represents the JSON file to read. + +* `JsonObjectReader`: A JSON object reader to parse and bind JSON objects to items + +The following example shows how to define a `JsonItemReader` that works with the +previous JSON resource `org/springframework/batch/item/json/trades.json` and a`JsonObjectReader` based on Jackson: + +``` +@Bean +public JsonItemReader jsonItemReader() { + return new JsonItemReaderBuilder() + .jsonObjectReader(new JacksonJsonObjectReader<>(Trade.class)) + .resource(new ClassPathResource("trades.json")) + .name("tradeJsonItemReader") + .build(); +} +``` + +#### `JsonFileItemWriter` + +The `JsonFileItemWriter` delegates the marshalling of items to the`org.springframework.batch.item.json.JsonObjectMarshaller` interface. The contract +of this interface is to take an object and marshall it to a JSON `String`. +Two implementations are currently provided: + +* [Jackson](https://github.com/FasterXML/jackson) through the `org.springframework.batch.item.json.JacksonJsonObjectMarshaller` + +* [Gson](https://github.com/google/gson) through the `org.springframework.batch.item.json.GsonJsonObjectMarshaller` + +To be able to write JSON records, the following is needed: + +* `Resource`: A Spring `Resource` that represents the JSON file to write + +* `JsonObjectMarshaller`: A JSON object marshaller to marshall objects to JSON format + +The following example shows how to define a `JsonFileItemWriter`: + +``` +@Bean +public JsonFileItemWriter jsonFileItemWriter() { + return new JsonFileItemWriterBuilder() + .jsonObjectMarshaller(new JacksonJsonObjectMarshaller<>()) + .resource(new ClassPathResource("trades.json")) + .name("tradeJsonFileItemWriter") + .build(); +} +``` + +### Multi-File Input + +It is a common requirement to process multiple files within a single `Step`. Assuming the +files all have the same formatting, the `MultiResourceItemReader` supports this type of +input for both XML and flat file processing. Consider the following files in a directory: + +``` +file-1.txt file-2.txt ignored.txt +``` + +file-1.txt and file-2.txt are formatted the same and, for business reasons, should be +processed together. The `MultiResourceItemReader` can be used to read in both files by +using wildcards. + +The following example shows how to read files with wildcards in XML: + +XML Configuration + +``` + + + + +``` + +The following example shows how to read files with wildcards in Java: + +Java Configuration + +``` +@Bean +public MultiResourceItemReader multiResourceReader() { + return new MultiResourceItemReaderBuilder() + .delegate(flatFileItemReader()) + .resources(resources()) + .build(); +} +``` + +The referenced delegate is a simple `FlatFileItemReader`. The above configuration reads +input from both files, handling rollback and restart scenarios. 
It should be noted that, +as with any `ItemReader`, adding extra input (in this case a file) could cause potential +issues when restarting. It is recommended that batch jobs work with their own individual +directories until completed successfully. + +| |Input resources are ordered by using `MultiResourceItemReader#setComparator(Comparator)`to make sure resource ordering is preserved between job runs in restart scenario.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### Database + +Like most enterprise application styles, a database is the central storage mechanism for +batch. However, batch differs from other application styles due to the sheer size of the +datasets with which the system must work. If a SQL statement returns 1 million rows, the +result set probably holds all returned results in memory until all rows have been read. +Spring Batch provides two types of solutions for this problem: + +* [Cursor-based `ItemReader` Implementations](#cursorBasedItemReaders) + +* [Paging `ItemReader` Implementations](#pagingItemReaders) + +#### Cursor-based `ItemReader` Implementations + +Using a database cursor is generally the default approach of most batch developers, +because it is the database’s solution to the problem of 'streaming' relational data. The +Java `ResultSet` class is essentially an object oriented mechanism for manipulating a +cursor. A `ResultSet` maintains a cursor to the current row of data. Calling `next` on a`ResultSet` moves this cursor to the next row. The Spring Batch cursor-based `ItemReader`implementation opens a cursor on initialization and moves the cursor forward one row for +every call to `read`, returning a mapped object that can be used for processing. The`close` method is then called to ensure all resources are freed up. The Spring core`JdbcTemplate` gets around this problem by using the callback pattern to completely map +all rows in a `ResultSet` and close before returning control back to the method caller. +However, in batch, this must wait until the step is complete. The following image shows a +generic diagram of how a cursor-based `ItemReader` works. Note that, while the example +uses SQL (because SQL is so widely known), any technology could implement the basic +approach. + +![Cursor Example](https://docs.spring.io/spring-batch/docs/current/reference/html/images/cursorExample.png) + +Figure 3. Cursor Example + +This example illustrates the basic pattern. Given a 'FOO' table, which has three columns:`ID`, `NAME`, and `BAR`, select all rows with an ID greater than 1 but less than 7. This +puts the beginning of the cursor (row 1) on ID 2. The result of this row should be a +completely mapped `Foo` object. Calling `read()` again moves the cursor to the next row, +which is the `Foo` with an ID of 3. The results of these reads are written out after each`read`, allowing the objects to be garbage collected (assuming no instance variables are +maintaining references to them). + +##### `JdbcCursorItemReader` + +`JdbcCursorItemReader` is the JDBC implementation of the cursor-based technique. It works +directly with a `ResultSet` and requires an SQL statement to run against a connection +obtained from a `DataSource`. 
The following database schema is used as an example: + +``` +CREATE TABLE CUSTOMER ( + ID BIGINT IDENTITY PRIMARY KEY, + NAME VARCHAR(45), + CREDIT FLOAT +); +``` + +Many people prefer to use a domain object for each row, so the following example uses an +implementation of the `RowMapper` interface to map a `CustomerCredit` object: + +``` +public class CustomerCreditRowMapper implements RowMapper { + + public static final String ID_COLUMN = "id"; + public static final String NAME_COLUMN = "name"; + public static final String CREDIT_COLUMN = "credit"; + + public CustomerCredit mapRow(ResultSet rs, int rowNum) throws SQLException { + CustomerCredit customerCredit = new CustomerCredit(); + + customerCredit.setId(rs.getInt(ID_COLUMN)); + customerCredit.setName(rs.getString(NAME_COLUMN)); + customerCredit.setCredit(rs.getBigDecimal(CREDIT_COLUMN)); + + return customerCredit; + } +} +``` + +Because `JdbcCursorItemReader` shares key interfaces with `JdbcTemplate`, it is useful to +see an example of how to read in this data with `JdbcTemplate`, in order to contrast it +with the `ItemReader`. For the purposes of this example, assume there are 1,000 rows in +the `CUSTOMER` database. The first example uses `JdbcTemplate`: + +``` +//For simplicity sake, assume a dataSource has already been obtained +JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource); +List customerCredits = jdbcTemplate.query("SELECT ID, NAME, CREDIT from CUSTOMER", + new CustomerCreditRowMapper()); +``` + +After running the preceding code snippet, the `customerCredits` list contains 1,000`CustomerCredit` objects. In the query method, a connection is obtained from the`DataSource`, the provided SQL is run against it, and the `mapRow` method is called for +each row in the `ResultSet`. Contrast this with the approach of the`JdbcCursorItemReader`, shown in the following example: + +``` +JdbcCursorItemReader itemReader = new JdbcCursorItemReader(); +itemReader.setDataSource(dataSource); +itemReader.setSql("SELECT ID, NAME, CREDIT from CUSTOMER"); +itemReader.setRowMapper(new CustomerCreditRowMapper()); +int counter = 0; +ExecutionContext executionContext = new ExecutionContext(); +itemReader.open(executionContext); +Object customerCredit = new Object(); +while(customerCredit != null){ + customerCredit = itemReader.read(); + counter++; +} +itemReader.close(); +``` + +After running the preceding code snippet, the counter equals 1,000. If the code above had +put the returned `customerCredit` into a list, the result would have been exactly the +same as with the `JdbcTemplate` example. However, the big advantage of the `ItemReader`is that it allows items to be 'streamed'. The `read` method can be called once, the item +can be written out by an `ItemWriter`, and then the next item can be obtained with`read`. This allows item reading and writing to be done in 'chunks' and committed +periodically, which is the essence of high performance batch processing. Furthermore, it +is easily configured for injection into a Spring Batch `Step`. 
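+
+To make the contrast with the `JdbcTemplate` example concrete, the following sketch
+shows, in simplified form, the read-buffer-write loop that a chunk-oriented step drives
+on your behalf. It is not code you would normally write yourself, and the chunk size of
+10 is arbitrary:
+
+```
+List<CustomerCredit> chunk = new ArrayList<>(10);
+CustomerCredit credit;
+while ((credit = itemReader.read()) != null) {
+    chunk.add(credit);
+    if (chunk.size() == 10) {
+        itemWriter.write(chunk);  // in a real step, this is a commit boundary
+        chunk.clear();
+    }
+}
+if (!chunk.isEmpty()) {
+    itemWriter.write(chunk);      // write the final, partial chunk
+}
+```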
+ +The following example shows how to inject an `ItemReader` into a `Step` in XML: + +XML Configuration + +``` + + + + + + + +``` + +The following example shows how to inject an `ItemReader` into a `Step` in Java: + +Java Configuration + +``` +@Bean +public JdbcCursorItemReader itemReader() { + return new JdbcCursorItemReaderBuilder() + .dataSource(this.dataSource) + .name("creditReader") + .sql("select ID, NAME, CREDIT from CUSTOMER") + .rowMapper(new CustomerCreditRowMapper()) + .build(); + +} +``` + +###### Additional Properties + +Because there are so many varying options for opening a cursor in Java, there are many +properties on the `JdbcCursorItemReader` that can be set, as described in the following +table: + +| ignoreWarnings | Determines whether or not SQLWarnings are logged or cause an exception.
The default is `true` (meaning that warnings are logged). |
+|---|---|
+| fetchSize | Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed by the `ResultSet` object used by the `ItemReader`. By default, no hint is given. |
+| maxRows | Sets the limit for the maximum number of rows the underlying `ResultSet` can hold at any one time. |
+| queryTimeout | Sets the number of seconds the driver waits for a `Statement` object to run. If the limit is exceeded, a `DataAccessException` is thrown. (Consult your driver vendor documentation for details.) |
+| verifyCursorPosition | Because the same `ResultSet` held by the `ItemReader` is passed to the `RowMapper`, it is possible for users to call `ResultSet.next()` themselves, which could cause issues with the reader’s internal count. Setting this value to `true` causes an exception to be thrown if the cursor position is not the same after the `RowMapper` call as it was before. |
+| saveState | Indicates whether or not the reader’s state should be saved in the `ExecutionContext` provided by `ItemStream#update(ExecutionContext)`. The default is `true`. |
+| driverSupportsAbsolute | Indicates whether the JDBC driver supports setting the absolute row on a `ResultSet`. It is recommended that this be set to `true` for JDBC drivers that support `ResultSet.absolute()`, as it may improve performance, especially if a step fails while working with a large data set. Defaults to `false`. |
+| setUseSharedExtendedConnection | Indicates whether the connection used for the cursor should be used by all other processing, thus sharing the same transaction. If this is set to `false`, the cursor is opened with its own connection and does not participate in any transactions started for the rest of the step processing. If you set this flag to `true`, you must wrap the `DataSource` in an `ExtendedConnectionDataSourceProxy` to prevent the connection from being closed and released after each commit. When you set this option to `true`, the statement used to open the cursor is created with both 'READ\_ONLY' and 'HOLD\_CURSORS\_OVER\_COMMIT' options. This allows holding the cursor open over transaction start and commits performed in the step processing. To use this feature, you need a database that supports this and a JDBC
driver supporting JDBC 3.0 or later. Defaults to `false`.| + +##### `HibernateCursorItemReader` + +Just as normal Spring users make important decisions about whether or not to use ORM +solutions, which affect whether or not they use a `JdbcTemplate` or a`HibernateTemplate`, Spring Batch users have the same options.`HibernateCursorItemReader` is the Hibernate implementation of the cursor technique. +Hibernate’s usage in batch has been fairly controversial. This has largely been because +Hibernate was originally developed to support online application styles. However, that +does not mean it cannot be used for batch processing. The easiest approach for solving +this problem is to use a `StatelessSession` rather than a standard session. This removes +all of the caching and dirty checking Hibernate employs and that can cause issues in a +batch scenario. For more information on the differences between stateless and normal +hibernate sessions, refer to the documentation of your specific hibernate release. The`HibernateCursorItemReader` lets you declare an HQL statement and pass in a`SessionFactory`, which will pass back one item per call to read in the same basic +fashion as the `JdbcCursorItemReader`. The following example configuration uses the same +'customer credit' example as the JDBC reader: + +``` +HibernateCursorItemReader itemReader = new HibernateCursorItemReader(); +itemReader.setQueryString("from CustomerCredit"); +//For simplicity sake, assume sessionFactory already obtained. +itemReader.setSessionFactory(sessionFactory); +itemReader.setUseStatelessSession(true); +int counter = 0; +ExecutionContext executionContext = new ExecutionContext(); +itemReader.open(executionContext); +Object customerCredit = new Object(); +while(customerCredit != null){ + customerCredit = itemReader.read(); + counter++; +} +itemReader.close(); +``` + +This configured `ItemReader` returns `CustomerCredit` objects in the exact same manner +as described by the `JdbcCursorItemReader`, assuming hibernate mapping files have been +created correctly for the `Customer` table. The 'useStatelessSession' property defaults +to true but has been added here to draw attention to the ability to switch it on or off. +It is also worth noting that the fetch size of the underlying cursor can be set with the`setFetchSize` property. As with `JdbcCursorItemReader`, configuration is +straightforward. + +The following example shows how to inject a Hibernate `ItemReader` in XML: + +XML Configuration + +``` + + + + +``` + +The following example shows how to inject a Hibernate `ItemReader` in Java: + +Java Configuration + +``` +@Bean +public HibernateCursorItemReader itemReader(SessionFactory sessionFactory) { + return new HibernateCursorItemReaderBuilder() + .name("creditReader") + .sessionFactory(sessionFactory) + .queryString("from CustomerCredit") + .build(); +} +``` + +##### `StoredProcedureItemReader` + +Sometimes it is necessary to obtain the cursor data by using a stored procedure. The`StoredProcedureItemReader` works like the `JdbcCursorItemReader`, except that, instead +of running a query to obtain a cursor, it runs a stored procedure that returns a cursor. +The stored procedure can return the cursor in three different ways: + +* As a returned `ResultSet` (used by SQL Server, Sybase, DB2, Derby, and MySQL). + +* As a ref-cursor returned as an out parameter (used by Oracle and PostgreSQL). + +* As the return value of a stored function call. 
+
+The following XML example configuration uses the same 'customer credit' example as
+earlier examples:
+
+XML Configuration
+
+```
+<bean id="reader" class="org.springframework.batch.item.database.StoredProcedureItemReader">
+    <property name="dataSource" ref="dataSource"/>
+    <property name="procedureName" value="sp_customer_credit"/>
+    <property name="rowMapper">
+        <bean class="org.springframework.batch.sample.domain.CustomerCreditRowMapper"/>
+    </property>
+</bean>
+```
+
+The following Java example configuration uses the same 'customer credit' example as
+earlier examples:
+
+Java Configuration
+
+```
+@Bean
+public StoredProcedureItemReader<CustomerCredit> reader(DataSource dataSource) {
+    StoredProcedureItemReader<CustomerCredit> reader = new StoredProcedureItemReader<>();
+
+    reader.setDataSource(dataSource);
+    reader.setProcedureName("sp_customer_credit");
+    reader.setRowMapper(new CustomerCreditRowMapper());
+
+    return reader;
+}
+```
+
+The preceding example relies on the stored procedure to provide a `ResultSet` as a
+returned result (option 1 from earlier).
+
+If the stored procedure returned a `ref-cursor` (option 2), then we would need to provide
+the position of the out parameter that is the returned `ref-cursor`.
+
+The following example shows how to work with the first parameter being a ref-cursor in
+XML:
+
+XML Configuration
+
+```
+<bean id="reader" class="org.springframework.batch.item.database.StoredProcedureItemReader">
+    <property name="dataSource" ref="dataSource"/>
+    <property name="procedureName" value="sp_customer_credit"/>
+    <property name="refCursorPosition" value="1"/>
+    <property name="rowMapper">
+        <bean class="org.springframework.batch.sample.domain.CustomerCreditRowMapper"/>
+    </property>
+</bean>
+```
+
+The following example shows how to work with the first parameter being a ref-cursor in
+Java:
+
+Java Configuration
+
+```
+@Bean
+public StoredProcedureItemReader<CustomerCredit> reader(DataSource dataSource) {
+    StoredProcedureItemReader<CustomerCredit> reader = new StoredProcedureItemReader<>();
+
+    reader.setDataSource(dataSource);
+    reader.setProcedureName("sp_customer_credit");
+    reader.setRowMapper(new CustomerCreditRowMapper());
+    reader.setRefCursorPosition(1);
+
+    return reader;
+}
+```
+
+If the cursor was returned from a stored function (option 3), we would need to set the
+`function` property to `true`. It defaults to `false`.
+
+The following example shows how to set the `function` property to `true` in XML:
+
+XML Configuration
+
+```
+<bean id="reader" class="org.springframework.batch.item.database.StoredProcedureItemReader">
+    <property name="dataSource" ref="dataSource"/>
+    <property name="procedureName" value="sp_customer_credit"/>
+    <property name="function" value="true"/>
+    <property name="rowMapper">
+        <bean class="org.springframework.batch.sample.domain.CustomerCreditRowMapper"/>
+    </property>
+</bean>
+```
+
+The following example shows how to set the `function` property to `true` in Java:
+
+Java Configuration
+
+```
+@Bean
+public StoredProcedureItemReader<CustomerCredit> reader(DataSource dataSource) {
+    StoredProcedureItemReader<CustomerCredit> reader = new StoredProcedureItemReader<>();
+
+    reader.setDataSource(dataSource);
+    reader.setProcedureName("sp_customer_credit");
+    reader.setRowMapper(new CustomerCreditRowMapper());
+    reader.setFunction(true);
+
+    return reader;
+}
+```
+
+In all of these cases, we need to define a `RowMapper` as well as a `DataSource` and the
+actual procedure name.
+
+If the stored procedure or function takes in parameters, they must be declared and
+set by using the `parameters` property. The following example, for Oracle, declares three
+parameters. The first one is the `out` parameter that returns the ref-cursor, and the
+second and third are `in` parameters that take values of type `INTEGER`.
+
+The following example shows how to work with parameters in XML:
+
+XML Configuration
+
+```
+<bean id="reader" class="org.springframework.batch.item.database.StoredProcedureItemReader">
+    <property name="dataSource" ref="dataSource"/>
+    <property name="procedureName" value="spring.cursor_func"/>
+    <property name="parameters">
+        <list>
+            <bean class="org.springframework.jdbc.core.SqlOutParameter">
+                <constructor-arg index="0" value="newid"/>
+                <constructor-arg index="1">
+                    <util:constant static-field="oracle.jdbc.OracleTypes.CURSOR"/>
+                </constructor-arg>
+            </bean>
+            <bean class="org.springframework.jdbc.core.SqlParameter">
+                <constructor-arg index="0" value="amount"/>
+                <constructor-arg index="1">
+                    <util:constant static-field="java.sql.Types.INTEGER"/>
+                </constructor-arg>
+            </bean>
+            <bean class="org.springframework.jdbc.core.SqlParameter">
+                <constructor-arg index="0" value="custid"/>
+                <constructor-arg index="1">
+                    <util:constant static-field="java.sql.Types.INTEGER"/>
+                </constructor-arg>
+            </bean>
+        </list>
+    </property>
+    <property name="refCursorPosition" value="1"/>
+    <property name="rowMapper" ref="rowMapper"/>
+    <property name="preparedStatementSetter" ref="parameterSetter"/>
+</bean>
+```
+
+The following example shows how to work with parameters in Java:
+
+Java Configuration
+
+```
+@Bean
+public StoredProcedureItemReader<CustomerCredit> reader(DataSource dataSource) {
+    SqlParameter[] parameters = {
+        new SqlOutParameter("newId", OracleTypes.CURSOR),
+        new SqlParameter("amount", Types.INTEGER),
+        new SqlParameter("custId", Types.INTEGER)
+    };
+
+    StoredProcedureItemReader<CustomerCredit> reader = new StoredProcedureItemReader<>();
+
+    reader.setDataSource(dataSource);
+    reader.setProcedureName("spring.cursor_func");
+    reader.setParameters(parameters);
+    reader.setRefCursorPosition(1);
+    reader.setRowMapper(rowMapper());
+    reader.setPreparedStatementSetter(parameterSetter());
+
+    return reader;
+}
+```
+
+In addition to the parameter declarations, we need to specify a `PreparedStatementSetter`
+implementation that sets the parameter values for the call. This works the same as for
+the `JdbcCursorItemReader` above. All the additional properties listed in
+[Additional Properties](#JdbcCursorItemReaderProperties) apply to the `StoredProcedureItemReader` as well.
+
+#### Paging `ItemReader` Implementations
+
+An alternative to using a database cursor is running multiple queries, where each query
+fetches a portion of the results. We refer to this portion as a page. Each query must
+specify the starting row number and the number of rows that we want returned in the page.
+
+##### `JdbcPagingItemReader`
+
+One implementation of a paging `ItemReader` is the `JdbcPagingItemReader`. The
+`JdbcPagingItemReader` needs a `PagingQueryProvider` responsible for providing the SQL
+queries used to retrieve the rows making up a page. Since each database has its own
+strategy for providing paging support, we need to use a different `PagingQueryProvider`
+for each supported database type. There is also the `SqlPagingQueryProviderFactoryBean`
+that auto-detects the database that is being used and determines the appropriate
+`PagingQueryProvider` implementation. This simplifies the configuration and is the
+recommended best practice.
+
+The `SqlPagingQueryProviderFactoryBean` requires that you specify a `select` clause and a
+`from` clause. You can also provide an optional `where` clause. These clauses and the
+required `sortKey` are used to build an SQL statement.
+
+| |It is important to have a unique key constraint on the `sortKey` to guarantee that
no data is lost between executions.| +|---|--------------------------------------------------------------------------------------------------------------------------| + +After the reader has been opened, it passes back one item per call to `read` in the same +basic fashion as any other `ItemReader`. The paging happens behind the scenes when +additional rows are needed. + +The following XML example configuration uses a similar 'customer credit' example as the +cursor-based `ItemReaders` shown previously: + +XML Configuration + +``` + + + + + + + + + + + + + + + + + + +``` + +The following Java example configuration uses a similar 'customer credit' example as the +cursor-based `ItemReaders` shown previously: + +Java Configuration + +``` +@Bean +public JdbcPagingItemReader itemReader(DataSource dataSource, PagingQueryProvider queryProvider) { + Map parameterValues = new HashMap<>(); + parameterValues.put("status", "NEW"); + + return new JdbcPagingItemReaderBuilder() + .name("creditReader") + .dataSource(dataSource) + .queryProvider(queryProvider) + .parameterValues(parameterValues) + .rowMapper(customerCreditMapper()) + .pageSize(1000) + .build(); +} + +@Bean +public SqlPagingQueryProviderFactoryBean queryProvider() { + SqlPagingQueryProviderFactoryBean provider = new SqlPagingQueryProviderFactoryBean(); + + provider.setSelectClause("select id, name, credit"); + provider.setFromClause("from customer"); + provider.setWhereClause("where status=:status"); + provider.setSortKey("id"); + + return provider; +} +``` + +This configured `ItemReader` returns `CustomerCredit` objects using the `RowMapper`, +which must be specified. The 'pageSize' property determines the number of entities read +from the database for each query run. + +The 'parameterValues' property can be used to specify a `Map` of parameter values for the +query. If you use named parameters in the `where` clause, the key for each entry should +match the name of the named parameter. If you use a traditional '?' placeholder, then the +key for each entry should be the number of the placeholder, starting with 1. + +##### `JpaPagingItemReader` + +Another implementation of a paging `ItemReader` is the `JpaPagingItemReader`. JPA does +not have a concept similar to the Hibernate `StatelessSession`, so we have to use other +features provided by the JPA specification. Since JPA supports paging, this is a natural +choice when it comes to using JPA for batch processing. After each page is read, the +entities become detached and the persistence context is cleared, to allow the entities to +be garbage collected once the page is processed. + +The `JpaPagingItemReader` lets you declare a JPQL statement and pass in a`EntityManagerFactory`. It then passes back one item per call to read in the same basic +fashion as any other `ItemReader`. The paging happens behind the scenes when additional +entities are needed. 
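+
+Because the reader runs a JPQL query, `CustomerCredit` must be a mapped entity for this
+to work. The JPA annotations are assumed here and do not appear elsewhere in this
+chapter; a minimal sketch of such an entity might look like the following:
+
+```
+@Entity
+public class CustomerCredit {
+
+    @Id
+    private int id;
+
+    private String name;
+
+    private BigDecimal credit;
+
+    // getters and setters omitted for clarity
+}
+```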
+ +The following XML example configuration uses the same 'customer credit' example as the +JDBC reader shown previously: + +XML Configuration + +``` + + + + + +``` + +The following Java example configuration uses the same 'customer credit' example as the +JDBC reader shown previously: + +Java Configuration + +``` +@Bean +public JpaPagingItemReader itemReader() { + return new JpaPagingItemReaderBuilder() + .name("creditReader") + .entityManagerFactory(entityManagerFactory()) + .queryString("select c from CustomerCredit c") + .pageSize(1000) + .build(); +} +``` + +This configured `ItemReader` returns `CustomerCredit` objects in the exact same manner as +described for the `JdbcPagingItemReader` above, assuming the `CustomerCredit` object has the +correct JPA annotations or ORM mapping file. The 'pageSize' property determines the +number of entities read from the database for each query execution. + +#### Database ItemWriters + +While both flat files and XML files have a specific `ItemWriter` instance, there is no exact equivalent +in the database world. This is because transactions provide all the needed functionality.`ItemWriter` implementations are necessary for files because they must act as if they’re transactional, +keeping track of written items and flushing or clearing at the appropriate times. +Databases have no need for this functionality, since the write is already contained in a +transaction. Users can create their own DAOs that implement the `ItemWriter` interface or +use one from a custom `ItemWriter` that’s written for generic processing concerns. Either +way, they should work without any issues. One thing to look out for is the performance +and error handling capabilities that are provided by batching the outputs. This is most +common when using hibernate as an `ItemWriter` but could have the same issues when using +JDBC batch mode. Batching database output does not have any inherent flaws, assuming we +are careful to flush and there are no errors in the data. However, any errors while +writing can cause confusion, because there is no way to know which individual item caused +an exception or even if any individual item was responsible, as illustrated in the +following image: + +![Error On Flush](https://docs.spring.io/spring-batch/docs/current/reference/html/images/errorOnFlush.png) + +Figure 4. Error On Flush + +If items are buffered before being written, any errors are not thrown until the buffer is +flushed just before a commit. For example, assume that 20 items are written per chunk, +and the 15th item throws a `DataIntegrityViolationException`. As far as the `Step`is concerned, all 20 item are written successfully, since there is no way to know that an +error occurs until they are actually written. Once `Session#flush()` is called, the +buffer is emptied and the exception is hit. At this point, there is nothing the `Step`can do. The transaction must be rolled back. Normally, this exception might cause the +item to be skipped (depending upon the skip/retry policies), and then it is not written +again. However, in the batched scenario, there is no way to know which item caused the +issue. The whole buffer was being written when the failure happened. The only way to +solve this issue is to flush after each item, as shown in the following image: + +![Error On Write](https://docs.spring.io/spring-batch/docs/current/reference/html/images/errorOnWrite.png) + +Figure 5. 
Error On Write + +This is a common use case, especially when using Hibernate, and the simple guideline for +implementations of `ItemWriter` is to flush on each call to `write()`. Doing so allows +for items to be skipped reliably, with Spring Batch internally taking care of the +granularity of the calls to `ItemWriter` after an error. + +### Reusing Existing Services + +Batch systems are often used in conjunction with other application styles. The most +common is an online system, but it may also support integration or even a thick client +application by moving necessary bulk data that each application style uses. For this +reason, it is common that many users want to reuse existing DAOs or other services within +their batch jobs. The Spring container itself makes this fairly easy by allowing any +necessary class to be injected. However, there may be cases where the existing service +needs to act as an `ItemReader` or `ItemWriter`, either to satisfy the dependency of +another Spring Batch class or because it truly is the main `ItemReader` for a step. It is +fairly trivial to write an adapter class for each service that needs wrapping, but +because it is such a common concern, Spring Batch provides implementations:`ItemReaderAdapter` and `ItemWriterAdapter`. Both classes implement the standard Spring +method by invoking the delegate pattern and are fairly simple to set up. + +The following XML example uses the `ItemReaderAdapter`: + +XML Configuration + +``` + + + + + + +``` + +The following Java example uses the `ItemReaderAdapter`: + +Java Configuration + +``` +@Bean +public ItemReaderAdapter itemReader() { + ItemReaderAdapter reader = new ItemReaderAdapter(); + + reader.setTargetObject(fooService()); + reader.setTargetMethod("generateFoo"); + + return reader; +} + +@Bean +public FooService fooService() { + return new FooService(); +} +``` + +One important point to note is that the contract of the `targetMethod` must be the same +as the contract for `read`: When exhausted, it returns `null`. Otherwise, it returns an`Object`. Anything else prevents the framework from knowing when processing should end, +either causing an infinite loop or incorrect failure, depending upon the implementation +of the `ItemWriter`. + +The following XML example uses the `ItemWriterAdapter`: + +XML Configuration + +``` + + + + + + +``` + +The following Java example uses the `ItemWriterAdapter`: + +Java Configuration + +``` +@Bean +public ItemWriterAdapter itemWriter() { + ItemWriterAdapter writer = new ItemWriterAdapter(); + + writer.setTargetObject(fooService()); + writer.setTargetMethod("processFoo"); + + return writer; +} + +@Bean +public FooService fooService() { + return new FooService(); +} +``` + +### Preventing State Persistence + +By default, all of the `ItemReader` and `ItemWriter` implementations store their current +state in the `ExecutionContext` before it is committed. However, this may not always be +the desired behavior. For example, many developers choose to make their database readers +'rerunnable' by using a process indicator. An extra column is added to the input data to +indicate whether or not it has been processed. When a particular record is being read (or +written) the processed flag is flipped from `false` to `true`. The SQL statement can then +contain an extra statement in the `where` clause, such as `where PROCESSED_IND = false`, +thereby ensuring that only unprocessed records are returned in the case of a restart. 
In +this scenario, it is preferable to not store any state, such as the current row number, +since it is irrelevant upon restart. For this reason, all readers and writers include the +'saveState' property. + +The following bean definition shows how to prevent state persistence in XML: + +XML Configuration + +``` + + + + + + + + + SELECT games.player_id, games.year_no, SUM(COMPLETES), + SUM(ATTEMPTS), SUM(PASSING_YARDS), SUM(PASSING_TD), + SUM(INTERCEPTIONS), SUM(RUSHES), SUM(RUSH_YARDS), + SUM(RECEPTIONS), SUM(RECEPTIONS_YARDS), SUM(TOTAL_TD) + from games, players where players.player_id = + games.player_id group by games.player_id, games.year_no + + + +``` + +The following bean definition shows how to prevent state persistence in Java: + +Java Configuration + +``` +@Bean +public JdbcCursorItemReader playerSummarizationSource(DataSource dataSource) { + return new JdbcCursorItemReaderBuilder() + .dataSource(dataSource) + .rowMapper(new PlayerSummaryMapper()) + .saveState(false) + .sql("SELECT games.player_id, games.year_no, SUM(COMPLETES)," + + "SUM(ATTEMPTS), SUM(PASSING_YARDS), SUM(PASSING_TD)," + + "SUM(INTERCEPTIONS), SUM(RUSHES), SUM(RUSH_YARDS)," + + "SUM(RECEPTIONS), SUM(RECEPTIONS_YARDS), SUM(TOTAL_TD)" + + "from games, players where players.player_id =" + + "games.player_id group by games.player_id, games.year_no") + .build(); + +} +``` + +The `ItemReader` configured above does not make any entries in the `ExecutionContext` for +any executions in which it participates. + +### Creating Custom ItemReaders and ItemWriters + +So far, this chapter has discussed the basic contracts of reading and writing in Spring +Batch and some common implementations for doing so. However, these are all fairly +generic, and there are many potential scenarios that may not be covered by out-of-the-box +implementations. This section shows, by using a simple example, how to create a custom`ItemReader` and `ItemWriter` implementation and implement their contracts correctly. The`ItemReader` also implements `ItemStream`, in order to illustrate how to make a reader or +writer restartable. + +#### Custom `ItemReader` Example + +For the purpose of this example, we create a simple `ItemReader` implementation that +reads from a provided list. We start by implementing the most basic contract of`ItemReader`, the `read` method, as shown in the following code: + +``` +public class CustomItemReader implements ItemReader { + + List items; + + public CustomItemReader(List items) { + this.items = items; + } + + public T read() throws Exception, UnexpectedInputException, + NonTransientResourceException, ParseException { + + if (!items.isEmpty()) { + return items.remove(0); + } + return null; + } +} +``` + +The preceding class takes a list of items and returns them one at a time, removing each +from the list. When the list is empty, it returns `null`, thus satisfying the most basic +requirements of an `ItemReader`, as illustrated in the following test code: + +``` +List items = new ArrayList<>(); +items.add("1"); +items.add("2"); +items.add("3"); + +ItemReader itemReader = new CustomItemReader<>(items); +assertEquals("1", itemReader.read()); +assertEquals("2", itemReader.read()); +assertEquals("3", itemReader.read()); +assertNull(itemReader.read()); +``` + +##### Making the `ItemReader` Restartable + +The final challenge is to make the `ItemReader` restartable. Currently, if processing is +interrupted and begins again, the `ItemReader` must start at the beginning. 
This is actually valid in many scenarios, but it is sometimes preferable for a batch job to restart where it left off. The key discriminant is often whether the reader is stateful or stateless. A stateless reader does not need to worry about restartability, but a stateful one has to try to reconstitute its last known state on restart. For this reason, we recommend that you keep custom readers stateless if possible, so that you need not worry about restartability.

If you do need to store state, the `ItemStream` interface should be used:

```
public class CustomItemReader<T> implements ItemReader<T>, ItemStream {

    List<T> items;
    int currentIndex = 0;
    private static final String CURRENT_INDEX = "current.index";

    public CustomItemReader(List<T> items) {
        this.items = items;
    }

    public T read() throws Exception, UnexpectedInputException,
            ParseException, NonTransientResourceException {

        if (currentIndex < items.size()) {
            return items.get(currentIndex++);
        }

        return null;
    }

    public void open(ExecutionContext executionContext) throws ItemStreamException {
        if (executionContext.containsKey(CURRENT_INDEX)) {
            currentIndex = (int) executionContext.getLong(CURRENT_INDEX);
        }
        else {
            currentIndex = 0;
        }
    }

    public void update(ExecutionContext executionContext) throws ItemStreamException {
        executionContext.putLong(CURRENT_INDEX, currentIndex);
    }

    public void close() throws ItemStreamException {}
}
```

On each call to the `ItemStream` `update` method, the current index of the `ItemReader` is stored in the provided `ExecutionContext` with a key of 'current.index'. When the `ItemStream` `open` method is called, the `ExecutionContext` is checked to see if it contains an entry with that key. If the key is found, the current index is moved to that location. This is a fairly trivial example, but it still meets the general contract:

```
ExecutionContext executionContext = new ExecutionContext();
((ItemStream) itemReader).open(executionContext);
assertEquals("1", itemReader.read());
((ItemStream) itemReader).update(executionContext);

List<String> items = new ArrayList<>();
items.add("1");
items.add("2");
items.add("3");
itemReader = new CustomItemReader<>(items);

((ItemStream) itemReader).open(executionContext);
assertEquals("2", itemReader.read());
```

Most `ItemReader` implementations have much more sophisticated restart logic. The `JdbcCursorItemReader`, for example, stores the row ID of the last processed row in the cursor.

It is also worth noting that the key used within the `ExecutionContext` should not be trivial. That is because the same `ExecutionContext` is used for all `ItemStream` instances within a `Step`. In most cases, simply prepending the key with the class name should be enough to guarantee uniqueness. However, in the rare case where two instances of the same type of `ItemStream` are used in the same step (which can happen if two files are needed for output), a more unique name is needed. For this reason, many of the Spring Batch `ItemReader` and `ItemWriter` implementations have a `setName()` property that lets this key name be overridden.

#### Custom `ItemWriter` Example

Implementing a custom `ItemWriter` is similar in many ways to the `ItemReader` example above but differs in enough ways to warrant its own example. However, adding restartability is essentially the same, so it is not covered in this example.
As with the `ItemReader` example, a `List` is used in order to keep the example as simple as possible:

```
public class CustomItemWriter<T> implements ItemWriter<T> {

    List<T> output = TransactionAwareProxyFactory.createTransactionalList();

    public void write(List<? extends T> items) throws Exception {
        output.addAll(items);
    }

    public List<T> getOutput() {
        return output;
    }
}
```

##### Making the `ItemWriter` Restartable

To make the `ItemWriter` restartable, we would follow the same process as for the `ItemReader`, adding and implementing the `ItemStream` interface to synchronize the execution context. In the example, we might have to count the number of items processed and add that as a footer record. If we needed to do that, we could implement `ItemStream` in our `ItemWriter` so that the counter is reconstituted from the execution context if the stream is re-opened.

In many realistic cases, custom `ItemWriter` implementations also delegate to another writer that itself is restartable (for example, when writing to a file), or else they write to a transactional resource and so do not need to be restartable, because they are stateless. When you have a stateful writer, you should probably be sure to implement `ItemStream` as well as `ItemWriter`. Remember also that the client of the writer needs to be aware of the `ItemStream`, so you may need to register it as a stream in the configuration.

### Item Reader and Writer Implementations

In this section, we introduce readers and writers that have not already been discussed in the previous sections.

#### Decorators

In some cases, a user needs specialized behavior to be appended to a pre-existing `ItemReader`. Spring Batch offers some out-of-the-box decorators that can add additional behavior to your `ItemReader` and `ItemWriter` implementations.

Spring Batch includes the following decorators:

* [`SynchronizedItemStreamReader`](#synchronizedItemStreamReader)

* [`SingleItemPeekableItemReader`](#singleItemPeekableItemReader)

* [`SynchronizedItemStreamWriter`](#synchronizedItemStreamWriter)

* [`MultiResourceItemWriter`](#multiResourceItemWriter)

* [`ClassifierCompositeItemWriter`](#classifierCompositeItemWriter)

* [`ClassifierCompositeItemProcessor`](#classifierCompositeItemProcessor)

##### `SynchronizedItemStreamReader`

When using an `ItemReader` that is not thread safe, Spring Batch offers the `SynchronizedItemStreamReader` decorator, which can be used to make the `ItemReader` thread safe. Spring Batch provides a `SynchronizedItemStreamReaderBuilder` to construct an instance of the `SynchronizedItemStreamReader`.

##### `SingleItemPeekableItemReader`

Spring Batch includes a decorator that adds a peek method to an `ItemReader`. This peek method lets the user look one item ahead. Repeated calls to `peek` return the same item, which is the next item returned from the `read` method. Spring Batch provides a `SingleItemPeekableItemReaderBuilder` to construct an instance of the `SingleItemPeekableItemReader`.

| |SingleItemPeekableItemReader’s peek method is not thread-safe, because it would not
be possible to honor the peek in multiple threads. Only one of the threads that peeked
would get that item in the next call to read.|
|---|---|

##### `SynchronizedItemStreamWriter`

When using an `ItemWriter` that is not thread safe, Spring Batch offers the `SynchronizedItemStreamWriter` decorator, which can be used to make the `ItemWriter` thread safe. Spring Batch provides a `SynchronizedItemStreamWriterBuilder` to construct an instance of the `SynchronizedItemStreamWriter`.

##### `MultiResourceItemWriter`

The `MultiResourceItemWriter` wraps a `ResourceAwareItemWriterItemStream` and creates a new output resource when the count of items written to the current resource exceeds the `itemCountLimitPerResource`. Spring Batch provides a `MultiResourceItemWriterBuilder` to construct an instance of the `MultiResourceItemWriter`.

##### `ClassifierCompositeItemWriter`

The `ClassifierCompositeItemWriter` calls one of a collection of `ItemWriter` implementations for each item, based on a router pattern implemented through the provided `Classifier`. The implementation is thread-safe if all delegates are thread-safe. Spring Batch provides a `ClassifierCompositeItemWriterBuilder` to construct an instance of the `ClassifierCompositeItemWriter`.

##### `ClassifierCompositeItemProcessor`

The `ClassifierCompositeItemProcessor` is an `ItemProcessor` that calls one of a collection of `ItemProcessor` implementations, based on a router pattern implemented through the provided `Classifier`. Spring Batch provides a `ClassifierCompositeItemProcessorBuilder` to construct an instance of the `ClassifierCompositeItemProcessor`.

#### Messaging Readers And Writers

Spring Batch offers the following readers and writers for commonly used messaging systems:

* [`AmqpItemReader`](#amqpItemReader)

* [`AmqpItemWriter`](#amqpItemWriter)

* [`JmsItemReader`](#jmsItemReader)

* [`JmsItemWriter`](#jmsItemWriter)

* [`KafkaItemReader`](#kafkaItemReader)

* [`KafkaItemWriter`](#kafkaItemWriter)

##### `AmqpItemReader`

The `AmqpItemReader` is an `ItemReader` that uses an `AmqpTemplate` to receive or convert messages from an exchange. Spring Batch provides an `AmqpItemReaderBuilder` to construct an instance of the `AmqpItemReader`.

##### `AmqpItemWriter`

The `AmqpItemWriter` is an `ItemWriter` that uses an `AmqpTemplate` to send messages to an AMQP exchange. Messages are sent to the nameless exchange if the name is not specified in the provided `AmqpTemplate`. Spring Batch provides an `AmqpItemWriterBuilder` to construct an instance of the `AmqpItemWriter`.

##### `JmsItemReader`

The `JmsItemReader` is an `ItemReader` for JMS that uses a `JmsTemplate`. The template should have a default destination, which is used to provide items for the `read()` method. Spring Batch provides a `JmsItemReaderBuilder` to construct an instance of the `JmsItemReader`.

##### `JmsItemWriter`

The `JmsItemWriter` is an `ItemWriter` for JMS that uses a `JmsTemplate`. The template should have a default destination, which is used to send items in `write(List)`. Spring Batch provides a `JmsItemWriterBuilder` to construct an instance of the `JmsItemWriter`.

##### `KafkaItemReader`

The `KafkaItemReader` is an `ItemReader` for an Apache Kafka topic. It can be configured to read messages from multiple partitions of the same topic.
It stores message offsets +in the execution context to support restart capabilities. Spring Batch provides a`KafkaItemReaderBuilder` to construct an instance of the `KafkaItemReader`. + +##### `KafkaItemWriter` + +The `KafkaItemWriter` is an `ItemWriter` for Apache Kafka that uses a `KafkaTemplate` to +send events to a default topic. Spring Batch provides a `KafkaItemWriterBuilder` to +construct an instance of the `KafkaItemWriter`. + +#### Database Readers + +Spring Batch offers the following database readers: + +* [`Neo4jItemReader`](#Neo4jItemReader) + +* [`MongoItemReader`](#mongoItemReader) + +* [`HibernateCursorItemReader`](#hibernateCursorItemReader) + +* [`HibernatePagingItemReader`](#hibernatePagingItemReader) + +* [`RepositoryItemReader`](#repositoryItemReader) + +##### `Neo4jItemReader` + +The `Neo4jItemReader` is an `ItemReader` that reads objects from the graph database Neo4j +by using a paging technique. Spring Batch provides a `Neo4jItemReaderBuilder` to +construct an instance of the `Neo4jItemReader`. + +##### `MongoItemReader` + +The `MongoItemReader` is an `ItemReader` that reads documents from MongoDB by using a +paging technique. Spring Batch provides a `MongoItemReaderBuilder` to construct an +instance of the `MongoItemReader`. + +##### `HibernateCursorItemReader` + +The `HibernateCursorItemReader` is an `ItemStreamReader` for reading database records +built on top of Hibernate. It executes the HQL query and then, when initialized, iterates +over the result set as the `read()` method is called, successively returning an object +corresponding to the current row. Spring Batch provides a`HibernateCursorItemReaderBuilder` to construct an instance of the`HibernateCursorItemReader`. + +##### `HibernatePagingItemReader` + +The `HibernatePagingItemReader` is an `ItemReader` for reading database records built on +top of Hibernate and reading only up to a fixed number of items at a time. Spring Batch +provides a `HibernatePagingItemReaderBuilder` to construct an instance of the`HibernatePagingItemReader`. + +##### `RepositoryItemReader` + +The `RepositoryItemReader` is an `ItemReader` that reads records by using a`PagingAndSortingRepository`. Spring Batch provides a `RepositoryItemReaderBuilder` to +construct an instance of the `RepositoryItemReader`. + +#### Database Writers + +Spring Batch offers the following database writers: + +* [`Neo4jItemWriter`](#neo4jItemWriter) + +* [`MongoItemWriter`](#mongoItemWriter) + +* [`RepositoryItemWriter`](#repositoryItemWriter) + +* [`HibernateItemWriter`](#hibernateItemWriter) + +* [`JdbcBatchItemWriter`](#jdbcBatchItemWriter) + +* [`JpaItemWriter`](#jpaItemWriter) + +* [`GemfireItemWriter`](#gemfireItemWriter) + +##### `Neo4jItemWriter` + +The `Neo4jItemWriter` is an `ItemWriter` implementation that writes to a Neo4j database. +Spring Batch provides a `Neo4jItemWriterBuilder` to construct an instance of the`Neo4jItemWriter`. + +##### `MongoItemWriter` + +The `MongoItemWriter` is an `ItemWriter` implementation that writes to a MongoDB store +using an implementation of Spring Data’s `MongoOperations`. Spring Batch provides a`MongoItemWriterBuilder` to construct an instance of the `MongoItemWriter`. + +##### `RepositoryItemWriter` + +The `RepositoryItemWriter` is an `ItemWriter` wrapper for a `CrudRepository` from Spring +Data. Spring Batch provides a `RepositoryItemWriterBuilder` to construct an instance of +the `RepositoryItemWriter`. 
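
For instance, a minimal sketch (the `Customer` type and the `CustomerRepository` Spring Data interface are assumptions for illustration) wires a repository into a step through the builder:

```
@Bean
public RepositoryItemWriter<Customer> customerWriter(CustomerRepository repository) {
    return new RepositoryItemWriterBuilder<Customer>()
            .repository(repository)
            // name of the CrudRepository method to invoke for each item
            .methodName("save")
            .build();
}
```
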
##### `HibernateItemWriter`

The `HibernateItemWriter` is an `ItemWriter` that uses a Hibernate session to save or update entities that are not part of the current Hibernate session. Spring Batch provides a `HibernateItemWriterBuilder` to construct an instance of the `HibernateItemWriter`.

##### `JdbcBatchItemWriter`

The `JdbcBatchItemWriter` is an `ItemWriter` that uses the batching features of `NamedParameterJdbcTemplate` to execute a batch of statements for all items provided. Spring Batch provides a `JdbcBatchItemWriterBuilder` to construct an instance of the `JdbcBatchItemWriter`.

##### `JpaItemWriter`

The `JpaItemWriter` is an `ItemWriter` that uses a JPA `EntityManagerFactory` to merge any entities that are not part of the persistence context. Spring Batch provides a `JpaItemWriterBuilder` to construct an instance of the `JpaItemWriter`.

##### `GemfireItemWriter`

The `GemfireItemWriter` is an `ItemWriter` that uses a `GemfireTemplate` to store items in GemFire as key/value pairs. Spring Batch provides a `GemfireItemWriterBuilder` to construct an instance of the `GemfireItemWriter`.

#### Specialized Readers

Spring Batch offers the following specialized readers:

* [`LdifReader`](#ldifReader)

* [`MappingLdifReader`](#mappingLdifReader)

* [`AvroItemReader`](#avroItemReader)

##### `LdifReader`

The `LdifReader` reads LDIF (LDAP Data Interchange Format) records from a `Resource`, parses them, and returns an `LdapAttributes` object for each `read` executed. Spring Batch provides an `LdifReaderBuilder` to construct an instance of the `LdifReader`.

##### `MappingLdifReader`

The `MappingLdifReader` reads LDIF (LDAP Data Interchange Format) records from a `Resource`, parses them, and then maps each LDIF record to a POJO (Plain Old Java Object). Each read returns a POJO. Spring Batch provides a `MappingLdifReaderBuilder` to construct an instance of the `MappingLdifReader`.

##### `AvroItemReader`

The `AvroItemReader` reads serialized Avro data from a `Resource`. Each read returns an instance of the type specified by a Java class or Avro schema. The reader can optionally be configured for input that does or does not embed an Avro schema. Spring Batch provides an `AvroItemReaderBuilder` to construct an instance of the `AvroItemReader`.

#### Specialized Writers

Spring Batch offers the following specialized writers:

* [`SimpleMailMessageItemWriter`](#simpleMailMessageItemWriter)

* [`AvroItemWriter`](#avroItemWriter)

##### `SimpleMailMessageItemWriter`

The `SimpleMailMessageItemWriter` is an `ItemWriter` that can send mail messages. It delegates the actual sending of messages to an instance of `MailSender`. Spring Batch provides a `SimpleMailMessageItemWriterBuilder` to construct an instance of the `SimpleMailMessageItemWriter`.

##### `AvroItemWriter`

The `AvroItemWriter` serializes Java objects to a `WritableResource` according to the given type or Avro schema. The writer can optionally be configured to embed an Avro schema in the output or not. Spring Batch provides an `AvroItemWriterBuilder` to construct an instance of the `AvroItemWriter`.

#### Specialized Processors

Spring Batch offers the following specialized processors:

* [`ScriptItemProcessor`](#scriptItemProcessor)

##### `ScriptItemProcessor`

The `ScriptItemProcessor` is an `ItemProcessor` that passes the current item to the provided script, and the result of the script execution is returned by the processor.
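
As a minimal sketch (the inline JavaScript, the `String` type parameters, and the bean name are assumptions for illustration, and a JSR-223 script engine must be available on the classpath):

```
@Bean
public ScriptItemProcessor<String, String> upperCaseProcessor() {
    ScriptItemProcessor<String, String> processor = new ScriptItemProcessor<>();
    // 'item' is the default variable under which the current item is bound
    processor.setScriptSource("item.toUpperCase();", "javascript");
    return processor;
}
```
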
Spring +Batch provides a `ScriptItemProcessorBuilder` to construct an instance of the`ScriptItemProcessor`. \ No newline at end of file diff --git a/docs/en/spring-batch/repeat.md b/docs/en/spring-batch/repeat.md new file mode 100644 index 0000000000000000000000000000000000000000..faf19b5c82840c13e072014d77186fac80eec54e --- /dev/null +++ b/docs/en/spring-batch/repeat.md @@ -0,0 +1,212 @@ +# Repeat + +## Repeat + +XMLJavaBoth + +### RepeatTemplate + +Batch processing is about repetitive actions, either as a simple optimization or as part +of a job. To strategize and generalize the repetition and to provide what amounts to an +iterator framework, Spring Batch has the `RepeatOperations` interface. The`RepeatOperations` interface has the following definition: + +``` +public interface RepeatOperations { + + RepeatStatus iterate(RepeatCallback callback) throws RepeatException; + +} +``` + +The callback is an interface, shown in the following definition, that lets you insert +some business logic to be repeated: + +``` +public interface RepeatCallback { + + RepeatStatus doInIteration(RepeatContext context) throws Exception; + +} +``` + +The callback is executed repeatedly until the implementation determines that the +iteration should end. The return value in these interfaces is an enumeration that can +either be `RepeatStatus.CONTINUABLE` or `RepeatStatus.FINISHED`. A `RepeatStatus`enumeration conveys information to the caller of the repeat operations about whether +there is any more work to do. Generally speaking, implementations of `RepeatOperations`should inspect the `RepeatStatus` and use it as part of the decision to end the +iteration. Any callback that wishes to signal to the caller that there is no more work to +do can return `RepeatStatus.FINISHED`. + +The simplest general purpose implementation of `RepeatOperations` is `RepeatTemplate`, as +shown in the following example: + +``` +RepeatTemplate template = new RepeatTemplate(); + +template.setCompletionPolicy(new SimpleCompletionPolicy(2)); + +template.iterate(new RepeatCallback() { + + public RepeatStatus doInIteration(RepeatContext context) { + // Do stuff in batch... + return RepeatStatus.CONTINUABLE; + } + +}); +``` + +In the preceding example, we return `RepeatStatus.CONTINUABLE`, to show that there is +more work to do. The callback can also return `RepeatStatus.FINISHED`, to signal to the +caller that there is no more work to do. Some iterations can be terminated by +considerations intrinsic to the work being done in the callback. Others are effectively +infinite loops as far as the callback is concerned and the completion decision is +delegated to an external policy, as in the case shown in the preceding example. + +#### RepeatContext + +The method parameter for the `RepeatCallback` is a `RepeatContext`. Many callbacks ignore +the context. However, if necessary, it can be used as an attribute bag to store transient +data for the duration of the iteration. After the `iterate` method returns, the context +no longer exists. + +If there is a nested iteration in progress, a `RepeatContext` has a parent context. The +parent context is occasionally useful for storing data that need to be shared between +calls to `iterate`. This is the case, for instance, if you want to count the number of +occurrences of an event in the iteration and remember it across subsequent calls. + +#### RepeatStatus + +`RepeatStatus` is an enumeration used by Spring Batch to indicate whether processing has +finished. 
It has two possible `RepeatStatus` values, described in the following table: + +| *Value* | *Description* | +|-----------|--------------------------------------| +|CONTINUABLE| There is more work to do. | +| FINISHED |No more repetitions should take place.| + +`RepeatStatus` values can also be combined with a logical AND operation by using the`and()` method in `RepeatStatus`. The effect of this is to do a logical AND on the +continuable flag. In other words, if either status is `FINISHED`, then the result is`FINISHED`. + +### Completion Policies + +Inside a `RepeatTemplate`, the termination of the loop in the `iterate` method is +determined by a `CompletionPolicy`, which is also a factory for the `RepeatContext`. The`RepeatTemplate` has the responsibility to use the current policy to create a`RepeatContext` and pass that in to the `RepeatCallback` at every stage in the iteration. +After a callback completes its `doInIteration`, the `RepeatTemplate` has to make a call +to the `CompletionPolicy` to ask it to update its state (which will be stored in the`RepeatContext`). Then it asks the policy if the iteration is complete. + +Spring Batch provides some simple general purpose implementations of `CompletionPolicy`.`SimpleCompletionPolicy` allows execution up to a fixed number of times (with`RepeatStatus.FINISHED` forcing early completion at any time). + +Users might need to implement their own completion policies for more complicated +decisions. For example, a batch processing window that prevents batch jobs from executing +once the online systems are in use would require a custom policy. + +### Exception Handling + +If there is an exception thrown inside a `RepeatCallback`, the `RepeatTemplate` consults +an `ExceptionHandler`, which can decide whether or not to re-throw the exception. + +The following listing shows the `ExceptionHandler` interface definition: + +``` +public interface ExceptionHandler { + + void handleException(RepeatContext context, Throwable throwable) + throws Throwable; + +} +``` + +A common use case is to count the number of exceptions of a given type and fail when a +limit is reached. For this purpose, Spring Batch provides the`SimpleLimitExceptionHandler` and a slightly more flexible`RethrowOnThresholdExceptionHandler`. The `SimpleLimitExceptionHandler` has a limit +property and an exception type that should be compared with the current exception. All +subclasses of the provided type are also counted. Exceptions of the given type are +ignored until the limit is reached, and then they are rethrown. Exceptions of other types +are always rethrown. + +An important optional property of the `SimpleLimitExceptionHandler` is the boolean flag +called `useParent`. It is `false` by default, so the limit is only accounted for in the +current `RepeatContext`. When set to `true`, the limit is kept across sibling contexts in +a nested iteration (such as a set of chunks inside a step). + +### Listeners + +Often, it is useful to be able to receive additional callbacks for cross-cutting concerns +across a number of different iterations. For this purpose, Spring Batch provides the`RepeatListener` interface. The `RepeatTemplate` lets users register `RepeatListener`implementations, and they are given callbacks with the `RepeatContext` and `RepeatStatus`where available during the iteration. 
+ +The `RepeatListener` interface has the following definition: + +``` +public interface RepeatListener { + void before(RepeatContext context); + void after(RepeatContext context, RepeatStatus result); + void open(RepeatContext context); + void onError(RepeatContext context, Throwable e); + void close(RepeatContext context); +} +``` + +The `open` and `close` callbacks come before and after the entire iteration. `before`,`after`, and `onError` apply to the individual `RepeatCallback` calls. + +Note that, when there is more than one listener, they are in a list, so there is an +order. In this case, `open` and `before` are called in the same order while `after`,`onError`, and `close` are called in reverse order. + +### Parallel Processing + +Implementations of `RepeatOperations` are not restricted to executing the callback +sequentially. It is quite important that some implementations are able to execute their +callbacks in parallel. To this end, Spring Batch provides the`TaskExecutorRepeatTemplate`, which uses the Spring `TaskExecutor` strategy to run the`RepeatCallback`. The default is to use a `SynchronousTaskExecutor`, which has the effect +of executing the whole iteration in the same thread (the same as a normal`RepeatTemplate`). + +### Declarative Iteration + +Sometimes there is some business processing that you know you want to repeat every time +it happens. The classic example of this is the optimization of a message pipeline. It is +more efficient to process a batch of messages, if they are arriving frequently, than to +bear the cost of a separate transaction for every message. Spring Batch provides an AOP +interceptor that wraps a method call in a `RepeatOperations` object for just this +purpose. The `RepeatOperationsInterceptor` executes the intercepted method and repeats +according to the `CompletionPolicy` in the provided `RepeatTemplate`. + +The following example shows declarative iteration using the Spring AOP namespace to +repeat a service call to a method called `processMessage` (for more detail on how to +configure AOP interceptors, see the Spring User Guide): + +``` + + + + + + +``` + +The following example demonstrates using Java configuration to +repeat a service call to a method called `processMessage` (for more detail on how to +configure AOP interceptors, see the Spring User Guide): + +``` +@Bean +public MyService myService() { + ProxyFactory factory = new ProxyFactory(RepeatOperations.class.getClassLoader()); + factory.setInterfaces(MyService.class); + factory.setTarget(new MyService()); + + MyService service = (MyService) factory.getProxy(); + JdkRegexpMethodPointcut pointcut = new JdkRegexpMethodPointcut(); + pointcut.setPatterns(".*processMessage.*"); + + RepeatOperationsInterceptor interceptor = new RepeatOperationsInterceptor(); + + ((Advised) service).addAdvisor(new DefaultPointcutAdvisor(pointcut, interceptor)); + + return service; +} +``` + +The preceding example uses a default `RepeatTemplate` inside the interceptor. To change +the policies, listeners, and other details, you can inject an instance of`RepeatTemplate` into the interceptor. + +If the intercepted method returns `void`, then the interceptor always returns`RepeatStatus.CONTINUABLE` (so there is a danger of an infinite loop if the`CompletionPolicy` does not have a finite end point). Otherwise, it returns`RepeatStatus.CONTINUABLE` until the return value from the intercepted method is `null`, +at which point it returns `RepeatStatus.FINISHED`. 
Consequently, the business logic +inside the target method can signal that there is no more work to do by returning `null`or by throwing an exception that is re-thrown by the `ExceptionHandler` in the provided`RepeatTemplate`. diff --git a/docs/en/spring-batch/retry.md b/docs/en/spring-batch/retry.md new file mode 100644 index 0000000000000000000000000000000000000000..1c4eadd02c3cba8a90120ac0a6c10d5ecf7923e2 --- /dev/null +++ b/docs/en/spring-batch/retry.md @@ -0,0 +1,312 @@ +# Retry + +## Retry + +XMLJavaBoth + +To make processing more robust and less prone to failure, it sometimes helps to +automatically retry a failed operation in case it might succeed on a subsequent attempt. +Errors that are susceptible to intermittent failure are often transient in nature. +Examples include remote calls to a web service that fails because of a network glitch or a`DeadlockLoserDataAccessException` in a database update. + +### `RetryTemplate` + +| |The retry functionality was pulled out of Spring Batch as of 2.2.0.
It is now part of a new library, [Spring Retry](https://github.com/spring-projects/spring-retry).|
|---|---|

To automate retry operations, Spring Batch has the `RetryOperations` strategy. The following listing shows the interface definition for `RetryOperations`:

```
public interface RetryOperations {

    <T, E extends Throwable> T execute(RetryCallback<T, E> retryCallback) throws E;

    <T, E extends Throwable> T execute(RetryCallback<T, E> retryCallback,
        RecoveryCallback<T> recoveryCallback) throws E;

    <T, E extends Throwable> T execute(RetryCallback<T, E> retryCallback,
        RetryState retryState) throws E, ExhaustedRetryException;

    <T, E extends Throwable> T execute(RetryCallback<T, E> retryCallback,
        RecoveryCallback<T> recoveryCallback, RetryState retryState) throws E;

}
```

The basic callback is a simple interface that lets you insert some business logic to be retried, as shown in the following interface definition:

```
public interface RetryCallback<T, E extends Throwable> {

    T doWithRetry(RetryContext context) throws E;

}
```

The callback runs and, if it fails (by throwing an `Exception`), it is retried until either it is successful or the implementation aborts. There are a number of overloaded `execute` methods in the `RetryOperations` interface. Those methods deal with various use cases for recovery when all retry attempts are exhausted and deal with retry state, which lets clients and implementations store information between calls (we cover this in more detail later in the chapter).

The simplest general purpose implementation of `RetryOperations` is `RetryTemplate`. It can be used as follows:

```
RetryTemplate template = new RetryTemplate();

TimeoutRetryPolicy policy = new TimeoutRetryPolicy();
policy.setTimeout(30000L);

template.setRetryPolicy(policy);

Foo result = template.execute(new RetryCallback<Foo, Exception>() {

    public Foo doWithRetry(RetryContext context) {
        // Do stuff that might fail, e.g. a web service operation
        return doSomethingRisky(); // hypothetical method standing in for the remote call
    }

});
```

In the preceding example, we make a web service call and return the result to the user. If that call fails, it is retried until a timeout is reached.

#### `RetryContext`

The method parameter for the `RetryCallback` is a `RetryContext`. Many callbacks ignore the context, but, if necessary, it can be used as an attribute bag to store data for the duration of the iteration.

A `RetryContext` has a parent context if there is a nested retry in progress in the same thread. The parent context is occasionally useful for storing data that needs to be shared between calls to `execute`.

#### `RecoveryCallback`

When a retry is exhausted, the `RetryOperations` can pass control to a different callback, called the `RecoveryCallback`. To use this feature, clients pass in both callbacks together to the same method, as shown in the following example:

```
Foo foo = template.execute(new RetryCallback<Foo, Exception>() {
    public Foo doWithRetry(RetryContext context) {
        // business logic here
        return null;
    }
},
new RecoveryCallback<Foo>() {
    public Foo recover(RetryContext context) throws Exception {
        // recover logic here
        return null;
    }
});
```

If the business logic does not succeed before the template decides to abort, the client is given the chance to do some alternate processing through the recovery callback.

#### Stateless Retry

In the simplest case, a retry is just a while loop: the `RetryTemplate` can just keep trying until it either succeeds or fails.
The `RetryContext` contains some state to +determine whether to retry or abort, but this state is on the stack and there is no need +to store it anywhere globally, so we call this stateless retry. The distinction between +stateless and stateful retry is contained in the implementation of the `RetryPolicy` (the`RetryTemplate` can handle both). In a stateless retry, the retry callback is always +executed in the same thread it was on when it failed. + +#### Stateful Retry + +Where the failure has caused a transactional resource to become invalid, there are some +special considerations. This does not apply to a simple remote call because there is no +transactional resource (usually), but it does sometimes apply to a database update, +especially when using Hibernate. In this case it only makes sense to re-throw the +exception that called the failure immediately, so that the transaction can roll back and +we can start a new, valid transaction. + +In cases involving transactions, a stateless retry is not good enough, because the +re-throw and roll back necessarily involve leaving the `RetryOperations.execute()` method +and potentially losing the context that was on the stack. To avoid losing it we have to +introduce a storage strategy to lift it off the stack and put it (at a minimum) in heap +storage. For this purpose, Spring Batch provides a storage strategy called`RetryContextCache`, which can be injected into the `RetryTemplate`. The default +implementation of the `RetryContextCache` is in memory, using a simple `Map`. Advanced +usage with multiple processes in a clustered environment might also consider implementing +the `RetryContextCache` with a cluster cache of some sort (however, even in a clustered +environment, this might be overkill). + +Part of the responsibility of the `RetryOperations` is to recognize the failed operations +when they come back in a new execution (and usually wrapped in a new transaction). To +facilitate this, Spring Batch provides the `RetryState` abstraction. This works in +conjunction with a special `execute` methods in the `RetryOperations` interface. + +The way the failed operations are recognized is by identifying the state across multiple +invocations of the retry. To identify the state, the user can provide a `RetryState`object that is responsible for returning a unique key identifying the item. The identifier +is used as a key in the `RetryContextCache` interface. + +| |Be very careful with the implementation of `Object.equals()` and `Object.hashCode()` in
the key returned by `RetryState`. The best advice is to use a business key to identify the
items. In the case of a JMS message, the message ID can be used.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +When the retry is exhausted, there is also the option to handle the failed item in a +different way, instead of calling the `RetryCallback` (which is now presumed to be likely +to fail). Just like in the stateless case, this option is provided by the`RecoveryCallback`, which can be provided by passing it in to the `execute` method of`RetryOperations`. + +The decision to retry or not is actually delegated to a regular `RetryPolicy`, so the +usual concerns about limits and timeouts can be injected there (described later in this +chapter). + +### Retry Policies + +Inside a `RetryTemplate`, the decision to retry or fail in the `execute` method is +determined by a `RetryPolicy`, which is also a factory for the `RetryContext`. The`RetryTemplate` has the responsibility to use the current policy to create a`RetryContext` and pass that in to the `RetryCallback` at every attempt. After a callback +fails, the `RetryTemplate` has to make a call to the `RetryPolicy` to ask it to update its +state (which is stored in the `RetryContext`) and then asks the policy if another attempt +can be made. If another attempt cannot be made (such as when a limit is reached or a +timeout is detected) then the policy is also responsible for handling the exhausted state. +Simple implementations throw `RetryExhaustedException`, which causes any enclosing +transaction to be rolled back. More sophisticated implementations might attempt to take +some recovery action, in which case the transaction can remain intact. + +| |Failures are inherently either retryable or not. If the same exception is always going to
be thrown from the business logic, it does no good to retry it. So do not retry on all
exception types. Rather, try to focus on only those exceptions that you expect to be
retryable. It is not usually harmful to the business logic to retry more aggressively, but
it is wasteful, because, if a failure is deterministic, you spend time retrying something
that you know in advance is fatal.|
|---|---|

Spring Batch provides some simple general purpose implementations of stateless `RetryPolicy`, such as `SimpleRetryPolicy` and `TimeoutRetryPolicy` (used in the preceding example).

The `SimpleRetryPolicy` allows a retry on any of a named set of exception types, up to a fixed number of times. Exception types can also be explicitly flagged as not retryable, which overrides the retryable setting and gives finer control over the retry behavior, as shown in the following example:

```
Map<Class<? extends Throwable>, Boolean> retryableExceptions = new HashMap<>();
// Retry on any Exception...
retryableExceptions.put(Exception.class, true);
// ...but never retry IllegalStateException
retryableExceptions.put(IllegalStateException.class, false);

// Set the max retry attempts to 5
SimpleRetryPolicy policy = new SimpleRetryPolicy(5, retryableExceptions);

// Use the policy...
RetryTemplate template = new RetryTemplate();
template.setRetryPolicy(policy);
template.execute(new RetryCallback<Foo, Exception>() {
    public Foo doWithRetry(RetryContext context) {
        // business logic here
        return null;
    }
});
```

There is also a more flexible implementation called `ExceptionClassifierRetryPolicy`, which lets the user configure different retry behavior for an arbitrary set of exception types through the `ExceptionClassifier` abstraction. The policy works by calling on the classifier to convert an exception into a delegate `RetryPolicy`. For example, one exception type can be retried more times before failure than another by mapping it to a different policy.

Users might need to implement their own retry policies for more customized decisions. For instance, a custom retry policy makes sense when there is a well-known, solution-specific classification of exceptions into retryable and not retryable.

### Backoff Policies

When retrying after a transient failure, it often helps to wait a bit before trying again, because usually the failure is caused by some problem that can be resolved only by waiting. If a `RetryCallback` fails, the `RetryTemplate` can pause execution according to the `BackOffPolicy`.

The following code shows the interface definition for `BackOffPolicy`:

```
public interface BackOffPolicy {

    BackOffContext start(RetryContext context);

    void backOff(BackOffContext backOffContext)
        throws BackOffInterruptedException;

}
```

A `BackOffPolicy` is free to implement the back off in any way it chooses. The policies provided by Spring Batch out of the box all use `Object.wait()`. A common use case is to back off with an exponentially increasing wait period, to avoid two retries getting into lock step and both failing (a lesson learned from Ethernet). For this purpose, Spring Batch provides the `ExponentialBackOffPolicy`.

### Listeners

Often, it is useful to be able to receive additional callbacks for cross-cutting concerns across a number of different retries.
For this purpose, Spring Batch provides the`RetryListener` interface. The `RetryTemplate` lets users register `RetryListeners`, and +they are given callbacks with `RetryContext` and `Throwable` where available during the +iteration. + +The following code shows the interface definition for `RetryListener`: + +``` +public interface RetryListener { + + boolean open(RetryContext context, RetryCallback callback); + + void onError(RetryContext context, RetryCallback callback, Throwable throwable); + + void close(RetryContext context, RetryCallback callback, Throwable throwable); +} +``` + +The `open` and `close` callbacks come before and after the entire retry in the simplest +case, and `onError` applies to the individual `RetryCallback` calls. The `close` method +might also receive a `Throwable`. If there has been an error, it is the last one thrown by +the `RetryCallback`. + +Note that, when there is more than one listener, they are in a list, so there is an order. +In this case, `open` is called in the same order while `onError` and `close` are called in +reverse order. + +### Declarative Retry + +Sometimes, there is some business processing that you know you want to retry every time it +happens. The classic example of this is the remote service call. Spring Batch provides an +AOP interceptor that wraps a method call in a `RetryOperations` implementation for just +this purpose. The `RetryOperationsInterceptor` executes the intercepted method and retries +on failure according to the `RetryPolicy` in the provided `RepeatTemplate`. + +The following example shows a declarative retry that uses the Spring AOP namespace to +retry a service call to a method called `remoteCall` (for more detail on how to configure +AOP interceptors, see the Spring User Guide): + +``` + + + + + + +``` + +The following example shows a declarative retry that uses java configuration to retry a +service call to a method called `remoteCall` (for more detail on how to configure AOP +interceptors, see the Spring User Guide): + +``` +@Bean +public MyService myService() { + ProxyFactory factory = new ProxyFactory(RepeatOperations.class.getClassLoader()); + factory.setInterfaces(MyService.class); + factory.setTarget(new MyService()); + + MyService service = (MyService) factory.getProxy(); + JdkRegexpMethodPointcut pointcut = new JdkRegexpMethodPointcut(); + pointcut.setPatterns(".*remoteCall.*"); + + RetryOperationsInterceptor interceptor = new RetryOperationsInterceptor(); + + ((Advised) service).addAdvisor(new DefaultPointcutAdvisor(pointcut, interceptor)); + + return service; +} +``` + +The preceding example uses a default `RetryTemplate` inside the interceptor. To change the +policies or listeners, you can inject an instance of `RetryTemplate` into the interceptor. \ No newline at end of file diff --git a/docs/en/spring-batch/scalability.md b/docs/en/spring-batch/scalability.md new file mode 100644 index 0000000000000000000000000000000000000000..df00f11321661d44b10400634759f2c8a347dbc5 --- /dev/null +++ b/docs/en/spring-batch/scalability.md @@ -0,0 +1,447 @@ +# Scaling and Parallel Processing + +## Scaling and Parallel Processing + +XMLJavaBoth + +Many batch processing problems can be solved with single threaded, single process jobs, +so it is always a good idea to properly check if that meets your needs before thinking +about more complex implementations. Measure the performance of a realistic job and see if +the simplest implementation meets your needs first. 
You can read and write a file of +several hundred megabytes in well under a minute, even with standard hardware. + +When you are ready to start implementing a job with some parallel processing, Spring +Batch offers a range of options, which are described in this chapter, although some +features are covered elsewhere. At a high level, there are two modes of parallel +processing: + +* Single process, multi-threaded + +* Multi-process + +These break down into categories as well, as follows: + +* Multi-threaded Step (single process) + +* Parallel Steps (single process) + +* Remote Chunking of Step (multi process) + +* Partitioning a Step (single or multi process) + +First, we review the single-process options. Then we review the multi-process options. + +### Multi-threaded Step + +The simplest way to start parallel processing is to add a `TaskExecutor` to your Step +configuration. + +For example, you might add an attribute of the `tasklet`, as follows: + +``` + + ... + +``` + +When using java configuration, a `TaskExecutor` can be added to the step, +as shown in the following example: + +Java Configuration + +``` +@Bean +public TaskExecutor taskExecutor() { + return new SimpleAsyncTaskExecutor("spring_batch"); +} + +@Bean +public Step sampleStep(TaskExecutor taskExecutor) { + return this.stepBuilderFactory.get("sampleStep") + .chunk(10) + .reader(itemReader()) + .writer(itemWriter()) + .taskExecutor(taskExecutor) + .build(); +} +``` + +In this example, the `taskExecutor` is a reference to another bean definition that +implements the `TaskExecutor` interface.[`TaskExecutor`](https://docs.spring.io/spring/docs/current/javadoc-api/org/springframework/core/task/TaskExecutor.html)is a standard Spring interface, so consult the Spring User Guide for details of available +implementations. The simplest multi-threaded `TaskExecutor` is a`SimpleAsyncTaskExecutor`. + +The result of the above configuration is that the `Step` executes by reading, processing, +and writing each chunk of items (each commit interval) in a separate thread of execution. +Note that this means there is no fixed order for the items to be processed, and a chunk +might contain items that are non-consecutive compared to the single-threaded case. In +addition to any limits placed by the task executor (such as whether it is backed by a +thread pool), there is a throttle limit in the tasklet configuration which defaults to 4. +You may need to increase this to ensure that a thread pool is fully utilized. + +For example you might increase the throttle-limit, as shown in the following example: + +``` + ... + +``` + +When using Java configuration, the builders provide access to the throttle limit, as shown +in the following example: + +Java Configuration + +``` +@Bean +public Step sampleStep(TaskExecutor taskExecutor) { + return this.stepBuilderFactory.get("sampleStep") + .chunk(10) + .reader(itemReader()) + .writer(itemWriter()) + .taskExecutor(taskExecutor) + .throttleLimit(20) + .build(); +} +``` + +Note also that there may be limits placed on concurrency by any pooled resources used in +your step, such as a `DataSource`. Be sure to make the pool in those resources at least +as large as the desired number of concurrent threads in the step. + +There are some practical limitations of using multi-threaded `Step` implementations for +some common batch use cases. Many participants in a `Step` (such as readers and writers) +are stateful. If the state is not segregated by thread, then those components are not +usable in a multi-threaded `Step`. 
In particular, most of the off-the-shelf readers and +writers from Spring Batch are not designed for multi-threaded use. It is, however, +possible to work with stateless or thread safe readers and writers, and there is a sample +(called `parallelJob`) in the[Spring +Batch Samples](https://github.com/spring-projects/spring-batch/tree/master/spring-batch-samples) that shows the use of a process indicator (see[Preventing State Persistence](readersAndWriters.html#process-indicator)) to keep track +of items that have been processed in a database input table. + +Spring Batch provides some implementations of `ItemWriter` and `ItemReader`. Usually, +they say in the Javadoc if they are thread safe or not or what you have to do to avoid +problems in a concurrent environment. If there is no information in the Javadoc, you can +check the implementation to see if there is any state. If a reader is not thread safe, +you can decorate it with the provided `SynchronizedItemStreamReader` or use it in your own +synchronizing delegator. You can synchronize the call to `read()` and as long as the +processing and writing is the most expensive part of the chunk, your step may still +complete much faster than it would in a single threaded configuration. + +### Parallel Steps + +As long as the application logic that needs to be parallelized can be split into distinct +responsibilities and assigned to individual steps, then it can be parallelized in a +single process. Parallel Step execution is easy to configure and use. + +For example, executing steps `(step1,step2)` in parallel with `step3` is straightforward, +as shown in the following example: + +``` + + + + + + + + + + + + + + +``` + +When using Java configuration, executing steps `(step1,step2)` in parallel with `step3`is straightforward, as shown in the following example: + +Java Configuration + +``` +@Bean +public Job job() { + return jobBuilderFactory.get("job") + .start(splitFlow()) + .next(step4()) + .build() //builds FlowJobBuilder instance + .build(); //builds Job instance +} + +@Bean +public Flow splitFlow() { + return new FlowBuilder("splitFlow") + .split(taskExecutor()) + .add(flow1(), flow2()) + .build(); +} + +@Bean +public Flow flow1() { + return new FlowBuilder("flow1") + .start(step1()) + .next(step2()) + .build(); +} + +@Bean +public Flow flow2() { + return new FlowBuilder("flow2") + .start(step3()) + .build(); +} + +@Bean +public TaskExecutor taskExecutor() { + return new SimpleAsyncTaskExecutor("spring_batch"); +} +``` + +The configurable task executor is used to specify which `TaskExecutor`implementation should be used to execute the individual flows. The default is`SyncTaskExecutor`, but an asynchronous `TaskExecutor` is required to run the steps in +parallel. Note that the job ensures that every flow in the split completes before +aggregating the exit statuses and transitioning. + +See the section on [Split Flows](step.html#split-flows) for more detail. + +### Remote Chunking + +In remote chunking, the `Step` processing is split across multiple processes, +communicating with each other through some middleware. The following image shows the +pattern: + +![Remote Chunking](https://docs.spring.io/spring-batch/docs/current/reference/html/images/remote-chunking.png) + +Figure 1. Remote Chunking + +The manager component is a single process, and the workers are multiple remote processes. 
+This pattern works best if the manager is not a bottleneck, so the processing must be more +expensive than the reading of items (as is often the case in practice). + +The manager is an implementation of a Spring Batch `Step` with the `ItemWriter` replaced +by a generic version that knows how to send chunks of items to the middleware as +messages. The workers are standard listeners for whatever middleware is being used (for +example, with JMS, they would be `MessageListener` implementations), and their role is +to process the chunks of items using a standard `ItemWriter` or `ItemProcessor` plus`ItemWriter`, through the `ChunkProcessor` interface. One of the advantages of using this +pattern is that the reader, processor, and writer components are off-the-shelf (the same +as would be used for a local execution of the step). The items are divided up dynamically +and work is shared through the middleware, so that, if the listeners are all eager +consumers, then load balancing is automatic. + +The middleware has to be durable, with guaranteed delivery and a single consumer for each +message. JMS is the obvious candidate, but other options (such as JavaSpaces) exist in +the grid computing and shared memory product space. + +See the section on[Spring Batch Integration - Remote Chunking](spring-batch-integration.html#remote-chunking)for more detail. + +### Partitioning + +Spring Batch also provides an SPI for partitioning a `Step` execution and executing it +remotely. In this case, the remote participants are `Step` instances that could just as +easily have been configured and used for local processing. The following image shows the +pattern: + +![Partitioning Overview](https://docs.spring.io/spring-batch/docs/current/reference/html/images/partitioning-overview.png) + +Figure 2. Partitioning + +The `Job` runs on the left-hand side as a sequence of `Step` instances, and one of the`Step` instances is labeled as a manager. The workers in this picture are all identical +instances of a `Step`, which could in fact take the place of the manager, resulting in the +same outcome for the `Job`. The workers are typically going to be remote services but +could also be local threads of execution. The messages sent by the manager to the workers +in this pattern do not need to be durable or have guaranteed delivery. Spring Batch +metadata in the `JobRepository` ensures that each worker is executed once and only once for +each `Job` execution. + +The SPI in Spring Batch consists of a special implementation of `Step` (called the`PartitionStep`) and two strategy interfaces that need to be implemented for the specific +environment. The strategy interfaces are `PartitionHandler` and `StepExecutionSplitter`, +and their role is shown in the following sequence diagram: + +![Partitioning SPI](https://docs.spring.io/spring-batch/docs/current/reference/html/images/partitioning-spi.png) + +Figure 3. Partitioning SPI + +The `Step` on the right in this case is the “remote” worker, so, potentially, there are +many objects and or processes playing this role, and the `PartitionStep` is shown driving +the execution. 
+ +The following example shows the `PartitionStep` configuration when using XML +configuration: + +``` + + + + + +``` + +The following example shows the `PartitionStep` configuration when using Java +configuration: + +Java Configuration + +``` +@Bean +public Step step1Manager() { + return stepBuilderFactory.get("step1.manager") + .partitioner("step1", partitioner()) + .step(step1()) + .gridSize(10) + .taskExecutor(taskExecutor()) + .build(); +} +``` + +Similar to the multi-threaded step’s `throttle-limit` attribute, the `grid-size`attribute prevents the task executor from being saturated with requests from a single +step. + +There is a simple example that can be copied and extended in the unit test suite for[Spring +Batch Samples](https://github.com/spring-projects/spring-batch/tree/master/spring-batch-samples/src/main/resources/jobs) (see `partition*Job.xml` configuration). + +Spring Batch creates step executions for the partitions called "step1:partition0", and so +on. Many people prefer to call the manager step "step1:manager" for consistency. You can +use an alias for the step (by specifying the `name` attribute instead of the `id`attribute). + +#### PartitionHandler + +The `PartitionHandler` is the component that knows about the fabric of the remoting or +grid environment. It is able to send `StepExecution` requests to the remote `Step`instances, wrapped in some fabric-specific format, like a DTO. It does not have to know +how to split the input data or how to aggregate the result of multiple `Step` executions. +Generally speaking, it probably also does not need to know about resilience or failover, +since those are features of the fabric in many cases. In any case, Spring Batch always +provides restartability independent of the fabric. A failed `Job` can always be restarted +and only the failed `Steps` are re-executed. + +The `PartitionHandler` interface can have specialized implementations for a variety of +fabric types, including simple RMI remoting, EJB remoting, custom web service, JMS, Java +Spaces, shared memory grids (like Terracotta or Coherence), and grid execution fabrics +(like GridGain). Spring Batch does not contain implementations for any proprietary grid +or remoting fabrics. + +Spring Batch does, however, provide a useful implementation of `PartitionHandler` that +executes `Step` instances locally in separate threads of execution, using the`TaskExecutor` strategy from Spring. The implementation is called`TaskExecutorPartitionHandler`. + +The `TaskExecutorPartitionHandler` is the default for a step configured with the XML +namespace shown previously. It can also be configured explicitly, as shown in the +following example: + +``` + + + + + + + + + +``` + +The `TaskExecutorPartitionHandler` can be configured explicitly within java configuration, +as shown in the following example: + +Java Configuration + +``` +@Bean +public Step step1Manager() { + return stepBuilderFactory.get("step1.manager") + .partitioner("step1", partitioner()) + .partitionHandler(partitionHandler()) + .build(); +} + +@Bean +public PartitionHandler partitionHandler() { + TaskExecutorPartitionHandler retVal = new TaskExecutorPartitionHandler(); + retVal.setTaskExecutor(taskExecutor()); + retVal.setStep(step1()); + retVal.setGridSize(10); + return retVal; +} +``` + +The `gridSize` attribute determines the number of separate step executions to create, so +it can be matched to the size of the thread pool in the `TaskExecutor`. 
+The `TaskExecutorPartitionHandler` is useful for IO-intensive `Step` instances, such as
+copying large numbers of files or replicating filesystems into content management
+systems. It can also be used for remote execution by providing a `Step` implementation
+that is a proxy for a remote invocation (such as using Spring Remoting).
+
+#### Partitioner
+
+The `Partitioner` has a simpler responsibility: to generate execution contexts as input
+parameters for new step executions only (no need to worry about restarts). It has a
+single method, as shown in the following interface definition:
+
+```
+public interface Partitioner {
+    Map<String, ExecutionContext> partition(int gridSize);
+}
+```
+
+The return value from this method associates a unique name for each step execution (the
+`String`) with input parameters in the form of an `ExecutionContext`. The names show up
+later in the Batch metadata as the step names of the partitioned `StepExecutions`. The
+`ExecutionContext` is just a bag of name-value pairs, so it might contain a range of
+primary keys, line numbers, or the location of an input file. The remote `Step` then
+normally binds to the context input using `#{…}` placeholders (late binding in step
+scope), as illustrated in the next section.
+
+The names of the step executions (the keys in the `Map` returned by `Partitioner`) need
+to be unique among the step executions of a `Job` but do not have any other specific
+requirements. The easiest way to do this (and to make the names meaningful for users) is
+to use a prefix+suffix naming convention, where the prefix is the name of the step that
+is being executed (which itself is unique in the `Job`), and the suffix is just a
+counter. There is a `SimplePartitioner` in the framework that uses this convention.
+
+An optional interface called `PartitionNameProvider` can be used to provide the partition
+names separately from the partitions themselves. If a `Partitioner` implements this
+interface, then, on a restart, only the names are queried. If partitioning is expensive,
+this can be a useful optimization. The names provided by the `PartitionNameProvider` must
+match those provided by the `Partitioner`.
+
+#### Binding Input Data to Steps
+
+It is very efficient for the steps that are executed by the `PartitionHandler` to have
+identical configuration and for their input parameters to be bound at runtime from the
+`ExecutionContext`. This is easy to do with the StepScope feature of Spring Batch
+(covered in more detail in the section on [Late Binding](step.html#late-binding)). For
+example, if the `Partitioner` creates `ExecutionContext` instances with an attribute key
+called `fileName`, pointing to a different file (or directory) for each step invocation,
+the `Partitioner` output might resemble the content of the following table:
+
+|*Step Execution Name (key)*|*ExecutionContext (value)*|
+|---------------------------|--------------------------|
+| filecopy:partition0       | fileName=/home/data/one  |
+| filecopy:partition1       | fileName=/home/data/two  |
+| filecopy:partition2       | fileName=/home/data/three|
+
+Then the file name can be bound to a step using late binding to the execution context.
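+A `Partitioner` that produces contexts like those in the preceding table might look like
+the following minimal sketch. The directory path and class name are illustrative
+assumptions; with the default splitter, the step name prefix (for example, `filecopy:`)
+is prepended to the keys automatically:
+
+```
+import java.io.File;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.springframework.batch.core.partition.support.Partitioner;
+import org.springframework.batch.item.ExecutionContext;
+
+public class FilePartitioner implements Partitioner {
+
+    // illustrative assumption: one partition per file in this directory
+    private final File directory = new File("/home/data");
+
+    @Override
+    public Map<String, ExecutionContext> partition(int gridSize) {
+        Map<String, ExecutionContext> partitions = new HashMap<>();
+        File[] files = directory.listFiles();
+        for (int i = 0; i < files.length; i++) {
+            ExecutionContext context = new ExecutionContext();
+            context.putString("fileName", files[i].getAbsolutePath());
+            partitions.put("partition" + i, context);
+        }
+        return partitions;
+    }
+}
+```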
+The following example shows how to define late binding in XML:
+
+XML Configuration
+
+```
+<bean id="itemReader" scope="step"
+      class="org.springframework.batch.item.file.MultiResourceItemReader">
+    <property name="resources" value="#{stepExecutionContext[fileName]}/*"/>
+</bean>
+```
+
+The following example shows how to define late binding in Java:
+
+Java Configuration
+
+```
+@Bean
+public MultiResourceItemReader itemReader(
+        @Value("#{stepExecutionContext['fileName']}/*") Resource[] resources) {
+    return new MultiResourceItemReaderBuilder()
+            .delegate(fileReader())
+            .name("itemReader")
+            .resources(resources)
+            .build();
+}
+```
\ No newline at end of file
diff --git a/docs/en/spring-batch/schema-appendix.md b/docs/en/spring-batch/schema-appendix.md
new file mode 100644
index 0000000000000000000000000000000000000000..eecf926584e1f5a7d7ccf3d7eaa42cd340227ff9
--- /dev/null
+++ b/docs/en/spring-batch/schema-appendix.md
@@ -0,0 +1,389 @@
+# Meta-Data Schema
+
+## Appendix A: Meta-Data Schema
+
+### Overview
+
+The Spring Batch Metadata tables closely match the Domain objects that represent them in
+Java. For example, `JobInstance`, `JobExecution`, `JobParameters`, and `StepExecution`
+map to `BATCH_JOB_INSTANCE`, `BATCH_JOB_EXECUTION`, `BATCH_JOB_EXECUTION_PARAMS`, and
+`BATCH_STEP_EXECUTION`, respectively. `ExecutionContext` maps to both
+`BATCH_JOB_EXECUTION_CONTEXT` and `BATCH_STEP_EXECUTION_CONTEXT`. The `JobRepository` is
+responsible for saving and storing each Java object into its correct table. This appendix
+describes the metadata tables in detail, along with many of the design decisions that
+were made when creating them. When viewing the various table creation statements below,
+it is important to realize that the data types used are as generic as possible. Spring
+Batch provides many schemas as examples, all of which have varying data types, due to
+variations in how individual database vendors handle data types. The following image
+shows an ERD model of all six tables and their relationships to one another:
+
+![Spring Batch Meta-Data ERD](https://docs.spring.io/spring-batch/docs/current/reference/html/images/meta-data-erd.png)
+
+Figure 1. Spring Batch Meta-Data ERD
+
+#### Example DDL Scripts
+
+The Spring Batch Core JAR file contains example scripts to create the relational tables
+for a number of database platforms (which are, in turn, auto-detected by the job
+repository factory bean or namespace equivalent). These scripts can be used as is or
+modified with additional indexes and constraints, as desired. The file names are in the
+form `schema-*.sql`, where "\*" is the short name of the target database platform.
+The scripts are in the package `org.springframework.batch.core`.
+
+#### Migration DDL Scripts
+
+Spring Batch provides migration DDL scripts that you need to execute when you upgrade versions.
+These scripts can be found in the Core JAR file under `org/springframework/batch/core/migration`.
+Migration scripts are organized into folders corresponding to the version numbers in which they were introduced:
+
+* `2.2`: contains scripts needed if you are migrating from a version before `2.2` to version `2.2`
+
+* `4.1`: contains scripts needed if you are migrating from a version before `4.1` to version `4.1`
+
+#### Version
+
+Many of the database tables discussed in this appendix contain a version column. This
+column is important because Spring Batch employs an optimistic locking strategy when
+dealing with updates to the database. This means that each time a record is 'touched'
+(updated), the value in the version column is incremented by one.
+When the repository goes
+back to save the value, if the version number has changed, it throws an
+`OptimisticLockingFailureException`, indicating that there has been an error with
+concurrent access. This check is necessary, since, even though different batch jobs may
+be running on different machines, they all use the same database tables.
+
+#### Identity
+
+`BATCH_JOB_INSTANCE`, `BATCH_JOB_EXECUTION`, and `BATCH_STEP_EXECUTION` each contain
+columns ending in `_ID`. These fields act as primary keys for their respective tables.
+However, they are not database-generated keys. Rather, they are generated by separate
+sequences. This is necessary because, after inserting one of the domain objects into the
+database, the key it is given needs to be set on the actual object so that they can be
+uniquely identified in Java. Newer database drivers (JDBC 3.0 and up) support this
+feature with database-generated keys. However, rather than require that feature,
+sequences are used. Each variation of the schema contains some form of the following
+statements:
+
+```
+CREATE SEQUENCE BATCH_STEP_EXECUTION_SEQ;
+CREATE SEQUENCE BATCH_JOB_EXECUTION_SEQ;
+CREATE SEQUENCE BATCH_JOB_SEQ;
+```
+
+Many database vendors do not support sequences. In these cases, workarounds are used,
+such as the following statements for MySQL:
+
+```
+CREATE TABLE BATCH_STEP_EXECUTION_SEQ (ID BIGINT NOT NULL) ENGINE=InnoDB;
+INSERT INTO BATCH_STEP_EXECUTION_SEQ values(0);
+CREATE TABLE BATCH_JOB_EXECUTION_SEQ (ID BIGINT NOT NULL) ENGINE=InnoDB;
+INSERT INTO BATCH_JOB_EXECUTION_SEQ values(0);
+CREATE TABLE BATCH_JOB_SEQ (ID BIGINT NOT NULL) ENGINE=InnoDB;
+INSERT INTO BATCH_JOB_SEQ values(0);
+```
+
+In the preceding case, a table is used in place of each sequence. The Spring core class
+`MySQLMaxValueIncrementer` then increments the one column in this table to provide
+similar functionality.
+
+### `BATCH_JOB_INSTANCE`
+
+The `BATCH_JOB_INSTANCE` table holds all information relevant to a `JobInstance` and
+serves as the top of the overall hierarchy. The following generic DDL statement is used
+to create it:
+
+```
+CREATE TABLE BATCH_JOB_INSTANCE (
+  JOB_INSTANCE_ID BIGINT PRIMARY KEY ,
+  VERSION BIGINT,
+  JOB_NAME VARCHAR(100) NOT NULL ,
+  JOB_KEY VARCHAR(2500)
+);
+```
+
+The following list describes each column in the table:
+
+* `JOB_INSTANCE_ID`: The unique ID that identifies the instance. It is also the primary
+  key. The value of this column should be obtainable by calling the `getId` method on
+  `JobInstance`.
+
+* `VERSION`: See [Version](#metaDataVersion).
+
+* `JOB_NAME`: Name of the job obtained from the `Job` object. Because it is required to
+  identify the instance, it must not be null.
+
+* `JOB_KEY`: A serialization of the `JobParameters` that uniquely identifies separate
+  instances of the same job from one another. (`JobInstances` with the same job name must
+  have different `JobParameters` and, thus, different `JOB_KEY` values).
+
+### `BATCH_JOB_EXECUTION_PARAMS`
+
+The `BATCH_JOB_EXECUTION_PARAMS` table holds all information relevant to the
+`JobParameters` object. It contains 0 or more key/value pairs passed to a `Job` and
+serves as a record of the parameters with which a job was run. For each parameter that
+contributes to the generation of a job’s identity, the `IDENTIFYING` flag is set to true.
+Note that the table has been denormalized.
+Rather than creating a separate table for each
+type, there is one table with a column indicating the type, as shown in the following
+listing:
+
+```
+CREATE TABLE BATCH_JOB_EXECUTION_PARAMS (
+	JOB_EXECUTION_ID BIGINT NOT NULL ,
+	TYPE_CD VARCHAR(6) NOT NULL ,
+	KEY_NAME VARCHAR(100) NOT NULL ,
+	STRING_VAL VARCHAR(250) ,
+	DATE_VAL DATETIME DEFAULT NULL ,
+	LONG_VAL BIGINT ,
+	DOUBLE_VAL DOUBLE PRECISION ,
+	IDENTIFYING CHAR(1) NOT NULL ,
+	constraint JOB_EXEC_PARAMS_FK foreign key (JOB_EXECUTION_ID)
+	references BATCH_JOB_EXECUTION(JOB_EXECUTION_ID)
+);
+```
+
+The following list describes each column:
+
+* `JOB_EXECUTION_ID`: Foreign key from the `BATCH_JOB_EXECUTION` table that indicates the
+  job execution to which the parameter entry belongs. Note that multiple rows (that is,
+  key/value pairs) may exist for each execution.
+
+* `TYPE_CD`: String representation of the type of value stored, which can be a string, a
+  date, a long, or a double. Because the type must be known, it cannot be null.
+
+* `KEY_NAME`: The parameter key.
+
+* `STRING_VAL`: Parameter value, if the type is string.
+
+* `DATE_VAL`: Parameter value, if the type is date.
+
+* `LONG_VAL`: Parameter value, if the type is long.
+
+* `DOUBLE_VAL`: Parameter value, if the type is double.
+
+* `IDENTIFYING`: Flag indicating whether the parameter contributed to the identity of the
+  related `JobInstance`.
+
+Note that there is no primary key for this table. This is because the framework has no
+use for one and, thus, does not require it. If need be, you can add a primary key with a
+database-generated key without causing any issues to the framework itself.
+
+### `BATCH_JOB_EXECUTION`
+
+The `BATCH_JOB_EXECUTION` table holds all information relevant to the `JobExecution`
+object. Every time a `Job` is run, there is always a new `JobExecution` and a new row in
+this table. The following listing shows the definition of the `BATCH_JOB_EXECUTION`
+table:
+
+```
+CREATE TABLE BATCH_JOB_EXECUTION (
+  JOB_EXECUTION_ID BIGINT PRIMARY KEY ,
+  VERSION BIGINT,
+  JOB_INSTANCE_ID BIGINT NOT NULL,
+  CREATE_TIME TIMESTAMP NOT NULL,
+  START_TIME TIMESTAMP DEFAULT NULL,
+  END_TIME TIMESTAMP DEFAULT NULL,
+  STATUS VARCHAR(10),
+  EXIT_CODE VARCHAR(20),
+  EXIT_MESSAGE VARCHAR(2500),
+  LAST_UPDATED TIMESTAMP,
+  JOB_CONFIGURATION_LOCATION VARCHAR(2500) NULL,
+  constraint JOB_INSTANCE_EXECUTION_FK foreign key (JOB_INSTANCE_ID)
+  references BATCH_JOB_INSTANCE(JOB_INSTANCE_ID)
+) ;
+```
+
+The following list describes each column:
+
+* `JOB_EXECUTION_ID`: Primary key that uniquely identifies this execution. The value of
+  this column is obtainable by calling the `getId` method of the `JobExecution` object.
+
+* `VERSION`: See [Version](#metaDataVersion).
+
+* `JOB_INSTANCE_ID`: Foreign key from the `BATCH_JOB_INSTANCE` table. It indicates the
+  instance to which this execution belongs. There may be more than one execution per
+  instance.
+
+* `CREATE_TIME`: Timestamp representing the time when the execution was created.
+
+* `START_TIME`: Timestamp representing the time when the execution was started.
+
+* `END_TIME`: Timestamp representing the time when the execution finished, regardless of
+  success or failure. An empty value in this column when the job is not currently running
+  indicates that there has been some type of error and the framework was unable to perform
+  a last save before failing.
+
+* `STATUS`: Character string representing the status of the execution. This may be
+  `COMPLETED`, `STARTED`, and others. The object representation of this column is the
+  `BatchStatus` enumeration.
+* `EXIT_CODE`: Character string representing the exit code of the execution. In the case
+  of a command-line job, this may be converted into a number.
+
+* `EXIT_MESSAGE`: Character string representing a more detailed description of how the
+  job exited. In the case of failure, this might include as much of the stack trace as is
+  possible.
+
+* `LAST_UPDATED`: Timestamp representing the last time this execution was persisted.
+
+### `BATCH_STEP_EXECUTION`
+
+The `BATCH_STEP_EXECUTION` table holds all information relevant to the `StepExecution`
+object. This table is similar in many ways to the `BATCH_JOB_EXECUTION` table, and there
+is always at least one entry per `Step` for each `JobExecution` created. The following
+listing shows the definition of the `BATCH_STEP_EXECUTION` table:
+
+```
+CREATE TABLE BATCH_STEP_EXECUTION (
+  STEP_EXECUTION_ID BIGINT PRIMARY KEY ,
+  VERSION BIGINT NOT NULL,
+  STEP_NAME VARCHAR(100) NOT NULL,
+  JOB_EXECUTION_ID BIGINT NOT NULL,
+  START_TIME TIMESTAMP NOT NULL ,
+  END_TIME TIMESTAMP DEFAULT NULL,
+  STATUS VARCHAR(10),
+  COMMIT_COUNT BIGINT ,
+  READ_COUNT BIGINT ,
+  FILTER_COUNT BIGINT ,
+  WRITE_COUNT BIGINT ,
+  READ_SKIP_COUNT BIGINT ,
+  WRITE_SKIP_COUNT BIGINT ,
+  PROCESS_SKIP_COUNT BIGINT ,
+  ROLLBACK_COUNT BIGINT ,
+  EXIT_CODE VARCHAR(20) ,
+  EXIT_MESSAGE VARCHAR(2500) ,
+  LAST_UPDATED TIMESTAMP,
+  constraint JOB_EXECUTION_STEP_FK foreign key (JOB_EXECUTION_ID)
+  references BATCH_JOB_EXECUTION(JOB_EXECUTION_ID)
+) ;
+```
+
+The following list describes each column:
+
+* `STEP_EXECUTION_ID`: Primary key that uniquely identifies this execution. The value of
+  this column should be obtainable by calling the `getId` method of the `StepExecution`
+  object.
+
+* `VERSION`: See [Version](#metaDataVersion).
+
+* `STEP_NAME`: The name of the step to which this execution belongs.
+
+* `JOB_EXECUTION_ID`: Foreign key from the `BATCH_JOB_EXECUTION` table. It indicates the
+  `JobExecution` to which this `StepExecution` belongs. There may be only one
+  `StepExecution` for a given `JobExecution` for a given `Step` name.
+
+* `START_TIME`: Timestamp representing the time when the execution was started.
+
+* `END_TIME`: Timestamp representing the time when the execution finished, regardless
+  of success or failure. An empty value in this column, even though the job is not
+  currently running, indicates that there has been some type of error and the framework was
+  unable to perform a last save before failing.
+
+* `STATUS`: Character string representing the status of the execution. This may be
+  `COMPLETED`, `STARTED`, and others. The object representation of this column is the
+  `BatchStatus` enumeration.
+
+* `COMMIT_COUNT`: The number of times the step committed a transaction during this
+  execution.
+
+* `READ_COUNT`: The number of items read during this execution.
+
+* `FILTER_COUNT`: The number of items filtered out of this execution.
+
+* `WRITE_COUNT`: The number of items written and committed during this execution.
+
+* `READ_SKIP_COUNT`: The number of items skipped on read during this execution.
+
+* `WRITE_SKIP_COUNT`: The number of items skipped on write during this execution.
+
+* `PROCESS_SKIP_COUNT`: The number of items skipped during processing during this
+  execution.
+
+* `ROLLBACK_COUNT`: The number of rollbacks during this execution. Note that this count
+  includes each time rollback occurs, including rollbacks for retry and those in the skip
+  recovery procedure.
+* `EXIT_CODE`: Character string representing the exit code of the execution. In the case
+  of a command-line job, this may be converted into a number.
+
+* `EXIT_MESSAGE`: Character string representing a more detailed description of how the
+  job exited. In the case of failure, this might include as much of the stack trace as is
+  possible.
+
+* `LAST_UPDATED`: Timestamp representing the last time this execution was persisted.
+
+### `BATCH_JOB_EXECUTION_CONTEXT`
+
+The `BATCH_JOB_EXECUTION_CONTEXT` table holds all information relevant to the
+`ExecutionContext` of a `Job`. There is exactly one `Job` `ExecutionContext` per
+`JobExecution`, and it contains all of the job-level data that is needed for a particular
+job execution. This data typically represents the state that must be retrieved after a
+failure, so that a `JobInstance` can "start from where it left off". The following
+listing shows the definition of the `BATCH_JOB_EXECUTION_CONTEXT` table:
+
+```
+CREATE TABLE BATCH_JOB_EXECUTION_CONTEXT (
+  JOB_EXECUTION_ID BIGINT PRIMARY KEY,
+  SHORT_CONTEXT VARCHAR(2500) NOT NULL,
+  SERIALIZED_CONTEXT CLOB,
+  constraint JOB_EXEC_CTX_FK foreign key (JOB_EXECUTION_ID)
+  references BATCH_JOB_EXECUTION(JOB_EXECUTION_ID)
+) ;
+```
+
+The following list describes each column:
+
+* `JOB_EXECUTION_ID`: Foreign key representing the `JobExecution` to which the context
+  belongs. Because it is also the primary key, there is exactly one row for each execution.
+
+* `SHORT_CONTEXT`: A string version of the `SERIALIZED_CONTEXT`.
+
+* `SERIALIZED_CONTEXT`: The entire context, serialized.
+
+### `BATCH_STEP_EXECUTION_CONTEXT`
+
+The `BATCH_STEP_EXECUTION_CONTEXT` table holds all information relevant to the
+`ExecutionContext` of a `Step`. There is exactly one `ExecutionContext` per
+`StepExecution`, and it contains all of the data that
+needs to be persisted for a particular step execution. This data typically represents the
+state that must be retrieved after a failure, so that a `JobInstance` can "start from
+where it left off". The following listing shows the definition of the
+`BATCH_STEP_EXECUTION_CONTEXT` table:
+
+```
+CREATE TABLE BATCH_STEP_EXECUTION_CONTEXT (
+  STEP_EXECUTION_ID BIGINT PRIMARY KEY,
+  SHORT_CONTEXT VARCHAR(2500) NOT NULL,
+  SERIALIZED_CONTEXT CLOB,
+  constraint STEP_EXEC_CTX_FK foreign key (STEP_EXECUTION_ID)
+  references BATCH_STEP_EXECUTION(STEP_EXECUTION_ID)
+) ;
+```
+
+The following list describes each column:
+
+* `STEP_EXECUTION_ID`: Foreign key representing the `StepExecution` to which the context
+  belongs. Because it is also the primary key, there is exactly one row for each execution.
+
+* `SHORT_CONTEXT`: A string version of the `SERIALIZED_CONTEXT`.
+
+* `SERIALIZED_CONTEXT`: The entire context, serialized.
+
+### Archiving
+
+Because there are entries in multiple tables every time a batch job is run, it is common
+to create an archive strategy for the metadata tables. The tables themselves are designed
+to show a record of what happened in the past and generally do not affect the run of any
+job, with a few notable exceptions pertaining to restart:
+
+* The framework uses the metadata tables to determine whether a particular `JobInstance`
+  has been run before. If it has been run and if the job is not restartable, then an
+  exception is thrown.
+
+* If an entry for a `JobInstance` is removed without having completed successfully, the
+  framework thinks that the job is new rather than a restart.
+* If a job is restarted, the framework uses any data that has been persisted to the
+  `ExecutionContext` to restore the `Job`’s state. Therefore, removing any entries from
+  this table for jobs that have not completed successfully prevents them from starting at
+  the correct point if run again.
+
+### International and Multi-byte Characters
+
+If you are using multi-byte character sets (such as Chinese or Cyrillic) in your business
+processing, then those characters might need to be persisted in the Spring Batch schema.
+Many users find that simply changing the schema to double the length of the `VARCHAR`
+columns is enough. Others prefer to configure the
+[JobRepository](job.html#configuringJobRepository) with `max-varchar-length` set to half the
+value of the `VARCHAR` column length. Some users have also reported that they use
+`NVARCHAR` in place of `VARCHAR` in their schema definitions. The best result depends on
+the database platform and the way the database server has been configured locally.
+
+### Recommendations for Indexing Meta Data Tables
+
+Spring Batch provides DDL samples for the metadata tables in the Core JAR file for
+several common database platforms. Index declarations are not included in that DDL,
+because there are too many variations in how users may want to index, depending on their
+precise platform, local conventions, and the business requirements of how the jobs are
+operated. The following table provides some indication as to which columns are going to
+be used in a `WHERE` clause by the DAO implementations provided by Spring Batch and how
+frequently they might be used, so that individual projects can make up their own minds
+about indexing:
+
+|  Default Table Name  |              Where Clause               |                            Frequency                           |
+|----------------------|-----------------------------------------|-----------------------------------------------------------------|
+| BATCH\_JOB\_INSTANCE |JOB\_NAME = ? and JOB\_KEY = ?           |Every time a job is launched                                     |
+|BATCH\_JOB\_EXECUTION |JOB\_INSTANCE\_ID = ?                    |Every time a job is restarted                                    |
+|BATCH\_STEP\_EXECUTION|VERSION = ?                              |On commit interval, a.k.a. chunk (and at start and end of step)  |
+|BATCH\_STEP\_EXECUTION|STEP\_NAME = ? and JOB\_EXECUTION\_ID = ?|Before each step execution                                       |
\ No newline at end of file
diff --git a/docs/en/spring-batch/spring-batch-integration.md b/docs/en/spring-batch/spring-batch-integration.md
new file mode 100644
index 0000000000000000000000000000000000000000..2c1f98547278d23f0adee7711d4b3f26282b9a5b
--- /dev/null
+++ b/docs/en/spring-batch/spring-batch-integration.md
@@ -0,0 +1,1249 @@
+# Spring Batch Integration
+
+## Spring Batch Integration
+
+### Spring Batch Integration Introduction
+
+Many users of Spring Batch may encounter requirements that are
+outside the scope of Spring Batch but that may be efficiently and
+concisely implemented by using Spring Integration. Conversely, Spring
+Integration users may encounter Spring Batch requirements and need a way
+to efficiently integrate both frameworks. In this context, several
+patterns and use cases emerge, and Spring Batch Integration
+addresses those requirements.
+
+The line between Spring Batch and Spring Integration is not always
+clear, but two pieces of advice can
+help: think about granularity, and apply common patterns. Some
+of those common patterns are described in this section of the reference manual.
+
+Adding messaging to a batch process enables automation of
+operations and also separation and strategizing of key concerns.
+For example, a message might trigger a job to execute, and then the
+sending of the message can be exposed in a variety of ways. Alternatively, when
+a job completes or fails, that event might trigger a message to be sent,
+and the consumers of those messages might have operational concerns
+that have nothing to do with the application itself. Messaging can
+also be embedded in a job (for example, reading or writing items for
+processing via channels). Remote partitioning and remote chunking
+provide methods to distribute workloads over a number of workers.
+
+This section covers the following key concepts:
+
+* [Namespace Support](#namespace-support)
+
+* [Launching Batch Jobs through Messages](#launching-batch-jobs-through-messages)
+
+* [Providing Feedback with Informational Messages](#providing-feedback-with-informational-messages)
+
+* [Asynchronous Processors](#asynchronous-processors)
+
+* [Externalizing Batch Process Execution](#externalizing-batch-process-execution)
+
+#### Namespace Support
+
+Dedicated XML namespace support was added in Spring Batch Integration 1.3,
+with the aim of providing an easier configuration
+experience. To activate the namespace, add the following
+namespace declarations to your Spring XML Application Context
+file:
+
+```
+<beans xmlns="http://www.springframework.org/schema/beans"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xmlns:batch-int="http://www.springframework.org/schema/batch-integration"
+  xsi:schemaLocation="
+    http://www.springframework.org/schema/batch-integration
+    https://www.springframework.org/schema/batch-integration/spring-batch-integration.xsd">
+
+    ...
+
+</beans>
+```
+
+A fully configured Spring XML Application Context file for Spring
+Batch Integration may look like the following:
+
+```
+<beans xmlns="http://www.springframework.org/schema/beans"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xmlns:int="http://www.springframework.org/schema/integration"
+  xmlns:batch="http://www.springframework.org/schema/batch"
+  xmlns:batch-int="http://www.springframework.org/schema/batch-integration"
+  xsi:schemaLocation="
+    http://www.springframework.org/schema/batch-integration
+    https://www.springframework.org/schema/batch-integration/spring-batch-integration.xsd
+    http://www.springframework.org/schema/batch
+    https://www.springframework.org/schema/batch/spring-batch.xsd
+    http://www.springframework.org/schema/beans
+    https://www.springframework.org/schema/beans/spring-beans.xsd
+    http://www.springframework.org/schema/integration
+    https://www.springframework.org/schema/integration/spring-integration.xsd">
+
+    ...
+
+</beans>
+```
+
+Appending version numbers to the referenced XSD file is also
+allowed, but, as a version-less declaration always uses the
+latest schema, we generally do not recommend appending the version
+number to the XSD name. Adding a version number
+could possibly create issues when updating the Spring Batch
+Integration dependencies, as they may require more recent versions
+of the XML schema.
+#### Launching Batch Jobs through Messages
+
+When starting batch jobs by using the core Spring Batch API, you
+basically have two options:
+
+* From the command line, with the `CommandLineJobRunner`
+
+* Programmatically, with either `JobOperator.start()` or `JobLauncher.run()`
+
+For example, you may want to use the
+`CommandLineJobRunner` when invoking batch jobs by
+using a shell script. Alternatively, you may use the
+`JobOperator` directly (for example, when using
+Spring Batch as part of a web application). However, what about
+more complex use cases? Maybe you need to poll a remote (S)FTP
+server to retrieve the data for the batch job, or your application
+has to support multiple different data sources simultaneously. For
+example, you may receive data files not only from the web, but also from
+FTP and other sources. Maybe additional transformation of the input files is
+needed before invoking Spring Batch.
+
+Therefore, it would be much more powerful to execute the batch job
+by using Spring Integration and its numerous adapters. For example,
+you can use a *File Inbound Channel Adapter* to
+monitor a directory in the file system and start the batch job as
+soon as the input file arrives. Additionally, you can create Spring
+Integration flows that use multiple different adapters to easily
+ingest data for your batch jobs from multiple sources
+simultaneously by using only configuration. Implementing all these
+scenarios with Spring Integration is easy, as it allows for
+decoupled, event-driven execution of the `JobLauncher`.
+
+Spring Batch Integration provides the
+`JobLaunchingMessageHandler` class that you can
+use to launch batch jobs. The input for the
+`JobLaunchingMessageHandler` is provided by a
+Spring Integration message, which has a payload of type
+`JobLaunchRequest`. This class is a wrapper around the `Job`
+that needs to be launched and around the `JobParameters`
+necessary to launch the batch job.
+
+The following image illustrates the typical Spring Integration
+message flow that is needed to start a batch job. The
+[EIP (Enterprise Integration Patterns) website](https://www.enterpriseintegrationpatterns.com/toc.html)
+provides a full overview of messaging icons and their descriptions.
+
+![Launch Batch Job](https://docs.spring.io/spring-batch/docs/current/reference/html/images/launch-batch-job.png)
+
+Figure 1. Launch Batch Job
+
+##### Transforming a file into a JobLaunchRequest
+
+```
+package io.spring.sbi;
+
+import org.springframework.batch.core.Job;
+import org.springframework.batch.core.JobParametersBuilder;
+import org.springframework.batch.integration.launch.JobLaunchRequest;
+import org.springframework.integration.annotation.Transformer;
+import org.springframework.messaging.Message;
+
+import java.io.File;
+
+public class FileMessageToJobRequest {
+    private Job job;
+    private String fileParameterName;
+
+    public void setFileParameterName(String fileParameterName) {
+        this.fileParameterName = fileParameterName;
+    }
+
+    public void setJob(Job job) {
+        this.job = job;
+    }
+
+    @Transformer
+    public JobLaunchRequest toRequest(Message<File> message) {
+        JobParametersBuilder jobParametersBuilder =
+            new JobParametersBuilder();
+
+        jobParametersBuilder.addString(fileParameterName,
+            message.getPayload().getAbsolutePath());
+
+        return new JobLaunchRequest(job, jobParametersBuilder.toJobParameters());
+    }
+}
+```
+
+##### The `JobExecution` Response
+
+When a batch job is being executed, a
+`JobExecution` instance is returned. This
+instance can be used to determine the status of an execution. If
+a `JobExecution` can be created
+successfully, it is always returned, regardless of whether
+or not the actual execution is successful.
+
+The exact behavior of how the `JobExecution`
+instance is returned depends on the provided
+`TaskExecutor`. If a
+synchronous (single-threaded)
+`TaskExecutor` implementation is used, the
+`JobExecution` response is returned only
+after the job completes. When using an
+asynchronous `TaskExecutor`, the
+`JobExecution` instance is returned
+immediately. Users can then take the `id` of the
+`JobExecution` instance
+(with `JobExecution.getJobId()`) and query the
+`JobRepository` for the job’s updated status
+by using the `JobExplorer`. For more
+information, see the Spring
+Batch reference documentation on
+[Querying the Repository](job.html#queryingRepository).
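+As a minimal sketch of that polling approach (assuming a `JobExplorer` bean is available
+and the execution id has been captured from the returned `JobExecution`):
+
+```
+public BatchStatus pollStatus(JobExplorer jobExplorer, long executionId) {
+    // re-reads the execution from the job repository on every call
+    JobExecution execution = jobExplorer.getJobExecution(executionId);
+    return execution.getStatus();
+}
+```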
+##### Spring Batch Integration Configuration
+
+Consider a case where someone needs to create a file `inbound-channel-adapter` to listen
+for CSV files in the provided directory, hand them off to a transformer
+(`FileMessageToJobRequest`), launch the job through the *Job Launching Gateway*, and then
+log the output of the `JobExecution` with the `logging-channel-adapter`.
+
+The following example shows how that common case can be configured in XML:
+
+XML Configuration
+
+```
+<int:channel id="inboundFileChannel"/>
+<int:channel id="outboundJobRequestChannel"/>
+<int:channel id="jobLaunchReplyChannel"/>
+
+<int-file:inbound-channel-adapter id="filePoller"
+    channel="inboundFileChannel"
+    directory="file:/tmp/myfiles/"
+    filename-pattern="*.csv">
+  <int:poller fixed-rate="1000"/>
+</int-file:inbound-channel-adapter>
+
+<int:transformer input-channel="inboundFileChannel"
+    output-channel="outboundJobRequestChannel">
+  <bean class="io.spring.sbi.FileMessageToJobRequest">
+    <property name="job" ref="personJob"/>
+    <property name="fileParameterName" value="input.file.name"/>
+  </bean>
+</int:transformer>
+
+<batch-int:job-launching-gateway request-channel="outboundJobRequestChannel"
+    reply-channel="jobLaunchReplyChannel"/>
+
+<int:logging-channel-adapter channel="jobLaunchReplyChannel"/>
+```
+
+The following example shows how that common case can be configured in Java:
+
+Java Configuration
+
+```
+@Bean
+public FileMessageToJobRequest fileMessageToJobRequest() {
+    FileMessageToJobRequest fileMessageToJobRequest = new FileMessageToJobRequest();
+    fileMessageToJobRequest.setFileParameterName("input.file.name");
+    fileMessageToJobRequest.setJob(personJob());
+    return fileMessageToJobRequest;
+}
+
+@Bean
+public JobLaunchingGateway jobLaunchingGateway() {
+    SimpleJobLauncher simpleJobLauncher = new SimpleJobLauncher();
+    simpleJobLauncher.setJobRepository(jobRepository);
+    simpleJobLauncher.setTaskExecutor(new SyncTaskExecutor());
+    JobLaunchingGateway jobLaunchingGateway = new JobLaunchingGateway(simpleJobLauncher);
+
+    return jobLaunchingGateway;
+}
+
+@Bean
+public IntegrationFlow integrationFlow(JobLaunchingGateway jobLaunchingGateway) {
+    return IntegrationFlows.from(Files.inboundAdapter(new File("/tmp/myfiles")).
+                    filter(new SimplePatternFileListFilter("*.csv")),
+            c -> c.poller(Pollers.fixedRate(1000).maxMessagesPerPoll(1))).
+            transform(fileMessageToJobRequest()).
+            handle(jobLaunchingGateway).
+            log(LoggingHandler.Level.WARN, "headers.id + ': ' + payload").
+            get();
+}
+```
+
+##### Example ItemReader Configuration
+
+Now that we are polling for files and launching jobs, we need to configure our Spring
+Batch `ItemReader` (for example) to use the files found at the location defined by the job
+parameter called "input.file.name", as shown in the following bean configuration:
+
+The following XML example shows the necessary bean configuration:
+
+XML Configuration
+
+```
+<bean id="itemReader" class="org.springframework.batch.item.file.FlatFileItemReader"
+    scope="step">
+  <property name="resource" value="file://#{jobParameters['input.file.name']}"/>
+    ...
+</bean>
+```
+
+The following Java example shows the necessary bean configuration:
+
+Java Configuration
+
+```
+@Bean
+@StepScope
+public ItemReader sampleReader(@Value("#{jobParameters[input.file.name]}") String resource) {
+...
+    FlatFileItemReader flatFileItemReader = new FlatFileItemReader();
+    flatFileItemReader.setResource(new FileSystemResource(resource));
+...
+    return flatFileItemReader;
+}
+```
+
+The main points of interest in the preceding example are injecting the value of
+`#{jobParameters['input.file.name']}`
+as the Resource property value and setting the `ItemReader` bean
+to have *Step scope*. Setting the bean to have Step scope takes advantage of
+the late binding support, which allows access to the
+`jobParameters` variable.
+
+##### Available Attributes of the Job-Launching Gateway
+
+The job-launching gateway has the following attributes that you can set to control a job:
+
+* `id`: Identifies the underlying Spring bean definition, which is an instance of either:
+
+  * `EventDrivenConsumer`
+
+  * `PollingConsumer`
+
+  (The exact implementation depends on whether the component’s input channel is a
+  `SubscribableChannel` or `PollableChannel`.)
+
+* `auto-startup`: Boolean flag to indicate that the endpoint should start automatically on
+  startup. The default is *true*.
+
+* `request-channel`: The input `MessageChannel` of this endpoint.
+
+* `reply-channel`: `MessageChannel` to which the resulting `JobExecution` payload is sent.
+
+* `reply-timeout`: Lets you specify how long (in milliseconds) this gateway waits for the reply message
+  to be sent successfully to the reply channel before throwing
+  an exception. This attribute only applies when the channel
+  might block (for example, when using a bounded queue channel
+  that is currently full). Also, keep in mind that, when sending to a
+  `DirectChannel`, the invocation occurs
+  in the sender’s thread. Therefore, the failing of the send
+  operation may be caused by other components further downstream.
+  The `reply-timeout` attribute maps to the
+  `sendTimeout` property of the underlying
+  `MessagingTemplate` instance. If not specified, the attribute
+  defaults to `-1`,
+  meaning that, by default, the `Gateway` waits indefinitely.
+
+* `job-launcher`: Optional. Accepts a
+  custom `JobLauncher` bean reference.
+  If not specified, the adapter
+  re-uses the instance that is registered under the `id` of
+  `jobLauncher`. If no default instance
+  exists, an exception is thrown.
+
+* `order`: Specifies the order of invocation when this endpoint is connected as a subscriber
+  to a `SubscribableChannel`.
+
+##### Sub-Elements
+
+When this `Gateway` is receiving messages from a
+`PollableChannel`, you must either provide
+a global default `Poller` or provide a `Poller` sub-element to the
+`Job Launching Gateway`.
+
+The following example shows how to provide a poller in XML:
+
+XML Configuration
+
+```
+<batch-int:job-launching-gateway request-channel="queueChannel"
+    reply-channel="replyChannel" job-launcher="jobLauncher">
+  <int:poller fixed-rate="1000"/>
+</batch-int:job-launching-gateway>
+```
+
+The following example shows how to provide a poller in Java:
+
+Java Configuration
+
+```
+@Bean
+@ServiceActivator(inputChannel = "queueChannel", poller = @Poller(fixedRate="1000"))
+public JobLaunchingGateway sampleJobLaunchingGateway() {
+    JobLaunchingGateway jobLaunchingGateway = new JobLaunchingGateway(jobLauncher());
+    jobLaunchingGateway.setOutputChannel(replyChannel());
+    return jobLaunchingGateway;
+}
+```
+
+#### Providing Feedback with Informational Messages
+
+As Spring Batch jobs can run for long times, providing progress
+information is often critical. For example, stakeholders may want
+to be notified if some or all parts of a batch job have failed.
+Spring Batch provides support for this information being gathered
+through:
+
+* Active polling
+
+* Event-driven listeners
+
+When starting a Spring Batch job asynchronously (for example, by using the `Job Launching
+Gateway`), a `JobExecution` instance is returned.
+Thus, `JobExecution.getJobId()` can be
+used to continuously poll for status updates by retrieving updated instances of the
+`JobExecution` from the `JobRepository` by using the `JobExplorer`. However, this is
+considered sub-optimal, and an event-driven approach should be preferred.
+
+Therefore, Spring Batch provides listeners, including the three most commonly used
+listeners:
+
+* `StepListener`
+
+* `ChunkListener`
+
+* `JobExecutionListener`
+
+In the example shown in the following image, a Spring Batch job has been configured with a
+`StepExecutionListener`. Thus, Spring Integration receives and processes any step before
+or after events. For example, the received `StepExecution` can be inspected by using a
+`Router`. Based on the results of that inspection, various things can occur (such as
+routing a message to a Mail Outbound Channel Adapter), so that an email notification can
+be sent out based on some condition.
+
+![Handling Informational Messages](https://docs.spring.io/spring-batch/docs/current/reference/html/images/handling-informational-messages.png)
+
+Figure 2. Handling Informational Messages
+
+The following two-part example shows how a listener is configured to send a
+message to a `Gateway` for `StepExecution` events and log its output to a
+`logging-channel-adapter`.
+
+First, create the notification integration beans.
+
+The following example shows how to create the notification integration beans in XML:
+
+XML Configuration
+
+```
+<int:channel id="stepExecutionsChannel"/>
+
+<int:gateway id="notificationExecutionsListener"
+    service-interface="org.springframework.batch.core.StepExecutionListener"
+    default-request-channel="stepExecutionsChannel"/>
+
+<int:logging-channel-adapter channel="stepExecutionsChannel"/>
+```
+
+The following example shows how to create the notification integration beans in Java:
+
+Java Configuration
+
+```
+@Bean
+@ServiceActivator(inputChannel = "stepExecutionsChannel")
+public LoggingHandler loggingHandler() {
+    LoggingHandler adapter = new LoggingHandler(LoggingHandler.Level.WARN);
+    adapter.setLoggerName("TEST_LOGGER");
+    adapter.setLogExpressionString("headers.id + ': ' + payload");
+    return adapter;
+}
+
+@MessagingGateway(name = "notificationExecutionsListener", defaultRequestChannel = "stepExecutionsChannel")
+public interface NotificationExecutionListener extends StepExecutionListener {}
+```
+
+| |You need to add the `@IntegrationComponentScan` annotation to your configuration.|
+|---|---------------------------------------------------------------------------------|
+
+Second, modify your job to add a step-level listener.
+
+The following example shows how to add a step-level listener in XML:
+
+XML Configuration
+
+```
+<job id="importPayments">
+    <step id="step1">
+        <tasklet>
+            <chunk reader="itemReader" writer="itemWriter" commit-interval="200"/>
+            <listeners>
+                <listener ref="notificationExecutionsListener"/>
+            </listeners>
+        </tasklet>
+    </step>
+</job>
+```
+
+The following example shows how to add a step-level listener in Java:
+
+Java Configuration
+
+```
+public Job importPaymentsJob() {
+    return jobBuilderFactory.get("importPayments")
+        .start(stepBuilderFactory.get("step1")
+                .chunk(200)
+                .listener(notificationExecutionsListener())
+                ...
+}
+```
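+With the listener in place, each `StepExecution` event arrives on
+`stepExecutionsChannel` as a message payload. A minimal sketch of a consumer that reacts
+to failed steps might look like the following (the mail routing mentioned earlier is only
+indicated by a comment):
+
+```
+@ServiceActivator(inputChannel = "stepExecutionsChannel")
+public void onStepExecution(StepExecution stepExecution) {
+    if (stepExecution.getStatus() == BatchStatus.FAILED) {
+        // route a notification, e.g. to a mail outbound channel adapter
+    }
+}
+```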
+#### Asynchronous Processors
+
+Asynchronous Processors help you scale the processing of items. In the asynchronous
+processor use case, an `AsyncItemProcessor` serves as a dispatcher, executing the logic of
+the `ItemProcessor` for an item on a new thread. Once the item completes, the `Future` is
+passed to the `AsyncItemWriter` to be written.
+
+Therefore, you can increase performance by using asynchronous item processing, basically
+letting you implement *fork-join* scenarios. The `AsyncItemWriter` gathers the results and
+writes back the chunk as soon as all the results become available.
+
+The following example shows how to configure the `AsyncItemProcessor` in XML:
+
+XML Configuration
+
+```
+<bean id="processor"
+    class="org.springframework.batch.integration.async.AsyncItemProcessor">
+  <property name="delegate">
+    <bean class="your.ItemProcessor"/>
+  </property>
+  <property name="taskExecutor">
+    <bean class="org.springframework.core.task.SimpleAsyncTaskExecutor"/>
+  </property>
+</bean>
+```
+
+The following example shows how to configure the `AsyncItemProcessor` in Java:
+
+Java Configuration
+
+```
+@Bean
+public AsyncItemProcessor processor(ItemProcessor itemProcessor, TaskExecutor taskExecutor) {
+    AsyncItemProcessor asyncItemProcessor = new AsyncItemProcessor();
+    asyncItemProcessor.setTaskExecutor(taskExecutor);
+    asyncItemProcessor.setDelegate(itemProcessor);
+    return asyncItemProcessor;
+}
+```
+
+The `delegate` property refers to your `ItemProcessor` bean, and the `taskExecutor`
+property refers to the `TaskExecutor` of your choice.
+
+The following example shows how to configure the `AsyncItemWriter` in XML:
+
+XML Configuration
+
+```
+<bean id="itemWriter"
+    class="org.springframework.batch.integration.async.AsyncItemWriter">
+  <property name="delegate">
+    <bean id="itemWriter" class="your.ItemWriter"/>
+  </property>
+</bean>
+```
+
+The following example shows how to configure the `AsyncItemWriter` in Java:
+
+Java Configuration
+
+```
+@Bean
+public AsyncItemWriter writer(ItemWriter itemWriter) {
+    AsyncItemWriter asyncItemWriter = new AsyncItemWriter();
+    asyncItemWriter.setDelegate(itemWriter);
+    return asyncItemWriter;
+}
+```
+
+Again, the `delegate` property is
+actually a reference to your `ItemWriter` bean.
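+To wire the two beans into a step, the chunk declaration must use a `Future`-typed
+output, since the `AsyncItemProcessor` produces `Future` instances that the
+`AsyncItemWriter` unwraps. The following minimal sketch shows the idea (the `Person` item
+type and bean names are assumptions):
+
+```
+@Bean
+public Step asyncStep(AsyncItemProcessor<Person, Person> processor,
+        AsyncItemWriter<Person> writer) {
+    return stepBuilderFactory.get("asyncStep")
+            // the writer consumes the Future<Person> items produced by the processor
+            .<Person, Future<Person>>chunk(100)
+            .reader(itemReader())
+            .processor(processor)
+            .writer(writer)
+            .build();
+}
+```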
+#### Externalizing Batch Process Execution
+
+The integration approaches discussed so far suggest use cases
+where Spring Integration wraps Spring Batch like an outer shell.
+However, Spring Batch can also use Spring Integration internally.
+By using this approach, Spring Batch users can delegate the
+processing of items or even chunks to outside processes. This
+allows you to offload complex processing. Spring Batch Integration
+provides dedicated support for:
+
+* Remote Chunking
+
+* Remote Partitioning
+
+##### Remote Chunking
+
+![Remote Chunking](https://docs.spring.io/spring-batch/docs/current/reference/html/images/remote-chunking-sbi.png)
+
+Figure 3. Remote Chunking
+
+Taking things one step further, one can also externalize the
+chunk processing by using the
+`ChunkMessageChannelItemWriter`
+(provided by Spring Batch Integration), which sends items out
+and collects the result. Once sent, Spring Batch continues the
+process of reading and grouping items, without waiting for the results.
+Rather, it is the responsibility of the `ChunkMessageChannelItemWriter`
+to gather the results and integrate them back into the Spring Batch process.
+
+With Spring Integration, you have full
+control over the concurrency of your processes (for instance, by
+using a `QueueChannel` instead of a
+`DirectChannel`). Furthermore, by relying on
+Spring Integration’s rich collection of channel adapters (such as
+JMS and AMQP), you can distribute chunks of a batch job to
+external systems for processing.
+
+A job with a step to be remotely chunked might have a configuration similar to the
+following in XML:
+
+XML Configuration
+
+```
+<job id="personJob">
+  <step id="step1">
+    <tasklet>
+      <chunk reader="itemReader" writer="itemWriter" commit-interval="200"/>
+    </tasklet>
+    ...
+  </step>
+</job>
+```
+
+A job with a step to be remotely chunked might have a configuration similar to the
+following in Java:
+
+Java Configuration
+
+```
+public Job chunkJob() {
+     return jobBuilderFactory.get("personJob")
+             .start(stepBuilderFactory.get("step1")
+                     .<Person, Person>chunk(200)
+                     .reader(itemReader())
+                     .writer(itemWriter())
+                     .build())
+             .build();
+ }
+```
+
+The `ItemReader` reference points to the bean you want to use for reading data on the
+manager. The `ItemWriter` reference points to a special `ItemWriter` (called
+`ChunkMessageChannelItemWriter`), as described earlier. The processor (if any) is left off
+the manager configuration, as it is configured on the worker. You should check any
+additional component properties, such as throttle limits and so on, when implementing
+your use case.
+
+The following XML configuration provides a basic manager setup:
+
+XML Configuration
+
+```
+<bean id="connectionFactory" class="org.apache.activemq.ActiveMQConnectionFactory">
+  <property name="brokerURL" value="tcp://localhost:61616"/>
+</bean>
+
+<int:channel id="requests"/>
+
+<int-jms:outbound-channel-adapter id="jmsRequests"
+    destination-name="requests"
+    channel="requests"/>
+
+<bean id="messagingTemplate"
+    class="org.springframework.integration.core.MessagingTemplate">
+  <property name="defaultChannel" ref="requests"/>
+  <property name="receiveTimeout" value="2000"/>
+</bean>
+
+<bean id="itemWriter"
+    class="org.springframework.batch.integration.chunk.ChunkMessageChannelItemWriter"
+    scope="step">
+  <property name="messagingOperations" ref="messagingTemplate"/>
+  <property name="replyChannel" ref="replies"/>
+</bean>
+
+<int:channel id="replies">
+  <int:queue/>
+</int:channel>
+
+<int-jms:message-driven-channel-adapter id="jmsReplies"
+    destination-name="replies"
+    channel="replies"/>
+```
+
+The following Java configuration provides a basic manager setup:
+
+Java Configuration
+
+```
+@Bean
+public org.apache.activemq.ActiveMQConnectionFactory connectionFactory() {
+    ActiveMQConnectionFactory factory = new ActiveMQConnectionFactory();
+    factory.setBrokerURL("tcp://localhost:61616");
+    return factory;
+}
+
+/*
+ * Configure outbound flow (requests going to workers)
+ */
+@Bean
+public DirectChannel requests() {
+    return new DirectChannel();
+}
+
+@Bean
+public IntegrationFlow outboundFlow(ActiveMQConnectionFactory connectionFactory) {
+    return IntegrationFlows
+            .from(requests())
+            .handle(Jms.outboundAdapter(connectionFactory).destination("requests"))
+            .get();
+}
+
+/*
+ * Configure inbound flow (replies coming from workers)
+ */
+@Bean
+public QueueChannel replies() {
+    return new QueueChannel();
+}
+
+@Bean
+public IntegrationFlow inboundFlow(ActiveMQConnectionFactory connectionFactory) {
+    return IntegrationFlows
+            .from(Jms.messageDrivenChannelAdapter(connectionFactory).destination("replies"))
+            .channel(replies())
+            .get();
+}
+
+/*
+ * Configure the ChunkMessageChannelItemWriter
+ */
+@Bean
+public ItemWriter itemWriter() {
+    MessagingTemplate messagingTemplate = new MessagingTemplate();
+    messagingTemplate.setDefaultChannel(requests());
+    messagingTemplate.setReceiveTimeout(2000);
+    ChunkMessageChannelItemWriter chunkMessageChannelItemWriter
+            = new ChunkMessageChannelItemWriter<>();
+    chunkMessageChannelItemWriter.setMessagingOperations(messagingTemplate);
+    chunkMessageChannelItemWriter.setReplyChannel(replies());
+    return chunkMessageChannelItemWriter;
+}
+```
+
+The preceding configuration provides us with a number of beans. We
+configure our messaging middleware by using ActiveMQ and the
+inbound/outbound JMS adapters provided by Spring Integration. As
+shown, our `itemWriter` bean, which is
+referenced by our job step, uses the
+`ChunkMessageChannelItemWriter` to write chunks over the
+configured middleware.
+Now we can move on to the worker configuration.
+
+The following example shows the worker configuration in XML:
+
+XML Configuration
+
+```
+<bean id="connectionFactory" class="org.apache.activemq.ActiveMQConnectionFactory">
+  <property name="brokerURL" value="tcp://localhost:61616"/>
+</bean>
+
+<int:channel id="requests"/>
+<int:channel id="replies"/>
+
+<int-jms:message-driven-channel-adapter id="incomingRequests"
+    destination-name="requests"
+    channel="requests"/>
+
+<int-jms:outbound-channel-adapter id="outgoingReplies"
+    destination-name="replies"
+    channel="replies"/>
+
+<int:service-activator id="serviceActivator"
+    input-channel="requests"
+    output-channel="replies"
+    ref="chunkProcessorChunkHandler"
+    method="handleChunk"/>
+
+<bean id="chunkProcessorChunkHandler"
+    class="org.springframework.batch.integration.chunk.ChunkProcessorChunkHandler">
+  <property name="chunkProcessor">
+    <bean class="org.springframework.batch.core.step.item.SimpleChunkProcessor">
+      <property name="itemWriter">
+        <bean class="io.spring.sbi.PersonItemWriter"/>
+      </property>
+      <property name="itemProcessor">
+        <bean class="io.spring.sbi.PersonItemProcessor"/>
+      </property>
+    </bean>
+  </property>
+</bean>
+```
+
+The following example shows the worker configuration in Java:
+
+Java Configuration
+
+```
+@Bean
+public org.apache.activemq.ActiveMQConnectionFactory connectionFactory() {
+    ActiveMQConnectionFactory factory = new ActiveMQConnectionFactory();
+    factory.setBrokerURL("tcp://localhost:61616");
+    return factory;
+}
+
+/*
+ * Configure inbound flow (requests coming from the manager)
+ */
+@Bean
+public DirectChannel requests() {
+    return new DirectChannel();
+}
+
+@Bean
+public IntegrationFlow inboundFlow(ActiveMQConnectionFactory connectionFactory) {
+    return IntegrationFlows
+            .from(Jms.messageDrivenChannelAdapter(connectionFactory).destination("requests"))
+            .channel(requests())
+            .get();
+}
+
+/*
+ * Configure outbound flow (replies going to the manager)
+ */
+@Bean
+public DirectChannel replies() {
+    return new DirectChannel();
+}
+
+@Bean
+public IntegrationFlow outboundFlow(ActiveMQConnectionFactory connectionFactory) {
+    return IntegrationFlows
+            .from(replies())
+            .handle(Jms.outboundAdapter(connectionFactory).destination("replies"))
+            .get();
+}
+
+/*
+ * Configure the ChunkProcessorChunkHandler
+ */
+@Bean
+@ServiceActivator(inputChannel = "requests", outputChannel = "replies")
+public ChunkProcessorChunkHandler chunkProcessorChunkHandler() {
+    ChunkProcessor chunkProcessor
+            = new SimpleChunkProcessor<>(itemProcessor(), itemWriter());
+    ChunkProcessorChunkHandler chunkProcessorChunkHandler
+            = new ChunkProcessorChunkHandler<>();
+    chunkProcessorChunkHandler.setChunkProcessor(chunkProcessor);
+    return chunkProcessorChunkHandler;
+}
+```
+
+Most of these configuration items should look familiar from the
+manager configuration. Workers do not need access to
+the Spring Batch `JobRepository` nor
+to the actual job configuration file. The main bean of interest
+is the `chunkProcessorChunkHandler`. The
+`chunkProcessor` property of `ChunkProcessorChunkHandler` takes a
+configured `SimpleChunkProcessor`, which is where you would provide a reference to your
+`ItemWriter` (and, optionally, your
+`ItemProcessor`) that will run on the worker
+when it receives chunks from the manager.
+
+For more information, see the section of the "Scalability" chapter on
+[Remote Chunking](https://docs.spring.io/spring-batch/docs/current/reference/html/scalability.html#remoteChunking).
+
+Starting from version 4.1, Spring Batch Integration introduced the `@EnableBatchIntegration`
+annotation that can be used to simplify a remote chunking setup. This annotation provides
+two beans that can be autowired in the application context:
+
+* `RemoteChunkingManagerStepBuilderFactory`: used to configure the manager step
+
+* `RemoteChunkingWorkerBuilder`: used to configure the remote worker integration flow
+
+These APIs take care of configuring a number of components, as described in the following diagram:
+
+![Remote Chunking Configuration](https://docs.spring.io/spring-batch/docs/current/reference/html/images/remote-chunking-config.png)
+
+Figure 4. Remote Chunking Configuration
+On the manager side, the `RemoteChunkingManagerStepBuilderFactory` lets you
+configure a manager step by declaring:
+
+* the item reader to read items and send them to workers
+
+* the output channel ("Outgoing requests") to send requests to workers
+
+* the input channel ("Incoming replies") to receive replies from workers
+
+A `ChunkMessageChannelItemWriter` and the `MessagingTemplate` do not need to be explicitly
+configured (they can still be explicitly configured if required).
+
+On the worker side, the `RemoteChunkingWorkerBuilder` lets you configure a worker to:
+
+* listen to requests sent by the manager on the input channel ("Incoming requests")
+
+* call the `handleChunk` method of `ChunkProcessorChunkHandler` for each request
+  with the configured `ItemProcessor` and `ItemWriter`
+
+* send replies on the output channel ("Outgoing replies") to the manager
+
+There is no need to explicitly configure the `SimpleChunkProcessor`
+and the `ChunkProcessorChunkHandler` (they can be explicitly configured if required).
+
+The following example shows how to use these APIs:
+
+```
+@EnableBatchIntegration
+@EnableBatchProcessing
+public class RemoteChunkingJobConfiguration {
+
+    @Configuration
+    public static class ManagerConfiguration {
+
+        @Autowired
+        private RemoteChunkingManagerStepBuilderFactory managerStepBuilderFactory;
+
+        @Bean
+        public TaskletStep managerStep() {
+            return this.managerStepBuilderFactory.get("managerStep")
+                       .chunk(100)
+                       .reader(itemReader())
+                       .outputChannel(requests()) // requests sent to workers
+                       .inputChannel(replies())   // replies received from workers
+                       .build();
+        }
+
+        // Middleware beans setup omitted
+
+    }
+
+    @Configuration
+    public static class WorkerConfiguration {
+
+        @Autowired
+        private RemoteChunkingWorkerBuilder workerBuilder;
+
+        @Bean
+        public IntegrationFlow workerFlow() {
+            return this.workerBuilder
+                       .itemProcessor(itemProcessor())
+                       .itemWriter(itemWriter())
+                       .inputChannel(requests()) // requests received from the manager
+                       .outputChannel(replies()) // replies sent to the manager
+                       .build();
+        }
+
+        // Middleware beans setup omitted
+
+    }
+
+}
+```
+
+You can find a complete example of a remote chunking job
+[here](https://github.com/spring-projects/spring-batch/tree/master/spring-batch-samples#remote-chunking-sample).
+
+##### Remote Partitioning
+
+![Remote Partitioning](https://docs.spring.io/spring-batch/docs/current/reference/html/images/remote-partitioning.png)
+
+Figure 5. Remote Partitioning
+
+Remote partitioning, on the other hand, is useful when it
+is not the processing of items but rather the associated I/O that
+causes the bottleneck. With remote partitioning, work can
+be farmed out to workers that execute complete Spring Batch
+steps. Thus, each worker has its own `ItemReader`, `ItemProcessor`, and
+`ItemWriter`. For this purpose, Spring Batch
+Integration provides the `MessageChannelPartitionHandler`.
+
+This implementation of the `PartitionHandler`
+interface uses `MessageChannel` instances to
+send instructions to remote workers and receive their responses.
+This provides a nice abstraction from the transports (such as JMS
+and AMQP) being used to communicate with the remote workers.
+The section of the "Scalability" chapter that addresses
+[remote partitioning](scalability.html#partitioning) provides an overview of the concepts and
+components needed to configure remote partitioning and shows an
+example of using the default
+`TaskExecutorPartitionHandler` to partition
+in separate local threads of execution. For remote partitioning
+to multiple JVMs, two additional components are required:
+
+* A remoting fabric or grid environment
+
+* A `PartitionHandler` implementation that supports the desired
+  remoting fabric or grid environment
+
+Similar to remote chunking, JMS can be used as the "remoting fabric". In that case, use
+a `MessageChannelPartitionHandler` instance as the `PartitionHandler` implementation,
+as described earlier.
+
+The following example assumes an existing partitioned job and focuses on the
+`MessageChannelPartitionHandler` and JMS configuration in XML:
+
+XML Configuration
+
+```
+<bean id="partitionHandler"
+    class="org.springframework.batch.integration.partition.MessageChannelPartitionHandler">
+  <property name="stepName" value="step1"/>
+  <property name="gridSize" value="3"/>
+  <property name="replyChannel" ref="outbound-replies"/>
+  <property name="messagingOperations">
+    <bean class="org.springframework.integration.core.MessagingTemplate">
+      <property name="defaultChannel" ref="outbound-requests"/>
+      <property name="receiveTimeout" value="100000"/>
+    </bean>
+  </property>
+</bean>
+
+<int:channel id="outbound-requests"/>
+<int-jms:outbound-channel-adapter destination="requestsQueue"
+    channel="outbound-requests"/>
+
+<int:channel id="inbound-requests"/>
+<int-jms:message-driven-channel-adapter destination="requestsQueue"
+    channel="inbound-requests"/>
+
+<bean id="stepExecutionRequestHandler"
+    class="org.springframework.batch.integration.partition.StepExecutionRequestHandler">
+  <property name="jobExplorer" ref="jobExplorer"/>
+  <property name="stepLocator" ref="stepLocator"/>
+</bean>
+
+<int:service-activator ref="stepExecutionRequestHandler"
+    input-channel="inbound-requests"
+    output-channel="outbound-staging"/>
+
+<int:channel id="outbound-staging"/>
+<int-jms:outbound-channel-adapter destination="stagingQueue"
+    channel="outbound-staging"/>
+
+<int:channel id="inbound-staging"/>
+<int-jms:message-driven-channel-adapter destination="stagingQueue"
+    channel="inbound-staging"/>
+
+<int:aggregator ref="partitionHandler"
+    input-channel="inbound-staging"
+    output-channel="outbound-replies"/>
+
+<int:channel id="outbound-replies">
+  <int:queue/>
+</int:channel>
+
+<bean id="stepLocator"
+    class="org.springframework.batch.integration.partition.BeanFactoryStepLocator"/>
+```
+
+The following example assumes an existing partitioned job and focuses on the
+`MessageChannelPartitionHandler` and JMS configuration in Java:
+
+Java Configuration
+
+```
+/*
+ * Configuration of the manager side
+ */
+@Bean
+public PartitionHandler partitionHandler() {
+    MessageChannelPartitionHandler partitionHandler = new MessageChannelPartitionHandler();
+    partitionHandler.setStepName("step1");
+    partitionHandler.setGridSize(3);
+    partitionHandler.setReplyChannel(outboundReplies());
+    MessagingTemplate template = new MessagingTemplate();
+    template.setDefaultChannel(outboundRequests());
+    template.setReceiveTimeout(100000);
+    partitionHandler.setMessagingOperations(template);
+    return partitionHandler;
+}
+
+@Bean
+public QueueChannel outboundReplies() {
+    return new QueueChannel();
+}
+
+@Bean
+public DirectChannel outboundRequests() {
+    return new DirectChannel();
+}
+
+@Bean
+public IntegrationFlow outboundJmsRequests() {
+    return IntegrationFlows.from("outboundRequests")
+            .handle(Jms.outboundGateway(connectionFactory())
+                    .requestDestination("requestsQueue"))
+            .get();
+}
+
+@Bean
+@ServiceActivator(inputChannel = "inboundStaging")
+public AggregatorFactoryBean partitioningMessageHandler() throws Exception {
+    AggregatorFactoryBean aggregatorFactoryBean = new AggregatorFactoryBean();
+    aggregatorFactoryBean.setProcessorBean(partitionHandler());
+    aggregatorFactoryBean.setOutputChannel(outboundReplies());
+    // configure other properties of the aggregatorFactoryBean
+    return aggregatorFactoryBean;
+}
+
+@Bean
+public DirectChannel inboundStaging() {
+    return new DirectChannel();
+}
+
+@Bean
+public IntegrationFlow inboundJmsStaging() {
+    return IntegrationFlows
+            .from(Jms.messageDrivenChannelAdapter(connectionFactory())
+                    .configureListenerContainer(c -> c.subscriptionDurable(false))
+                    .destination("stagingQueue"))
+            .channel(inboundStaging())
+            .get();
+}
+
+/*
+ * Configuration of the worker side
+ */
+@Bean
+public StepExecutionRequestHandler stepExecutionRequestHandler() {
+    StepExecutionRequestHandler stepExecutionRequestHandler = new StepExecutionRequestHandler();
+    stepExecutionRequestHandler.setJobExplorer(jobExplorer);
+    stepExecutionRequestHandler.setStepLocator(stepLocator());
+    return stepExecutionRequestHandler;
+}
+
+@Bean
+@ServiceActivator(inputChannel = "inboundRequests", outputChannel = "outboundStaging")
+public StepExecutionRequestHandler serviceActivator() throws Exception {
+    return stepExecutionRequestHandler();
+}
+
+@Bean
+public DirectChannel inboundRequests() {
+    return new DirectChannel();
+}
+
+@Bean
+public IntegrationFlow inboundJmsRequests() {
+    return IntegrationFlows
+            .from(Jms.messageDrivenChannelAdapter(connectionFactory())
+                    .configureListenerContainer(c -> c.subscriptionDurable(false))
+                    .destination("requestsQueue"))
+            .channel(inboundRequests())
+            .get();
+}
+
+@Bean
+public DirectChannel outboundStaging() {
+    return new DirectChannel();
+}
+
+@Bean
+public IntegrationFlow outboundJmsStaging() {
+    return IntegrationFlows.from("outboundStaging")
+            .handle(Jms.outboundGateway(connectionFactory())
+                    .requestDestination("stagingQueue"))
+            .get();
+}
+```
+
+You must also ensure that the partition `handler` attribute maps to the `partitionHandler`
+bean.
+
+The following example maps the partition `handler` attribute to the `partitionHandler` in
+XML:
+
+XML Configuration
+
+```
+<job id="personJob">
+  <step id="step1.manager">
+    <partition partitioner="partitioner" handler="partitionHandler"/>
+    ...
+  </step>
+</job>
+```
+
+The following example maps the partition `handler` attribute to the `partitionHandler` in
+Java:
+
+Java Configuration
+
+```
+public Job personJob() {
+    return jobBuilderFactory.get("personJob")
+            .start(stepBuilderFactory.get("step1.manager")
+                    .partitioner("step1.worker", partitioner())
+                    .partitionHandler(partitionHandler())
+                    .build())
+            .build();
+}
+```
+
+You can find a complete example of a remote partitioning job
+[here](https://github.com/spring-projects/spring-batch/tree/master/spring-batch-samples#remote-partitioning-sample).
+
+The `@EnableBatchIntegration` annotation can be used to simplify a remote
+partitioning setup. This annotation provides two beans that are useful for remote partitioning:
+
+* `RemotePartitioningManagerStepBuilderFactory`: used to configure the manager step
+
+* `RemotePartitioningWorkerStepBuilderFactory`: used to configure the worker step
+
+These APIs take care of configuring a number of components, as described in the following diagrams:
+
+![Remote Partitioning Configuration (with job repository polling)](https://docs.spring.io/spring-batch/docs/current/reference/html/images/remote-partitioning-polling-config.png)
+
+Figure 6. Remote Partitioning Configuration (with job repository polling)
+
+![Remote Partitioning Configuration (with replies aggregation)](https://docs.spring.io/spring-batch/docs/current/reference/html/images/remote-partitioning-aggregation-config.png)
+
+Figure 7. Remote Partitioning Configuration (with replies aggregation)
+
+On the manager side, the `RemotePartitioningManagerStepBuilderFactory` lets you
+configure a manager step by declaring:
+
+* the `Partitioner` used to partition data
+
+* the output channel ("Outgoing requests") to send requests to workers
+
+* the input channel ("Incoming replies") to receive replies from workers (when configuring replies aggregation)
+
+* the poll interval and timeout parameters (when configuring job repository polling)
+
+The `MessageChannelPartitionHandler` and the `MessagingTemplate` do not need to be explicitly
+configured (they can still be explicitly configured if required).
+
+On the worker side, the `RemotePartitioningWorkerStepBuilderFactory` lets you configure a worker to:
+
+* listen to requests sent by the manager on the input channel ("Incoming requests")
+
+* call the `handle` method of `StepExecutionRequestHandler` for each request
+
+* send replies on the output channel ("Outgoing replies") to the manager
+
+There is no need to explicitly configure the `StepExecutionRequestHandler` (it can be explicitly configured if required).
The following example shows how to use these APIs:

```
@Configuration
@EnableBatchProcessing
@EnableBatchIntegration
public class RemotePartitioningJobConfiguration {

    @Configuration
    public static class ManagerConfiguration {

        @Autowired
        private RemotePartitioningManagerStepBuilderFactory managerStepBuilderFactory;

        @Bean
        public Step managerStep() {
            return this.managerStepBuilderFactory
                    .get("managerStep")
                    .partitioner("workerStep", partitioner())
                    .gridSize(10)
                    .outputChannel(outgoingRequestsToWorkers())
                    .inputChannel(incomingRepliesFromWorkers())
                    .build();
        }

        // Middleware beans setup omitted

    }

    @Configuration
    public static class WorkerConfiguration {

        @Autowired
        private RemotePartitioningWorkerStepBuilderFactory workerStepBuilderFactory;

        @Bean
        public Step workerStep() {
            return this.workerStepBuilderFactory
                    .get("workerStep")
                    .inputChannel(incomingRequestsFromManager())
                    .outputChannel(outgoingRepliesToManager())
                    .chunk(100)
                    .reader(itemReader())
                    .processor(itemProcessor())
                    .writer(itemWriter())
                    .build();
        }

        // Middleware beans setup omitted

    }

}
```
\ No newline at end of file
diff --git a/docs/en/spring-batch/spring-batch-intro.md b/docs/en/spring-batch/spring-batch-intro.md
new file mode 100644
index 0000000000000000000000000000000000000000..f91446f721572fd654f3f66293815eddf84911db
--- /dev/null
+++ b/docs/en/spring-batch/spring-batch-intro.md
@@ -0,0 +1,569 @@
# Spring Batch Introduction

## Spring Batch Introduction

Many applications within the enterprise domain require bulk processing to perform
business operations in mission-critical environments. These business operations include:

* Automated, complex processing of large volumes of information that is most efficiently
  processed without user interaction. These operations typically include time-based events
  (such as month-end calculations, notices, or correspondence).

* Periodic application of complex business rules processed repetitively across very large
  data sets (for example, insurance benefit determination or rate adjustments).

* Integration of information that is received from internal and external systems that
  typically requires formatting, validation, and processing in a transactional manner into
  the system of record. Batch processing is used to process billions of transactions every
  day for enterprises.

Spring Batch is a lightweight, comprehensive batch framework designed to enable the
development of robust batch applications vital for the daily operations of enterprise
systems. Spring Batch builds upon the characteristics of the Spring Framework that people
have come to expect (productivity, POJO-based development approach, and general ease of
use), while making it easy for developers to access and leverage more advanced enterprise
services when necessary. Spring Batch is not a scheduling framework. There are many good
enterprise schedulers (such as Quartz, Tivoli, and Control-M) available in both the
commercial and open source spaces. It is intended to work in conjunction with a
scheduler, not replace a scheduler.

Spring Batch provides reusable functions that are essential in processing large volumes
of records, including logging/tracing, transaction management, job processing statistics,
job restart, skip, and resource management.
It also provides more advanced technical +services and features that enable extremely high-volume and high performance batch jobs +through optimization and partitioning techniques. Spring Batch can be used in both simple +use cases (such as reading a file into a database or running a stored procedure) as well +as complex, high volume use cases (such as moving high volumes of data between databases, +transforming it, and so on). High-volume batch jobs can leverage the framework in a +highly scalable manner to process significant volumes of information. + +### Background + +While open source software projects and associated communities have focused greater +attention on web-based and microservices-based architecture frameworks, there has been a +notable lack of focus on reusable architecture frameworks to accommodate Java-based batch +processing needs, despite continued needs to handle such processing within enterprise IT +environments. The lack of a standard, reusable batch architecture has resulted in the +proliferation of many one-off, in-house solutions developed within client enterprise IT +functions. + +SpringSource (now Pivotal) and Accenture collaborated to change this. Accenture’s +hands-on industry and technical experience in implementing batch architectures, +SpringSource’s depth of technical experience, and Spring’s proven programming model +together made a natural and powerful partnership to create high-quality, market-relevant +software aimed at filling an important gap in enterprise Java. Both companies worked with +a number of clients who were solving similar problems by developing Spring-based batch +architecture solutions. This provided some useful additional detail and real-life +constraints that helped to ensure the solution can be applied to the real-world problems +posed by clients. + +Accenture contributed previously proprietary batch processing architecture frameworks to +the Spring Batch project, along with committer resources to drive support, enhancements, +and the existing feature set. Accenture’s contribution was based upon decades of +experience in building batch architectures with the last several generations of +platforms: COBOL/Mainframe, C++/Unix, and now Java/anywhere. + +The collaborative effort between Accenture and SpringSource aimed to promote the +standardization of software processing approaches, frameworks, and tools that can be +consistently leveraged by enterprise users when creating batch applications. Companies +and government agencies desiring to deliver standard, proven solutions to their +enterprise IT environments can benefit from Spring Batch. + +### Usage Scenarios + +A typical batch program generally: + +* Reads a large number of records from a database, file, or queue. + +* Processes the data in some fashion. + +* Writes back data in a modified form. + +Spring Batch automates this basic batch iteration, providing the capability to process +similar transactions as a set, typically in an offline environment without any user +interaction. Batch jobs are part of most IT projects, and Spring Batch is the only open +source framework that provides a robust, enterprise-scale solution. 
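
As a rough sketch of how this read-process-write iteration looks when wired up with Spring Batch, consider the following step definition. The `Person` type and the reader, processor, and writer bean names here are hypothetical, introduced only for illustration:

```
// A minimal sketch of the read-process-write pattern described above.
// The Person type, bean names, and data formats are hypothetical.
@Bean
public Step copyPersonsStep(StepBuilderFactory stepBuilderFactory,
                            ItemReader<Person> personFileReader,
                            ItemProcessor<Person, Person> personProcessor,
                            ItemWriter<Person> personTableWriter) {
    return stepBuilderFactory.get("copyPersonsStep")
            // read and write Person items, committing every 10 items
            .<Person, Person>chunk(10)
            .reader(personFileReader)    // for example, a FlatFileItemReader
            .processor(personProcessor)  // modifies each record in some fashion
            .writer(personTableWriter)   // for example, a JdbcBatchItemWriter
            .build();
}
```

The framework drives the iteration (reading, processing, writing, and committing), while the three injected components carry the application-specific logic.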
+ +Business Scenarios + +* Commit batch process periodically + +* Concurrent batch processing: parallel processing of a job + +* Staged, enterprise message-driven processing + +* Massively parallel batch processing + +* Manual or scheduled restart after failure + +* Sequential processing of dependent steps (with extensions to workflow-driven batches) + +* Partial processing: skip records (for example, on rollback) + +* Whole-batch transaction, for cases with a small batch size or existing stored + procedures/scripts + +Technical Objectives + +* Batch developers use the Spring programming model: Concentrate on business logic and + let the framework take care of infrastructure. + +* Clear separation of concerns between the infrastructure, the batch execution + environment, and the batch application. + +* Provide common, core execution services as interfaces that all projects can implement. + +* Provide simple and default implementations of the core execution interfaces that can be + used 'out of the box'. + +* Easy to configure, customize, and extend services, by leveraging the spring framework + in all layers. + +* All existing core services should be easy to replace or extend, without any impact to + the infrastructure layer. + +* Provide a simple deployment model, with the architecture JARs completely separate from + the application, built using Maven. + +### Spring Batch Architecture + +Spring Batch is designed with extensibility and a diverse group of end users in mind. The +figure below shows the layered architecture that supports the extensibility and ease of +use for end-user developers. + +![Figure 1.1: Spring Batch Layered Architecture](https://docs.spring.io/spring-batch/docs/current/reference/html/images/spring-batch-layers.png) + +Figure 1. Spring Batch Layered Architecture + +This layered architecture highlights three major high-level components: Application, +Core, and Infrastructure. The application contains all batch jobs and custom code written +by developers using Spring Batch. The Batch Core contains the core runtime classes +necessary to launch and control a batch job. It includes implementations for`JobLauncher`, `Job`, and `Step`. Both Application and Core are built on top of a common +infrastructure. This infrastructure contains common readers and writers and services +(such as the `RetryTemplate`), which are used both by application developers(readers and +writers, such as `ItemReader` and `ItemWriter`) and the core framework itself (retry, +which is its own library). + +### General Batch Principles and Guidelines + +The following key principles, guidelines, and general considerations should be considered +when building a batch solution. + +* Remember that a batch architecture typically affects on-line architecture and vice + versa. Design with both architectures and environments in mind using common building + blocks when possible. + +* Simplify as much as possible and avoid building complex logical structures in single + batch applications. + +* Keep the processing and storage of data physically close together (in other words, keep + your data where your processing occurs). + +* Minimize system resource use, especially I/O. Perform as many operations as possible in + internal memory. + +* Review application I/O (analyze SQL statements) to ensure that unnecessary physical I/O + is avoided. 
In particular, the following four common flaws need to be looked for: + + * Reading data for every transaction when the data could be read once and cached or kept + in the working storage. + + * Rereading data for a transaction where the data was read earlier in the same + transaction. + + * Causing unnecessary table or index scans. + + * Not specifying key values in the WHERE clause of an SQL statement. + +* Do not do things twice in a batch run. For instance, if you need data summarization for + reporting purposes, you should (if possible) increment stored totals when data is being + initially processed, so your reporting application does not have to reprocess the same + data. + +* Allocate enough memory at the beginning of a batch application to avoid time-consuming + reallocation during the process. + +* Always assume the worst with regard to data integrity. Insert adequate checks and + record validation to maintain data integrity. + +* Implement checksums for internal validation where possible. For example, flat files + should have a trailer record telling the total of records in the file and an aggregate of + the key fields. + +* Plan and execute stress tests as early as possible in a production-like environment + with realistic data volumes. + +* In large batch systems, backups can be challenging, especially if the system is running + concurrent with on-line on a 24-7 basis. Database backups are typically well taken care + of in the on-line design, but file backups should be considered to be just as important. + If the system depends on flat files, file backup procedures should not only be in place + and documented but be regularly tested as well. + +### Batch Processing Strategies + +To help design and implement batch systems, basic batch application building blocks and +patterns should be provided to the designers and programmers in the form of sample +structure charts and code shells. When starting to design a batch job, the business logic +should be decomposed into a series of steps that can be implemented using the following +standard building blocks: + +* *Conversion Applications:* For each type of file supplied by or generated to an + external system, a conversion application must be created to convert the transaction + records supplied into a standard format required for processing. This type of batch + application can partly or entirely consist of translation utility modules (see Basic + Batch Services). + +* *Validation Applications:* Validation applications ensure that all input/output + records are correct and consistent. Validation is typically based on file headers and + trailers, checksums and validation algorithms, and record level cross-checks. + +* *Extract Applications:* An application that reads a set of records from a database or + input file, selects records based on predefined rules, and writes the records to an + output file. + +* *Extract/Update Applications:* An application that reads records from a database or + an input file and makes changes to a database or an output file driven by the data found + in each input record. + +* *Processing and Updating Applications:* An application that performs processing on + input transactions from an extract or a validation application. The processing usually + involves reading a database to obtain data required for processing, potentially updating + the database and creating records for output processing. 
+ +* *Output/Format Applications:* Applications that read an input file, restructure data + from this record according to a standard format, and produce an output file for printing + or transmission to another program or system. + +Additionally, a basic application shell should be provided for business logic that cannot +be built using the previously mentioned building blocks. + +In addition to the main building blocks, each application may use one or more of standard +utility steps, such as: + +* Sort: A program that reads an input file and produces an output file where records + have been re-sequenced according to a sort key field in the records. Sorts are usually + performed by standard system utilities. + +* Split: A program that reads a single input file and writes each record to one of + several output files based on a field value. Splits can be tailored or performed by + parameter-driven standard system utilities. + +* Merge: A program that reads records from multiple input files and produces one output + file with combined data from the input files. Merges can be tailored or performed by + parameter-driven standard system utilities. + +Batch applications can additionally be categorized by their input source: + +* Database-driven applications are driven by rows or values retrieved from the database. + +* File-driven applications are driven by records or values retrieved from a file. + +* Message-driven applications are driven by messages retrieved from a message queue. + +The foundation of any batch system is the processing strategy. Factors affecting the +selection of the strategy include: estimated batch system volume, concurrency with +on-line systems or with other batch systems, available batch windows. (Note that, with +more enterprises wanting to be up and running 24x7, clear batch windows are +disappearing). + +Typical processing options for batch are (in increasing order of implementation +complexity): + +* Normal processing during a batch window in off-line mode. + +* Concurrent batch or on-line processing. + +* Parallel processing of many different batch runs or jobs at the same time. + +* Partitioning (processing of many instances of the same job at the same time). + +* A combination of the preceding options. + +Some or all of these options may be supported by a commercial scheduler. + +The following section discusses these processing options in more detail. It is important +to notice that, as a rule of thumb, the commit and locking strategy adopted by batch +processes depends on the type of processing performed and that the on-line locking +strategy should also use the same principles. Therefore, the batch architecture cannot be +simply an afterthought when designing an overall architecture. + +The locking strategy can be to use only normal database locks or to implement an +additional custom locking service in the architecture. The locking service would track +database locking (for example, by storing the necessary information in a dedicated +db-table) and give or deny permissions to the application programs requesting a db +operation. Retry logic could also be implemented by this architecture to avoid aborting a +batch job in case of a lock situation. + +**1. Normal processing in a batch window** For simple batch processes running in a separate +batch window where the data being updated is not required by on-line users or other batch +processes, concurrency is not an issue and a single commit can be done at the end of the +batch run. 
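
For illustration, the following is a minimal sketch of this single-commit style in plain JDBC. The `account` table, the update statements, and the configured `dataSource` are hypothetical, shown only to make the commit strategy concrete:

```
// A sketch of option 1: one transaction around the whole batch run.
// Suitable only when no on-line users or other processes need the
// data while the job is running. Table names are hypothetical.
try (Connection connection = dataSource.getConnection()) {
    connection.setAutoCommit(false); // the entire run is one unit of work
    try (Statement statement = connection.createStatement()) {
        // process all records in the batch window
        statement.executeUpdate("UPDATE account SET balance = balance + pending_interest");
        statement.executeUpdate("UPDATE account SET pending_interest = 0");
        connection.commit(); // single commit at the end of the batch run
    } catch (SQLException e) {
        connection.rollback(); // undo the whole run on any failure
        throw e;
    }
}
```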
+ +In most cases, a more robust approach is more appropriate. Keep in mind that batch +systems have a tendency to grow as time goes by, both in terms of complexity and the data +volumes they handle. If no locking strategy is in place and the system still relies on a +single commit point, modifying the batch programs can be painful. Therefore, even with +the simplest batch systems, consider the need for commit logic for restart-recovery +options as well as the information concerning the more complex cases described later in +this section. + +**2. Concurrent batch or on-line processing** Batch applications processing data that can +be simultaneously updated by on-line users should not lock any data (either in the +database or in files) which could be required by on-line users for more than a few +seconds. Also, updates should be committed to the database at the end of every few +transactions. This minimizes the portion of data that is unavailable to other processes +and the elapsed time the data is unavailable. + +Another option to minimize physical locking is to have logical row-level locking +implemented with either an Optimistic Locking Pattern or a Pessimistic Locking Pattern. + +* Optimistic locking assumes a low likelihood of record contention. It typically means + inserting a timestamp column in each database table used concurrently by both batch and + on-line processing. When an application fetches a row for processing, it also fetches the + timestamp. As the application then tries to update the processed row, the update uses the + original timestamp in the WHERE clause. If the timestamp matches, the data and the + timestamp are updated. If the timestamp does not match, this indicates that another + application has updated the same row between the fetch and the update attempt. Therefore, + the update cannot be performed. + +* Pessimistic locking is any locking strategy that assumes there is a high likelihood of + record contention and therefore either a physical or logical lock needs to be obtained at + retrieval time. One type of pessimistic logical locking uses a dedicated lock-column in + the database table. When an application retrieves the row for update, it sets a flag in + the lock column. With the flag in place, other applications attempting to retrieve the + same row logically fail. When the application that sets the flag updates the row, it also + clears the flag, enabling the row to be retrieved by other applications. Please note that + the integrity of data must be maintained also between the initial fetch and the setting + of the flag, for example by using db locks (such as `SELECT FOR UPDATE`). Note also that + this method suffers from the same downside as physical locking except that it is somewhat + easier to manage building a time-out mechanism that gets the lock released if the user + goes to lunch while the record is locked. + +These patterns are not necessarily suitable for batch processing, but they might be used +for concurrent batch and on-line processing (such as in cases where the database does not +support row-level locking). As a general rule, optimistic locking is more suitable for +on-line applications, while pessimistic locking is more suitable for batch applications. +Whenever logical locking is used, the same scheme must be used for all applications +accessing data entities protected by logical locks. + +Note that both of these solutions only address locking a single record. Often, we may +need to lock a logically related group of records. 
With physical locks, you have to +manage these very carefully in order to avoid potential deadlocks. With logical locks, it +is usually best to build a logical lock manager that understands the logical record +groups you want to protect and that can ensure that locks are coherent and +non-deadlocking. This logical lock manager usually uses its own tables for lock +management, contention reporting, time-out mechanism, and other concerns. + +**3. Parallel Processing** Parallel processing allows multiple batch runs or jobs to run in +parallel to minimize the total elapsed batch processing time. This is not a problem as +long as the jobs are not sharing the same files, db-tables, or index spaces. If they do, +this service should be implemented using partitioned data. Another option is to build an +architecture module for maintaining interdependencies by using a control table. A control +table should contain a row for each shared resource and whether it is in use by an +application or not. The batch architecture or the application in a parallel job would +then retrieve information from that table to determine if it can get access to the +resource it needs or not. + +If the data access is not a problem, parallel processing can be implemented through the +use of additional threads to process in parallel. In the mainframe environment, parallel +job classes have traditionally been used, in order to ensure adequate CPU time for all +the processes. Regardless, the solution has to be robust enough to ensure time slices for +all the running processes. + +Other key issues in parallel processing include load balancing and the availability of +general system resources such as files, database buffer pools, and so on. Also note that +the control table itself can easily become a critical resource. + +**4. Partitioning** Using partitioning allows multiple versions of large batch applications +to run concurrently. The purpose of this is to reduce the elapsed time required to +process long batch jobs. Processes that can be successfully partitioned are those where +the input file can be split and/or the main database tables partitioned to allow the +application to run against different sets of data. + +In addition, processes which are partitioned must be designed to only process their +assigned data set. A partitioning architecture has to be closely tied to the database +design and the database partitioning strategy. Note that database partitioning does not +necessarily mean physical partitioning of the database, although in most cases this is +advisable. The following picture illustrates the partitioning approach: + +![Figure 1.2: Partitioned Process](https://docs.spring.io/spring-batch/docs/current/reference/html/images/partitioned.png) + +Figure 2. Partitioned Process + +The architecture should be flexible enough to allow dynamic configuration of the number +of partitions. Both automatic and user controlled configuration should be considered. +Automatic configuration may be based on parameters such as the input file size and the +number of input records. + +**4.1 Partitioning Approaches** Selecting a partitioning approach has to be done on a +case-by-case basis. The following list describes some of the possible partitioning +approaches: + +*1. Fixed and Even Break-Up of Record Set* + +This involves breaking the input record set into an even number of portions (for example, +10, where each portion has exactly 1/10th of the entire record set). 
Each portion is then +processed by one instance of the batch/extract application. + +In order to use this approach, preprocessing is required to split the record set up. The +result of this split will be a lower and upper bound placement number which can be used +as input to the batch/extract application in order to restrict its processing to only its +portion. + +Preprocessing could be a large overhead, as it has to calculate and determine the bounds +of each portion of the record set. + +*2. Break up by a Key Column* + +This involves breaking up the input record set by a key column, such as a location code, +and assigning data from each key to a batch instance. In order to achieve this, column +values can be either: + +* Assigned to a batch instance by a partitioning table (described later in this + section). + +* Assigned to a batch instance by a portion of the value (such as 0000-0999, 1000 - 1999, + and so on). + +Under option 1, adding new values means a manual reconfiguration of the batch/extract to +ensure that the new value is added to a particular instance. + +Under option 2, this ensures that all values are covered via an instance of the batch +job. However, the number of values processed by one instance is dependent on the +distribution of column values (there may be a large number of locations in the 0000-0999 +range, and few in the 1000-1999 range). Under this option, the data range should be +designed with partitioning in mind. + +Under both options, the optimal even distribution of records to batch instances cannot be +realized. There is no dynamic configuration of the number of batch instances used. + +*3. Breakup by Views* + +This approach is basically breakup by a key column but on the database level. It involves +breaking up the record set into views. These views are used by each instance of the batch +application during its processing. The breakup is done by grouping the data. + +With this option, each instance of a batch application has to be configured to hit a +particular view (instead of the master table). Also, with the addition of new data +values, this new group of data has to be included into a view. There is no dynamic +configuration capability, as a change in the number of instances results in a change to +the views. + +*4. Addition of a Processing Indicator* + +This involves the addition of a new column to the input table, which acts as an +indicator. As a preprocessing step, all indicators are marked as being non-processed. +During the record fetch stage of the batch application, records are read on the condition +that that record is marked as being non-processed, and once they are read (with lock), +they are marked as being in processing. When that record is completed, the indicator is +updated to either complete or error. Many instances of a batch application can be started +without a change, as the additional column ensures that a record is only processed once. + +With this option, I/O on the table increases dynamically. In the case of an updating +batch application, this impact is reduced, as a write must occur anyway. + +*5. Extract Table to a Flat File* + +This involves the extraction of the table into a file. This file can then be split into +multiple segments and used as input to the batch instances. + +With this option, the additional overhead of extracting the table into a file and +splitting it may cancel out the effect of multi-partitioning. Dynamic configuration can +be achieved by changing the file splitting script. + +*6. 
Use of a Hashing Column* + +This scheme involves the addition of a hash column (key/index) to the database tables +used to retrieve the driver record. This hash column has an indicator to determine which +instance of the batch application processes this particular row. For example, if there +are three batch instances to be started, then an indicator of 'A' marks a row for +processing by instance 1, an indicator of 'B' marks a row for processing by instance 2, +and an indicator of 'C' marks a row for processing by instance 3. + +The procedure used to retrieve the records would then have an additional `WHERE` clause +to select all rows marked by a particular indicator. The inserts in this table would +involve the addition of the marker field, which would be defaulted to one of the +instances (such as 'A'). + +A simple batch application would be used to update the indicators, such as to +redistribute the load between the different instances. When a sufficiently large number +of new rows have been added, this batch can be run (anytime, except in the batch window) +to redistribute the new rows to other instances. + +Additional instances of the batch application only require the running of the batch +application as described in the preceding paragraphs to redistribute the indicators to +work with a new number of instances. + +**4.2 Database and Application Design Principles** + +An architecture that supports multi-partitioned applications which run against +partitioned database tables using the key column approach should include a central +partition repository for storing partition parameters. This provides flexibility and +ensures maintainability. The repository generally consists of a single table, known as +the partition table. + +Information stored in the partition table is static and, in general, should be maintained +by the DBA. The table should consist of one row of information for each partition of a +multi-partitioned application. The table should have columns for Program ID Code, +Partition Number (logical ID of the partition), Low Value of the db key column for this +partition, and High Value of the db key column for this partition. + +On program start-up, the program `id` and partition number should be passed to the +application from the architecture (specifically, from the Control Processing Tasklet). If +a key column approach is used, these variables are used to read the partition table in +order to determine what range of data the application is to process. In addition the +partition number must be used throughout the processing to: + +* Add to the output files/database updates in order for the merge process to work + properly. + +* Report normal processing to the batch log and any errors to the architecture error + handler. + +**4.3 Minimizing Deadlocks** + +When applications run in parallel or are partitioned, contention in database resources +and deadlocks may occur. It is critical that the database design team eliminates +potential contention situations as much as possible as part of the database design. + +Also, the developers must ensure that the database index tables are designed with +deadlock prevention and performance in mind. + +Deadlocks or hot spots often occur in administration or architecture tables, such as log +tables, control tables, and lock tables. The implications of these should be taken into +account as well. A realistic stress test is crucial for identifying the possible +bottlenecks in the architecture. 
+ +To minimize the impact of conflicts on data, the architecture should provide services +such as wait-and-retry intervals when attaching to a database or when encountering a +deadlock. This means a built-in mechanism to react to certain database return codes and, +instead of issuing an immediate error, waiting a predetermined amount of time and +retrying the database operation. + +**4.4 Parameter Passing and Validation** + +The partition architecture should be relatively transparent to application developers. +The architecture should perform all tasks associated with running the application in a +partitioned mode, including: + +* Retrieving partition parameters before application start-up. + +* Validating partition parameters before application start-up. + +* Passing parameters to the application at start-up. + +The validation should include checks to ensure that: + +* The application has sufficient partitions to cover the whole data range. + +* There are no gaps between partitions. + +If the database is partitioned, some additional validation may be necessary to ensure +that a single partition does not span database partitions. + +Also, the architecture should take into consideration the consolidation of partitions. +Key questions include: + +* Must all the partitions be finished before going into the next job step? + +* What happens if one of the partitions aborts? \ No newline at end of file diff --git a/docs/en/spring-batch/step.md b/docs/en/spring-batch/step.md new file mode 100644 index 0000000000000000000000000000000000000000..e301031bba80552a4fe2340860fbcbaf0e95526b --- /dev/null +++ b/docs/en/spring-batch/step.md @@ -0,0 +1,2194 @@ +# Configuring a Step + +## Configuring a `Step` + +XMLJavaBoth + +As discussed in [the domain chapter](domain.html#domainLanguageOfBatch), a `Step` is a +domain object that encapsulates an independent, sequential phase of a batch job and +contains all of the information necessary to define and control the actual batch +processing. This is a necessarily vague description because the contents of any given`Step` are at the discretion of the developer writing a `Job`. A `Step` can be as simple +or complex as the developer desires. A simple `Step` might load data from a file into the +database, requiring little or no code (depending upon the implementations used). A more +complex `Step` might have complicated business rules that are applied as part of the +processing, as shown in the following image: + +![Step](https://docs.spring.io/spring-batch/docs/current/reference/html/images/step.png) + +Figure 1. Step + +### Chunk-oriented Processing + +Spring Batch uses a 'Chunk-oriented' processing style within its most common +implementation. Chunk oriented processing refers to reading the data one at a time and +creating 'chunks' that are written out within a transaction boundary. Once the number of +items read equals the commit interval, the entire chunk is written out by the`ItemWriter`, and then the transaction is committed. The following image shows the +process: + +![Chunk Oriented Processing](https://docs.spring.io/spring-batch/docs/current/reference/html/images/chunk-oriented-processing.png) + +Figure 2. 
Chunk-oriented Processing

The following pseudo code shows the same concepts in a simplified form:

```
List items = new ArrayList();
for(int i = 0; i < commitInterval; i++){
    Object item = itemReader.read();
    if (item != null) {
        items.add(item);
    }
}
itemWriter.write(items);
```

A chunk-oriented step can also be configured with an optional `ItemProcessor` to process items before passing them to the `ItemWriter`. The following image
shows the process when an `ItemProcessor` is registered in the step:

![Chunk Oriented Processing With Item Processor](https://docs.spring.io/spring-batch/docs/current/reference/html/images/chunk-oriented-processing-with-item-processor.png)

Figure 3. Chunk-oriented Processing with Item Processor

The following pseudo code shows how this is implemented in a simplified form:

```
List items = new ArrayList();
for(int i = 0; i < commitInterval; i++){
    Object item = itemReader.read();
    if (item != null) {
        items.add(item);
    }
}

List processedItems = new ArrayList();
for(Object item: items){
    Object processedItem = itemProcessor.process(item);
    if (processedItem != null) {
        processedItems.add(processedItem);
    }
}

itemWriter.write(processedItems);
```

For more details about item processors and their use cases, please refer to the [Item processing](processor.html#itemProcessor) section.

#### Configuring a `Step`

Despite the relatively short list of required dependencies for a `Step`, it is an
extremely complex class that can potentially contain many collaborators.

In order to ease configuration, the Spring Batch XML namespace can be used, as shown in
the following example:

XML Configuration

```
<job id="sampleJob" job-repository="jobRepository">
    <step id="step1">
        <tasklet transaction-manager="transactionManager">
            <chunk reader="itemReader" writer="itemWriter" commit-interval="10"/>
        </tasklet>
    </step>
</job>
```

When using Java configuration, the Spring Batch builders can be used, as shown in the
following example:

Java Configuration

```
/**
 * Note the JobRepository is typically autowired in and not needed to be explicitly
 * configured
 */
@Bean
public Job sampleJob(JobRepository jobRepository, Step sampleStep) {
    return this.jobBuilderFactory.get("sampleJob")
                .repository(jobRepository)
                .start(sampleStep)
                .build();
}

/**
 * Note the TransactionManager is typically autowired in and not needed to be explicitly
 * configured
 */
@Bean
public Step sampleStep(PlatformTransactionManager transactionManager) {
    return this.stepBuilderFactory.get("sampleStep")
                .transactionManager(transactionManager)
                .<String, String>chunk(10)
                .reader(itemReader())
                .writer(itemWriter())
                .build();
}
```

The preceding configuration includes the only required dependencies needed to create an item-oriented
step:

* `reader`: The `ItemReader` that provides items for processing.

* `writer`: The `ItemWriter` that processes the items provided by the `ItemReader`.

* `transaction-manager`: Spring’s `PlatformTransactionManager` that begins and commits
  transactions during processing.

* `transactionManager`: Spring’s `PlatformTransactionManager` that begins and commits
  transactions during processing.

* `job-repository`: The XML-specific name of the `JobRepository` that periodically stores
  the `StepExecution` and `ExecutionContext` during processing (just before committing). For
  an in-line `<step/>` (one defined within a `<job/>`), it is an attribute on the `<job/>` element. For a standalone `<step/>`, it is defined as an attribute of the `<step/>` element.

* `repository`: The Java-specific name of the `JobRepository` that periodically stores
  the `StepExecution` and `ExecutionContext` during processing (just before committing).
* `commit-interval`: The XML-specific name of the number of items to be processed
  before the transaction is committed.

* `chunk`: The Java-specific name of the dependency that indicates that this is an
  item-based step and the number of items to be processed before the transaction is
  committed.

It should be noted that `job-repository` defaults to `jobRepository` and `transaction-manager` defaults to `transactionManager`. Also, the `ItemProcessor` is
optional, since the item could be directly passed from the reader to the writer.

It should be noted that `repository` defaults to `jobRepository` and `transactionManager` defaults to `transactionManager` (all provided through the infrastructure from `@EnableBatchProcessing`). Also, the `ItemProcessor` is optional, since the item could be
directly passed from the reader to the writer.

#### Inheriting from a Parent `Step`

If a group of `Steps` share similar configurations, then it may be helpful to define a
"parent" `Step` from which the concrete `Steps` may inherit properties. Similar to class
inheritance in Java, the "child" `Step` combines its elements and attributes with the
parent’s. The child also overrides any of the parent’s settings.

In the following example, the `Step`, "concreteStep1", inherits from "parentStep". It is
instantiated with 'itemReader', 'itemProcessor', 'itemWriter', `startLimit=5`, and `allowStartIfComplete=true`. Additionally, the `commitInterval` is '5', since it is
overridden by the "concreteStep1" `Step`, as shown in the following example:

```
<step id="parentStep">
    <tasklet allow-start-if-complete="true">
        <chunk reader="itemReader" writer="itemWriter" commit-interval="10"/>
    </tasklet>
</step>

<step id="concreteStep1" parent="parentStep">
    <tasklet start-limit="5">
        <chunk processor="itemProcessor" commit-interval="5"/>
    </tasklet>
</step>
```

The `id` attribute is still required on the step within the job element. This is for two
reasons:

* The `id` is used as the step name when persisting the `StepExecution`. If the same
  standalone step is referenced in more than one step in the job, an error occurs.

* When creating job flows, as described later in this chapter, the `next` attribute
  should refer to the step in the flow, not the standalone step.

##### Abstract `Step`

Sometimes, it may be necessary to define a parent `Step` that is not a complete `Step` configuration. If, for instance, the `reader`, `writer`, and `tasklet` attributes are
left off of a `Step` configuration, then initialization fails. If a parent must be
defined without these properties, then the `abstract` attribute should be used. An `abstract` `Step` is only extended, never instantiated.

In the following example, the `Step` `abstractParentStep` would not be instantiated if it
were not declared to be abstract. The `Step`, "concreteStep2", has 'itemReader',
'itemWriter', and `commit-interval=10`.

```
<step id="abstractParentStep" abstract="true">
    <tasklet>
        <chunk commit-interval="10"/>
    </tasklet>
</step>

<step id="concreteStep2" parent="abstractParentStep">
    <tasklet>
        <chunk reader="itemReader" writer="itemWriter"/>
    </tasklet>
</step>
```

##### Merging Lists

Some of the configurable elements on `Steps` are lists, such as the `<listeners/>` element.
If both the parent and child `Steps` declare a `<listeners/>` element, then the
child’s list overrides the parent’s. In order to allow a child to add additional
listeners to the list defined by the parent, every list element has a `merge` attribute.
If the element specifies that `merge="true"`, then the child’s list is combined with the
parent’s instead of overriding it.

In the following example, the `Step` "concreteStep3", is created with two listeners: `listenerOne` and `listenerTwo`:

```
<step id="listenersParentStep" abstract="true">
    <listeners>
        <listener ref="listenerOne"/>
    </listeners>
</step>

<step id="concreteStep3" parent="listenersParentStep">
    <tasklet>
        <chunk reader="itemReader" writer="itemWriter" commit-interval="5"/>
    </tasklet>
    <listeners merge="true">
        <listener ref="listenerTwo"/>
    </listeners>
</step>
```

#### The Commit Interval

As mentioned previously, a step reads in and writes out items, periodically committing
using the supplied `PlatformTransactionManager`.
With a `commit-interval` of 1, it
commits after writing each individual item. This is less than ideal in many situations,
since beginning and committing a transaction is expensive. Ideally, it is preferable to
process as many items as possible in each transaction, which is completely dependent upon
the type of data being processed and the resources with which the step is interacting.
For this reason, the number of items that are processed within a commit can be
configured.

The following example shows a `step` whose `tasklet` has a `commit-interval` value of 10 as it would be defined in XML:

XML Configuration

```
<job id="sampleJob">
    <step id="step1">
        <tasklet>
            <chunk reader="itemReader" writer="itemWriter" commit-interval="10"/>
        </tasklet>
    </step>
</job>
```

The following example shows a `step` whose `tasklet` has a `commit-interval` value of 10 as it would be defined in Java:

Java Configuration

```
@Bean
public Job sampleJob() {
    return this.jobBuilderFactory.get("sampleJob")
                .start(step1())
                .build();
}

@Bean
public Step step1() {
    return this.stepBuilderFactory.get("step1")
                .<String, String>chunk(10)
                .reader(itemReader())
                .writer(itemWriter())
                .build();
}
```

In the preceding example, 10 items are processed within each transaction. At the
beginning of processing, a transaction is begun. Also, each time `read` is called on the `ItemReader`, a counter is incremented. When it reaches 10, the list of aggregated items
is passed to the `ItemWriter`, and the transaction is committed.

#### Configuring a `Step` for Restart

In the "[Configuring and Running a Job](job.html#configureJob)" section, restarting a `Job` was discussed. Restart has numerous impacts on steps and, consequently, may
require some specific configuration.

##### Setting a Start Limit

There are many scenarios where you may want to control the number of times a `Step` may
be started. For example, a particular `Step` might need to be configured so that it only
runs once because it invalidates some resource that must be fixed manually before it can
be run again. This is configurable on the step level, since different steps may have
different requirements. A `Step` that may only be executed once can exist as part of the
same `Job` as a `Step` that can be run infinitely.

The following code fragment shows an example of a start limit configuration in XML:

XML Configuration

```
<step id="step1">
    <tasklet start-limit="1">
        <chunk reader="itemReader" writer="itemWriter" commit-interval="10"/>
    </tasklet>
</step>
```

The following code fragment shows an example of a start limit configuration in Java:

Java Configuration

```
@Bean
public Step step1() {
    return this.stepBuilderFactory.get("step1")
                .<String, String>chunk(10)
                .reader(itemReader())
                .writer(itemWriter())
                .startLimit(1)
                .build();
}
```

The step shown in the preceding example can be run only once. Attempting to run it again
causes a `StartLimitExceededException` to be thrown. Note that the default value for the
start-limit is `Integer.MAX_VALUE`.

##### Restarting a Completed `Step`

In the case of a restartable job, there may be one or more steps that should always be
run, regardless of whether or not they were successful the first time. An example might
be a validation step or a `Step` that cleans up resources before processing. During
normal processing of a restarted job, any step with a status of 'COMPLETED', meaning it
has already been completed successfully, is skipped. Setting `allow-start-if-complete` to
"true" overrides this so that the step always runs.
The following code fragment shows how to define a restartable job in XML:

XML Configuration

```
<step id="step1">
    <tasklet allow-start-if-complete="true">
        <chunk reader="itemReader" writer="itemWriter" commit-interval="10"/>
    </tasklet>
</step>
```

The following code fragment shows how to define a restartable job in Java:

Java Configuration

```
@Bean
public Step step1() {
    return this.stepBuilderFactory.get("step1")
                .<String, String>chunk(10)
                .reader(itemReader())
                .writer(itemWriter())
                .allowStartIfComplete(true)
                .build();
}
```

##### `Step` Restart Configuration Example

The following XML example shows how to configure a job to have steps that can be
restarted:

XML Configuration

```
<job id="footballJob" restartable="true">
    <step id="playerload" next="gameLoad">
        <tasklet>
            <chunk reader="playerFileItemReader" writer="playerWriter"
                   commit-interval="10" />
        </tasklet>
    </step>
    <step id="gameLoad" next="playerSummarization">
        <tasklet allow-start-if-complete="true">
            <chunk reader="gameFileItemReader" writer="gameWriter"
                   commit-interval="10"/>
        </tasklet>
    </step>
    <step id="playerSummarization">
        <tasklet start-limit="2">
            <chunk reader="playerSummarizationSource" writer="summaryWriter"
                   commit-interval="10"/>
        </tasklet>
    </step>
</job>
```

The following Java example shows how to configure a job to have steps that can be
restarted:

Java Configuration

```
@Bean
public Job footballJob() {
    return this.jobBuilderFactory.get("footballJob")
                .start(playerLoad())
                .next(gameLoad())
                .next(playerSummarization())
                .build();
}

@Bean
public Step playerLoad() {
    return this.stepBuilderFactory.get("playerLoad")
            .<String, String>chunk(10)
            .reader(playerFileItemReader())
            .writer(playerWriter())
            .build();
}

@Bean
public Step gameLoad() {
    return this.stepBuilderFactory.get("gameLoad")
            .allowStartIfComplete(true)
            .<String, String>chunk(10)
            .reader(gameFileItemReader())
            .writer(gameWriter())
            .build();
}

@Bean
public Step playerSummarization() {
    return this.stepBuilderFactory.get("playerSummarization")
            .startLimit(2)
            .<String, String>chunk(10)
            .reader(playerSummarizationSource())
            .writer(summaryWriter())
            .build();
}
```

The preceding example configuration is for a job that loads in information about football
games and summarizes them. It contains three steps: `playerLoad`, `gameLoad`, and `playerSummarization`. The `playerLoad` step loads player information from a flat file,
while the `gameLoad` step does the same for games. The final step, `playerSummarization`, then summarizes the statistics for each player, based upon the
provided games. It is assumed that the file loaded by `playerLoad` must be loaded only
once but that `gameLoad` can load any games found within a particular directory,
deleting them after they have been successfully loaded into the database. As a result,
the `playerLoad` step contains no additional configuration. It can be started any number
of times, and, if complete, is skipped. The `gameLoad` step, however, needs to be run
every time in case extra files have been added since it last ran. It has
'allow-start-if-complete' set to 'true' in order to always be started. (It is assumed
that the database table that games are loaded into has a process indicator on it, to ensure
new games can be properly found by the summarization step.) The summarization step,
which is the most important in the job, is configured to have a start limit of 2. This
is useful because, if the step continually fails, a new exit code is returned to the
operators that control job execution, and it cannot start again until manual
intervention has taken place.

| |This job provides an example for this document and is not the same as the `footballJob` found in the samples project.|
|---|--------------------------------------------------------------------------------------------------------------------|

The remainder of this section describes what happens for each of the three runs of the `footballJob` example.

Run 1:

1. `playerLoad` runs and completes successfully, adding 400 players to the 'PLAYERS'
   table.
2. `gameLoad` runs and processes 11 files worth of game data, loading their contents
   into the 'GAMES' table.

3. `playerSummarization` begins processing and fails after 5 minutes.

Run 2:

1. `playerLoad` does not run, since it has already completed successfully, and `allow-start-if-complete` is 'false' (the default).

2. `gameLoad` runs again and processes another 2 files, loading their contents into the
   'GAMES' table as well (with a process indicator indicating they have yet to be
   processed).

3. `playerSummarization` begins processing of all remaining game data (filtering using the
   process indicator) and fails again after 30 minutes.

Run 3:

1. `playerLoad` does not run, since it has already completed successfully, and `allow-start-if-complete` is 'false' (the default).

2. `gameLoad` runs again and processes another 2 files, loading their contents into the
   'GAMES' table as well (with a process indicator indicating they have yet to be
   processed).

3. `playerSummarization` is not started and the job is immediately killed, since this is
   the third execution of `playerSummarization`, and its limit is only 2. Either the limit
   must be raised or the `Job` must be executed as a new `JobInstance`.

#### Configuring Skip Logic

There are many scenarios where errors encountered while processing should not result in `Step` failure but should be skipped instead. This is usually a decision that must be
made by someone who understands the data itself and what meaning it has. Financial data,
for example, may not be skippable because it results in money being transferred, which
needs to be completely accurate. Loading a list of vendors, on the other hand, might
allow for skips. If a vendor is not loaded because it was formatted incorrectly or was
missing necessary information, then there probably are not issues. Usually, these bad
records are logged as well, which is covered later when discussing listeners.

The following XML example shows an example of using a skip limit:

XML Configuration

```
<step id="step1">
   <tasklet>
      <chunk reader="flatFileItemReader" writer="itemWriter"
             commit-interval="10" skip-limit="10">
         <skippable-exception-classes>
            <include class="org.springframework.batch.item.file.FlatFileParseException"/>
         </skippable-exception-classes>
      </chunk>
   </tasklet>
</step>
```

The following Java example shows an example of using a skip limit:

Java Configuration

```
@Bean
public Step step1() {
    return this.stepBuilderFactory.get("step1")
                .<String, String>chunk(10)
                .reader(flatFileItemReader())
                .writer(itemWriter())
                .faultTolerant()
                .skipLimit(10)
                .skip(FlatFileParseException.class)
                .build();
}
```

In the preceding example, a `FlatFileItemReader` is used. If, at any point, a `FlatFileParseException` is thrown, the item is skipped and counted against the total
skip limit of 10. Declared exceptions (and their subclasses) might be thrown
during any phase of the chunk processing (read, process, or write). Separate counts
are made of skips on read, process, and write within the step execution, but the limit
applies across all skips. Once the skip limit is
reached, the next exception found causes the step to fail. In other words, the eleventh
skip triggers the exception, not the tenth.

One problem with the preceding example is that any other exception besides a `FlatFileParseException` causes the `Job` to fail. In certain scenarios, this may be the
correct behavior. However, in other scenarios, it may be easier to identify which
exceptions should cause failure and skip everything else.
The following XML example shows an example excluding a particular exception:

XML Configuration

```
<step id="step1">
    <tasklet>
        <chunk reader="flatFileItemReader" writer="itemWriter"
               commit-interval="10" skip-limit="10">
            <skippable-exception-classes>
                <include class="java.lang.Exception"/>
                <exclude class="java.io.FileNotFoundException"/>
            </skippable-exception-classes>
        </chunk>
    </tasklet>
</step>
```

The following Java example shows an example excluding a particular exception:

Java Configuration

```
@Bean
public Step step1() {
    return this.stepBuilderFactory.get("step1")
                .<String, String>chunk(10)
                .reader(flatFileItemReader())
                .writer(itemWriter())
                .faultTolerant()
                .skipLimit(10)
                .skip(Exception.class)
                .noSkip(FileNotFoundException.class)
                .build();
}
```

By identifying `java.lang.Exception` as a skippable exception class, the configuration
indicates that all `Exceptions` are skippable. However, by 'excluding' `java.io.FileNotFoundException`, the configuration refines the list of skippable
exception classes to be all `Exceptions` *except* `FileNotFoundException`. Any excluded
exception class is fatal if encountered (that is, it is not skipped).

For any exception encountered, the skippability is determined by the nearest superclass
in the class hierarchy. Any unclassified exception is treated as 'fatal'.

The order of the `<include/>` and `<exclude/>` elements does not matter.

The order of the `skip` and `noSkip` method calls does not matter.

#### Configuring Retry Logic

In most cases, you want an exception to cause either a skip or a `Step` failure. However,
not all exceptions are deterministic. If a `FlatFileParseException` is encountered while
reading, it is always thrown for that record. Resetting the `ItemReader` does not help.
However, for other exceptions, such as a `DeadlockLoserDataAccessException`, which
indicates that the current process has attempted to update a record that another process
holds a lock on, waiting and trying again might result in success.

In XML, retry should be configured as follows:

```
<step id="step1">
   <tasklet>
      <chunk reader="itemReader" writer="itemWriter"
             commit-interval="2" retry-limit="3">
         <retryable-exception-classes>
            <include class="org.springframework.dao.DeadlockLoserDataAccessException"/>
         </retryable-exception-classes>
      </chunk>
   </tasklet>
</step>
```

In Java, retry should be configured as follows:

```
@Bean
public Step step1() {
    return this.stepBuilderFactory.get("step1")
                .<String, String>chunk(2)
                .reader(itemReader())
                .writer(itemWriter())
                .faultTolerant()
                .retryLimit(3)
                .retry(DeadlockLoserDataAccessException.class)
                .build();
}
```

The `Step` allows a limit for the number of times an individual item can be retried and a
list of exceptions that are 'retryable'. More details on how retry works can be found in [retry](retry.html#retry).

#### Controlling Rollback

By default, regardless of retry or skip, any exceptions thrown from the `ItemWriter` cause the transaction controlled by the `Step` to roll back. If skip is configured as
described earlier, exceptions thrown from the `ItemReader` do not cause a rollback.
However, there are many scenarios in which exceptions thrown from the `ItemWriter` should
not cause a rollback, because no action has taken place to invalidate the transaction.
For this reason, the `Step` can be configured with a list of exceptions that should not
cause rollback.

In XML, you can control rollback as follows:

XML Configuration

```
<step id="step1">
   <tasklet>
      <chunk reader="itemReader" writer="itemWriter" commit-interval="2"/>
      <no-rollback-exception-classes>
         <include class="org.springframework.batch.item.validator.ValidationException"/>
      </no-rollback-exception-classes>
   </tasklet>
</step>
```

In Java, you can control rollback as follows:

Java Configuration

```
@Bean
public Step step1() {
    return this.stepBuilderFactory.get("step1")
                .<String, String>chunk(2)
                .reader(itemReader())
                .writer(itemWriter())
                .faultTolerant()
                .noRollback(ValidationException.class)
                .build();
}
```

##### Transactional Readers

The basic contract of the `ItemReader` is that it is forward only. The step buffers
reader input, so that in the case of a rollback, the items do not need to be re-read
from the reader.
However, there are certain scenarios in which the reader is built on
top of a transactional resource, such as a JMS queue. In this case, since the queue is
tied to the transaction that is rolled back, the messages that have been pulled from the
queue are put back on. For this reason, the step can be configured to not buffer the
items.

The following example shows how to create a reader that does not buffer items in XML:

XML Configuration

```
<step id="step1">
    <tasklet>
        <chunk reader="itemReader" writer="itemWriter" commit-interval="2"
               is-reader-transactional-queue="true"/>
    </tasklet>
</step>
```

The following example shows how to create a reader that does not buffer items in Java:

Java Configuration

```
@Bean
public Step step1() {
    return this.stepBuilderFactory.get("step1")
                .<String, String>chunk(2)
                .reader(itemReader())
                .writer(itemWriter())
                .readerIsTransactionalQueue()
                .build();
}
```

#### Transaction Attributes

Transaction attributes can be used to control the `isolation`, `propagation`, and `timeout` settings. More information on setting transaction attributes can be found in
the [Spring core documentation](https://docs.spring.io/spring/docs/current/spring-framework-reference/data-access.html#transaction).

The following example sets the `isolation`, `propagation`, and `timeout` transaction
attributes in XML:

XML Configuration

```
<step id="step1">
    <tasklet>
        <chunk reader="itemReader" writer="itemWriter" commit-interval="2"/>
        <transaction-attributes isolation="DEFAULT"
                                propagation="REQUIRED"
                                timeout="30"/>
    </tasklet>
</step>
```

The following example sets the `isolation`, `propagation`, and `timeout` transaction
attributes in Java:

Java Configuration

```
@Bean
public Step step1() {
    DefaultTransactionAttribute attribute = new DefaultTransactionAttribute();
    attribute.setPropagationBehavior(Propagation.REQUIRED.value());
    attribute.setIsolationLevel(Isolation.DEFAULT.value());
    attribute.setTimeout(30);

    return this.stepBuilderFactory.get("step1")
                .<String, String>chunk(2)
                .reader(itemReader())
                .writer(itemWriter())
                .transactionAttribute(attribute)
                .build();
}
```

#### Registering `ItemStream` with a `Step`

The step has to take care of `ItemStream` callbacks at the necessary points in its
lifecycle. (For more information on the `ItemStream` interface, see [ItemStream](readersAndWriters.html#itemStream).) This is vital if a step fails and might
need to be restarted, because the `ItemStream` interface is where the step gets the
information it needs about persistent state between executions.

If the `ItemReader`, `ItemProcessor`, or `ItemWriter` itself implements the `ItemStream` interface, then these are registered automatically. Any other streams need to be
registered separately. This is often the case where indirect dependencies, such as
delegates, are injected into the reader and writer. A stream can be registered on the `step` through the 'stream' element.

The following example shows how to register a `stream` on a `step` in XML:

XML Configuration

```
<step id="step1">
    <tasklet>
        <chunk reader="itemReader" writer="compositeWriter" commit-interval="2">
            <streams>
                <stream ref="fileItemWriter1"/>
                <stream ref="fileItemWriter2"/>
            </streams>
        </chunk>
    </tasklet>
</step>

<beans:bean id="compositeWriter"
            class="org.springframework.batch.item.support.CompositeItemWriter">
    <beans:property name="delegates">
        <beans:list>
            <beans:ref bean="fileItemWriter1" />
            <beans:ref bean="fileItemWriter2" />
        </beans:list>
    </beans:property>
</beans:bean>
```

The following example shows how to register a `stream` on a `step` in Java:

Java Configuration

```
@Bean
public Step step1() {
    return this.stepBuilderFactory.get("step1")
                .<String, String>chunk(2)
                .reader(itemReader())
                .writer(compositeItemWriter())
                .stream(fileItemWriter1())
                .stream(fileItemWriter2())
                .build();
}

/**
 * In Spring Batch 4, the CompositeItemWriter implements ItemStream so this isn't
 * necessary, but used for an example.
+ */ +@Bean +public CompositeItemWriter compositeItemWriter() { + List writers = new ArrayList<>(2); + writers.add(fileItemWriter1()); + writers.add(fileItemWriter2()); + + CompositeItemWriter itemWriter = new CompositeItemWriter(); + + itemWriter.setDelegates(writers); + + return itemWriter; +} +``` + +In the example above, the `CompositeItemWriter` is not an `ItemStream`, but both of its +delegates are. Therefore, both delegate writers must be explicitly registered as streams +in order for the framework to handle them correctly. The `ItemReader` does not need to be +explicitly registered as a stream because it is a direct property of the `Step`. The step +is now restartable, and the state of the reader and writer is correctly persisted in the +event of a failure. + +#### Intercepting `Step` Execution + +Just as with the `Job`, there are many events during the execution of a `Step` where a +user may need to perform some functionality. For example, in order to write out to a flat +file that requires a footer, the `ItemWriter` needs to be notified when the `Step` has +been completed, so that the footer can be written. This can be accomplished with one of many`Step` scoped listeners. + +Any class that implements one of the extensions of `StepListener` (but not that interface +itself since it is empty) can be applied to a step through the `listeners` element. +The `listeners` element is valid inside a step, tasklet, or chunk declaration. It is +recommended that you declare the listeners at the level at which its function applies, +or, if it is multi-featured (such as `StepExecutionListener` and `ItemReadListener`), +then declare it at the most granular level where it applies. + +The following example shows a listener applied at the chunk level in XML: + +XML Configuration + +``` + + + + + + + + +``` + +The following example shows a listener applied at the chunk level in Java: + +Java Configuration + +``` +@Bean +public Step step1() { + return this.stepBuilderFactory.get("step1") + .chunk(10) + .reader(reader()) + .writer(writer()) + .listener(chunkListener()) + .build(); +} +``` + +An `ItemReader`, `ItemWriter` or `ItemProcessor` that itself implements one of the`StepListener` interfaces is registered automatically with the `Step` if using the +namespace `` element or one of the `*StepFactoryBean` factories. This only +applies to components directly injected into the `Step`. If the listener is nested inside +another component, it needs to be explicitly registered (as described previously under[Registering `ItemStream` with a `Step`](#registeringItemStreams)). + +In addition to the `StepListener` interfaces, annotations are provided to address the +same concerns. Plain old Java objects can have methods with these annotations that are +then converted into the corresponding `StepListener` type. It is also common to annotate +custom implementations of chunk components such as `ItemReader` or `ItemWriter` or`Tasklet`. The annotations are analyzed by the XML parser for the `` elements +as well as registered with the `listener` methods in the builders, so all you need to do +is use the XML namespace or builders to register the listeners with a step. + +##### `StepExecutionListener` + +`StepExecutionListener` represents the most generic listener for `Step` execution. 
It
+allows for notification before a `Step` is started and after it ends, whether it ended
+normally or failed, as shown in the following example:
+
+```
+public interface StepExecutionListener extends StepListener {
+
+    void beforeStep(StepExecution stepExecution);
+
+    ExitStatus afterStep(StepExecution stepExecution);
+
+}
+```
+
+`ExitStatus` is the return type of `afterStep` in order to allow listeners the chance to
+modify the exit code that is returned upon completion of a `Step`.
+
+The annotations corresponding to this interface are:
+
+* `@BeforeStep`
+
+* `@AfterStep`
+
+##### `ChunkListener`
+
+A chunk is defined as the items processed within the scope of a transaction. Committing a
+transaction, at each commit interval, commits a 'chunk'. A `ChunkListener` can be used to
+perform logic before a chunk begins processing or after a chunk has completed
+successfully, as shown in the following interface definition:
+
+```
+public interface ChunkListener extends StepListener {
+
+    void beforeChunk(ChunkContext context);
+    void afterChunk(ChunkContext context);
+    void afterChunkError(ChunkContext context);
+
+}
+```
+
+The `beforeChunk` method is called after the transaction is started but before `read` is
+called on the `ItemReader`. Conversely, `afterChunk` is called after the chunk has been
+committed (and not at all if there is a rollback).
+
+The annotations corresponding to this interface are:
+
+* `@BeforeChunk`
+
+* `@AfterChunk`
+
+* `@AfterChunkError`
+
+A `ChunkListener` can be applied when there is no chunk declaration. The `TaskletStep` is
+responsible for calling the `ChunkListener`, so it applies to a non-item-oriented tasklet
+as well (it is called before and after the tasklet).
+
+##### `ItemReadListener`
+
+When discussing skip logic previously, it was mentioned that it may be beneficial to log
+the skipped records so that they can be dealt with later. In the case of read errors,
+this can be done with an `ItemReadListener`, as shown in the following interface
+definition:
+
+```
+public interface ItemReadListener<T> extends StepListener {
+
+    void beforeRead();
+    void afterRead(T item);
+    void onReadError(Exception ex);
+
+}
+```
+
+The `beforeRead` method is called before each call to `read` on the `ItemReader`. The
+`afterRead` method is called after each successful call to `read` and is passed the item
+that was read. If there was an error while reading, the `onReadError` method is called.
+The exception encountered is provided so that it can be logged.
+
+The annotations corresponding to this interface are:
+
+* `@BeforeRead`
+
+* `@AfterRead`
+
+* `@OnReadError`
+
+##### `ItemProcessListener`
+
+Just as with the `ItemReadListener`, the processing of an item can be 'listened' to, as
+shown in the following interface definition:
+
+```
+public interface ItemProcessListener<T, S> extends StepListener {
+
+    void beforeProcess(T item);
+    void afterProcess(T item, S result);
+    void onProcessError(T item, Exception e);
+
+}
+```
+
+The `beforeProcess` method is called before `process` on the `ItemProcessor` and is
+handed the item that is to be processed. The `afterProcess` method is called after the
+item has been successfully processed. If there was an error while processing, the
+`onProcessError` method is called. The exception encountered and the item that was
+attempted to be processed are provided, so that they can be logged.
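+
+For example, a minimal `ItemProcessListener` that only logs failures might look like the
+following sketch (the logging framework shown and the `String` item types are assumptions
+for illustration):
+
+```
+public class LoggingItemProcessListener implements ItemProcessListener<String, String> {
+
+    private static final Logger logger =
+            LoggerFactory.getLogger(LoggingItemProcessListener.class);
+
+    public void beforeProcess(String item) {
+        // no-op
+    }
+
+    public void afterProcess(String item, String result) {
+        // no-op
+    }
+
+    public void onProcessError(String item, Exception e) {
+        // record the failing item so that it can be investigated later
+        logger.error("Failed to process item: " + item, e);
+    }
+}
+```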
+
+The annotations corresponding to this interface are:
+
+* `@BeforeProcess`
+
+* `@AfterProcess`
+
+* `@OnProcessError`
+
+##### `ItemWriteListener`
+
+The writing of an item can be 'listened' to with the `ItemWriteListener`, as shown in the
+following interface definition:
+
+```
+public interface ItemWriteListener<S> extends StepListener {
+
+    void beforeWrite(List<? extends S> items);
+    void afterWrite(List<? extends S> items);
+    void onWriteError(Exception exception, List<? extends S> items);
+
+}
+```
+
+The `beforeWrite` method is called before `write` on the `ItemWriter` and is handed the
+list of items to be written. The `afterWrite` method is called after the items have been
+successfully written. If there was an error while writing, the `onWriteError` method is
+called. The exception encountered and the items that were attempted to be written are
+provided, so that they can be logged.
+
+The annotations corresponding to this interface are:
+
+* `@BeforeWrite`
+
+* `@AfterWrite`
+
+* `@OnWriteError`
+
+##### `SkipListener`
+
+`ItemReadListener`, `ItemProcessListener`, and `ItemWriteListener` all provide mechanisms
+for being notified of errors, but none informs you that a record has actually been
+skipped. `onWriteError`, for example, is called even if an item is retried and
+successful. For this reason, there is a separate interface for tracking skipped items, as
+shown in the following interface definition:
+
+```
+public interface SkipListener<T, S> extends StepListener {
+
+    void onSkipInRead(Throwable t);
+    void onSkipInProcess(T item, Throwable t);
+    void onSkipInWrite(S item, Throwable t);
+
+}
+```
+
+`onSkipInRead` is called whenever an item is skipped while reading. It should be noted
+that rollbacks may cause the same item to be registered as skipped more than once.
+`onSkipInWrite` is called when an item is skipped while writing. Because the item has
+been read successfully (and not skipped), it is also provided the item itself as an
+argument.
+
+The annotations corresponding to this interface are:
+
+* `@OnSkipInRead`
+
+* `@OnSkipInWrite`
+
+* `@OnSkipInProcess`
+
+###### SkipListeners and Transactions
+
+One of the most common use cases for a `SkipListener` is to log out a skipped item, so
+that another batch process or even a human process can be used to evaluate and fix the
+issue that led to the skip. Because there are many cases in which the original transaction
+may be rolled back, Spring Batch makes two guarantees:
+
+1. The appropriate skip method (depending on when the error happened) is called only once
+   per item.
+
+2. The `SkipListener` is always called just before the transaction is committed. This is
+   to ensure that any transactional resource calls made by the listener are not rolled
+   back by a failure within the `ItemWriter`.
+
+### `TaskletStep`
+
+[Chunk-oriented processing](#chunkOrientedProcessing) is not the only way to process in a
+`Step`. What if a `Step` must consist of a simple stored procedure call? You could
+implement the call as an `ItemReader` and return null after the procedure finishes.
+However, doing so is a bit unnatural, since there would need to be a no-op `ItemWriter`.
+Spring Batch provides the `TaskletStep` for this scenario.
+
+`Tasklet` is a simple interface that has one method, `execute`, which is called
+repeatedly by the `TaskletStep` until it either returns `RepeatStatus.FINISHED` or throws
+an exception to signal a failure. Each call to a `Tasklet` is wrapped in a transaction.
+`Tasklet` implementors might call a stored procedure, a script, or a simple SQL update
+statement.
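+
+For illustration, a minimal `Tasklet` that issues a single SQL update might look like the
+following sketch (the `JdbcTemplate` wiring and the table and column names are
+assumptions):
+
+```
+public class UpdateFlagTasklet implements Tasklet {
+
+    private final JdbcTemplate jdbcTemplate;
+
+    public UpdateFlagTasklet(JdbcTemplate jdbcTemplate) {
+        this.jdbcTemplate = jdbcTemplate;
+    }
+
+    @Override
+    public RepeatStatus execute(StepContribution contribution, ChunkContext chunkContext) {
+        // One transactional unit of work; FINISHED tells the TaskletStep not to call again
+        jdbcTemplate.update("UPDATE CUSTOMER SET PROCESSED = 1 WHERE PROCESSED = 0");
+        return RepeatStatus.FINISHED;
+    }
+}
+```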
+
+To create a `TaskletStep` in XML, the 'ref' attribute of the `<tasklet/>` element should
+reference a bean that defines a `Tasklet` object. No `<chunk/>` element should be used
+within the `<tasklet/>`. The following example shows a simple tasklet:
+
+```
+<step id="step1">
+    <tasklet ref="myTasklet"/>
+</step>
+```
+
+To create a `TaskletStep` in Java, the bean passed to the `tasklet` method of the builder
+should implement the `Tasklet` interface. No call to `chunk` should be made when
+building a `TaskletStep`. The following example shows a simple tasklet:
+
+```
+@Bean
+public Step step1() {
+	return this.stepBuilderFactory.get("step1")
+				.tasklet(myTasklet())
+				.build();
+}
+```
+
+| |`TaskletStep` automatically registers the
tasklet as a `StepListener` if it implements the `StepListener`interface.| +|---|-----------------------------------------------------------------------------------------------------------------------| + +#### `TaskletAdapter` + +As with other adapters for the `ItemReader` and `ItemWriter` interfaces, the `Tasklet`interface contains an implementation that allows for adapting itself to any pre-existing +class: `TaskletAdapter`. An example where this may be useful is an existing DAO that is +used to update a flag on a set of records. The `TaskletAdapter` can be used to call this +class without having to write an adapter for the `Tasklet` interface. + +The following example shows how to define a `TaskletAdapter` in XML: + +XML Configuration + +``` + + + + + + +``` + +The following example shows how to define a `TaskletAdapter` in Java: + +Java Configuration + +``` +@Bean +public MethodInvokingTaskletAdapter myTasklet() { + MethodInvokingTaskletAdapter adapter = new MethodInvokingTaskletAdapter(); + + adapter.setTargetObject(fooDao()); + adapter.setTargetMethod("updateFoo"); + + return adapter; +} +``` + +#### Example `Tasklet` Implementation + +Many batch jobs contain steps that must be done before the main processing begins in +order to set up various resources or after processing has completed to cleanup those +resources. In the case of a job that works heavily with files, it is often necessary to +delete certain files locally after they have been uploaded successfully to another +location. The following example (taken from the[Spring +Batch samples project](https://github.com/spring-projects/spring-batch/tree/master/spring-batch-samples)) is a `Tasklet` implementation with just such a responsibility: + +``` +public class FileDeletingTasklet implements Tasklet, InitializingBean { + + private Resource directory; + + public RepeatStatus execute(StepContribution contribution, + ChunkContext chunkContext) throws Exception { + File dir = directory.getFile(); + Assert.state(dir.isDirectory()); + + File[] files = dir.listFiles(); + for (int i = 0; i < files.length; i++) { + boolean deleted = files[i].delete(); + if (!deleted) { + throw new UnexpectedJobExecutionException("Could not delete file " + + files[i].getPath()); + } + } + return RepeatStatus.FINISHED; + } + + public void setDirectoryResource(Resource directory) { + this.directory = directory; + } + + public void afterPropertiesSet() throws Exception { + Assert.notNull(directory, "directory must be set"); + } +} +``` + +The preceding `tasklet` implementation deletes all files within a given directory. It +should be noted that the `execute` method is called only once. All that is left is to +reference the `tasklet` from the `step`. 
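+
+As an aside, because this tasklet does not use its `StepContribution` or `ChunkContext`
+arguments, it is also easy to exercise in isolation before wiring it into a job. The
+following is a minimal test sketch (the directory path and JUnit 4 usage are assumptions
+for illustration):
+
+```
+@Test
+public void testDeletesAllFilesAndFinishes() throws Exception {
+    File dir = new File("target/test-outputs/test-dir");
+    dir.mkdirs();
+    new File(dir, "leftover.txt").createNewFile();
+
+    FileDeletingTasklet tasklet = new FileDeletingTasklet();
+    tasklet.setDirectoryResource(new FileSystemResource(dir));
+    tasklet.afterPropertiesSet();
+
+    // The tasklet ignores its arguments, so nulls are acceptable in a unit test
+    RepeatStatus status = tasklet.execute(null, null);
+
+    assertEquals(RepeatStatus.FINISHED, status);
+    assertEquals(0, dir.listFiles().length);
+}
+```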
+ +The following example shows how to reference the `tasklet` from the `step` in XML: + +XML Configuration + +``` + + + + + + + + + + + + + +``` + +The following example shows how to reference the `tasklet` from the `step` in Java: + +Java Configuration + +``` +@Bean +public Job taskletJob() { + return this.jobBuilderFactory.get("taskletJob") + .start(deleteFilesInDir()) + .build(); +} + +@Bean +public Step deleteFilesInDir() { + return this.stepBuilderFactory.get("deleteFilesInDir") + .tasklet(fileDeletingTasklet()) + .build(); +} + +@Bean +public FileDeletingTasklet fileDeletingTasklet() { + FileDeletingTasklet tasklet = new FileDeletingTasklet(); + + tasklet.setDirectoryResource(new FileSystemResource("target/test-outputs/test-dir")); + + return tasklet; +} +``` + +### Controlling Step Flow + +With the ability to group steps together within an owning job comes the need to be able +to control how the job "flows" from one step to another. The failure of a `Step` does not +necessarily mean that the `Job` should fail. Furthermore, there may be more than one type +of 'success' that determines which `Step` should be executed next. Depending upon how a +group of `Steps` is configured, certain steps may not even be processed at all. + +#### Sequential Flow + +The simplest flow scenario is a job where all of the steps execute sequentially, as shown +in the following image: + +![Sequential Flow](https://docs.spring.io/spring-batch/docs/current/reference/html/images/sequential-flow.png) + +Figure 4. Sequential Flow + +This can be achieved by using the 'next' in a `step`. + +The following example shows how to use the `next` attribute in XML: + +XML Configuration + +``` + + + + + +``` + +The following example shows how to use the `next()` method in Java: + +Java Configuration + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(stepA()) + .next(stepB()) + .next(stepC()) + .build(); +} +``` + +In the scenario above, 'step A' runs first because it is the first `Step` listed. If +'step A' completes normally, then 'step B' runs, and so on. However, if 'step A' fails, +then the entire `Job` fails and 'step B' does not execute. + +| |With the Spring Batch XML namespace, the first step listed in the configuration is*always* the first step run by the `Job`. The order of the other step elements does not
matter, but the first step must always appear first in the xml.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### Conditional Flow + +In the example above, there are only two possibilities: + +1. The `step` is successful and the next `step` should be executed. + +2. The `step` failed and, thus, the `job` should fail. + +In many cases, this may be sufficient. However, what about a scenario in which the +failure of a `step` should trigger a different `step`, rather than causing failure? The +following image shows such a flow: + +![Conditional Flow](https://docs.spring.io/spring-batch/docs/current/reference/html/images/conditional-flow.png) + +Figure 5. Conditional Flow + +In order to handle more complex scenarios, the Spring Batch XML namespace allows transitions +elements to be defined within the step element. One such transition is the `next`element. Like the `next` attribute, the `next` element tells the `Job` which `Step` to +execute next. However, unlike the attribute, any number of `next` elements are allowed on +a given `Step`, and there is no default behavior in the case of failure. This means that, if +transition elements are used, then all of the behavior for the `Step` transitions must be +defined explicitly. Note also that a single step cannot have both a `next` attribute and +a `transition` element. + +The `next` element specifies a pattern to match and the step to execute next, as shown in +the following example: + +XML Configuration + +``` + + + + + + + + +``` + +The Java API offers a fluent set of methods that let you specify the flow and what to do +when a step fails. The following example shows how to specify one step (`stepA`) and then +proceed to either of two different steps (`stepB` and `stepC`), depending on whether`stepA` succeeds: + +Java Configuration + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(stepA()) + .on("*").to(stepB()) + .from(stepA()).on("FAILED").to(stepC()) + .end() + .build(); +} +``` + +When using XML configuration, the `on` attribute of a transition element uses a simple +pattern-matching scheme to match the `ExitStatus` that results from the execution of the`Step`. + +When using java configuration, the `on()` method uses a simple pattern-matching scheme to +match the `ExitStatus` that results from the execution of the `Step`. + +Only two special characters are allowed in the pattern: + +* "\*" matches zero or more characters + +* "?" matches exactly one character + +For example, "c\*t" matches "cat" and "count", while "c?t" matches "cat" but not "count". + +While there is no limit to the number of transition elements on a `Step`, if the `Step`execution results in an `ExitStatus` that is not covered by an element, then the +framework throws an exception and the `Job` fails. The framework automatically orders +transitions from most specific to least specific. This means that, even if the ordering +were swapped for "stepA" in the example above, an `ExitStatus` of "FAILED" would still go +to "stepC". + +##### Batch Status Versus Exit Status + +When configuring a `Job` for conditional flow, it is important to understand the +difference between `BatchStatus` and `ExitStatus`. 
`BatchStatus` is an enumeration that +is a property of both `JobExecution` and `StepExecution` and is used by the framework to +record the status of a `Job` or `Step`. It can be one of the following values:`COMPLETED`, `STARTING`, `STARTED`, `STOPPING`, `STOPPED`, `FAILED`, `ABANDONED`, or`UNKNOWN`. Most of them are self explanatory: `COMPLETED` is the status set when a step +or job has completed successfully, `FAILED` is set when it fails, and so on. + +The following example contains the 'next' element when using XML configuration: + +``` + +``` + +The following example contains the 'on' element when using Java Configuration: + +``` +... +.from(stepA()).on("FAILED").to(stepB()) +... +``` + +At first glance, it would appear that 'on' references the `BatchStatus` of the `Step` to +which it belongs. However, it actually references the `ExitStatus` of the `Step`. As the +name implies, `ExitStatus` represents the status of a `Step` after it finishes execution. + +More specifically, when using XML configuration, the 'next' element shown in the +preceding XML configuration example references the exit code of `ExitStatus`. + +When using Java configuration, the 'on()' method shown in the preceding +Java configuration example references the exit code of `ExitStatus`. + +In English, it says: "go to stepB if the exit code is `FAILED` ". By default, the exit +code is always the same as the `BatchStatus` for the `Step`, which is why the entry above +works. However, what if the exit code needs to be different? A good example comes from +the skip sample job within the samples project: + +The following example shows how to work with a different exit code in XML: + +XML Configuration + +``` + + + + + +``` + +The following example shows how to work with a different exit code in Java: + +Java Configuration + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(step1()).on("FAILED").end() + .from(step1()).on("COMPLETED WITH SKIPS").to(errorPrint1()) + .from(step1()).on("*").to(step2()) + .end() + .build(); +} +``` + +`step1` has three possibilities: + +1. The `Step` failed, in which case the job should fail. + +2. The `Step` completed successfully. + +3. The `Step` completed successfully but with an exit code of 'COMPLETED WITH SKIPS'. In + this case, a different step should be run to handle the errors. + +The preceding configuration works. However, something needs to change the exit code based on +the condition of the execution having skipped records, as shown in the following example: + +``` +public class SkipCheckingListener extends StepExecutionListenerSupport { + public ExitStatus afterStep(StepExecution stepExecution) { + String exitCode = stepExecution.getExitStatus().getExitCode(); + if (!exitCode.equals(ExitStatus.FAILED.getExitCode()) && + stepExecution.getSkipCount() > 0) { + return new ExitStatus("COMPLETED WITH SKIPS"); + } + else { + return null; + } + } +} +``` + +The above code is a `StepExecutionListener` that first checks to make sure the `Step` was +successful and then checks to see if the skip count on the `StepExecution` is higher than +0. If both conditions are met, a new `ExitStatus` with an exit code of`COMPLETED WITH SKIPS` is returned. + +#### Configuring for Stop + +After the discussion of [BatchStatus and ExitStatus](#batchStatusVsExitStatus), +one might wonder how the `BatchStatus` and `ExitStatus` are determined for the `Job`. 
+While these statuses are determined for the `Step` by the code that is executed, the +statuses for the `Job` are determined based on the configuration. + +So far, all of the job configurations discussed have had at least one final `Step` with +no transitions. + +In the following XML example, after the `step` executes, the `Job` ends: + +``` + +``` + +In the following Java example, after the `step` executes, the `Job` ends: + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(step1()) + .build(); +} +``` + +If no transitions are defined for a `Step`, then the status of the `Job` is defined as +follows: + +* If the `Step` ends with `ExitStatus` FAILED, then the `BatchStatus` and `ExitStatus` of + the `Job` are both `FAILED`. + +* Otherwise, the `BatchStatus` and `ExitStatus` of the `Job` are both `COMPLETED`. + +While this method of terminating a batch job is sufficient for some batch jobs, such as a +simple sequential step job, custom defined job-stopping scenarios may be required. For +this purpose, Spring Batch provides three transition elements to stop a `Job` (in +addition to the [`next` element](#nextElement) that we discussed previously). +Each of these stopping elements stops a `Job` with a particular `BatchStatus`. It is +important to note that the stop transition elements have no effect on either the`BatchStatus` or `ExitStatus` of any `Steps` in the `Job`. These elements affect only the +final statuses of the `Job`. For example, it is possible for every step in a job to have +a status of `FAILED` but for the job to have a status of `COMPLETED`. + +##### Ending at a Step + +Configuring a step end instructs a `Job` to stop with a `BatchStatus` of `COMPLETED`. A`Job` that has finished with status `COMPLETED` cannot be restarted (the framework throws +a `JobInstanceAlreadyCompleteException`). + +When using XML configuration, the 'end' element is used for this task. The `end` element +also allows for an optional 'exit-code' attribute that can be used to customize the`ExitStatus` of the `Job`. If no 'exit-code' attribute is given, then the `ExitStatus` is`COMPLETED` by default, to match the `BatchStatus`. + +When using Java configuration, the 'end' method is used for this task. The `end` method +also allows for an optional 'exitStatus' parameter that can be used to customize the`ExitStatus` of the `Job`. If no 'exitStatus' value is provided, then the `ExitStatus` is`COMPLETED` by default, to match the `BatchStatus`. + +Consider the following scenario: if `step2` fails, then the `Job` stops with a`BatchStatus` of `COMPLETED` and an `ExitStatus` of `COMPLETED` and `step3` does not run. +Otherwise, execution moves to `step3`. Note that if `step2` fails, the `Job` is not +restartable (because the status is `COMPLETED`). + +The following example shows the scenario in XML: + +``` + + + + + + + + +``` + +The following example shows the scenario in Java: + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(step1()) + .next(step2()) + .on("FAILED").end() + .from(step2()).on("*").to(step3()) + .end() + .build(); +} +``` + +##### Failing a Step + +Configuring a step to fail at a given point instructs a `Job` to stop with a`BatchStatus` of `FAILED`. Unlike end, the failure of a `Job` does not prevent the `Job`from being restarted. + +When using XML configuration, the 'fail' element also allows for an optional 'exit-code' +attribute that can be used to customize the `ExitStatus` of the `Job`. 
If no 'exit-code' +attribute is given, then the `ExitStatus` is `FAILED` by default, to match the`BatchStatus`. + +Consider the following scenario if `step2` fails, then the `Job` stops with a`BatchStatus` of `FAILED` and an `ExitStatus` of `EARLY TERMINATION` and `step3` does not +execute. Otherwise, execution moves to `step3`. Additionally, if `step2` fails and the`Job` is restarted, then execution begins again on `step2`. + +The following example shows the scenario in XML: + +XML Configuration + +``` + + + + + + + + +``` + +The following example shows the scenario in Java: + +Java Configuration + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(step1()) + .next(step2()).on("FAILED").fail() + .from(step2()).on("*").to(step3()) + .end() + .build(); +} +``` + +##### Stopping a Job at a Given Step + +Configuring a job to stop at a particular step instructs a `Job` to stop with a`BatchStatus` of `STOPPED`. Stopping a `Job` can provide a temporary break in processing, +so that the operator can take some action before restarting the `Job`. + +When using XML configuration, a 'stop' element requires a 'restart' attribute that specifies +the step where execution should pick up when the Job is restarted. + +When using Java configuration, the `stopAndRestart` method requires a 'restart' attribute +that specifies the step where execution should pick up when the Job is restarted. + +Consider the following scenario: if `step1` finishes with `COMPLETE`, then the job then +stops. Once it is restarted, execution begins on `step2`. + +The following listing shows the scenario in XML: + +``` + + + + + +``` + +The following example shows the scenario in Java: + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(step1()).on("COMPLETED").stopAndRestart(step2()) + .end() + .build(); +} +``` + +#### Programmatic Flow Decisions + +In some situations, more information than the `ExitStatus` may be required to decide +which step to execute next. In this case, a `JobExecutionDecider` can be used to assist +in the decision, as shown in the following example: + +``` +public class MyDecider implements JobExecutionDecider { + public FlowExecutionStatus decide(JobExecution jobExecution, StepExecution stepExecution) { + String status; + if (someCondition()) { + status = "FAILED"; + } + else { + status = "COMPLETED"; + } + return new FlowExecutionStatus(status); + } +} +``` + +In the following sample job configuration, a `decision` specifies the decider to use as +well as all of the transitions: + +XML Configuration + +``` + + + + + + + + + + + + + +``` + +In the following example, a bean implementing the `JobExecutionDecider` is passed +directly to the `next` call when using Java configuration. + +Java Configuration + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(step1()) + .next(decider()).on("FAILED").to(step2()) + .from(decider()).on("COMPLETED").to(step3()) + .end() + .build(); +} +``` + +#### Split Flows + +Every scenario described so far has involved a `Job` that executes its steps one at a +time in a linear fashion. In addition to this typical style, Spring Batch also allows +for a job to be configured with parallel flows. + +The XML namespace allows you to use the 'split' element. As the following example shows, +the 'split' element contains one or more 'flow' elements, where entire separate flows can +be defined. 
A 'split' element may also contain any of the previously discussed transition +elements, such as the 'next' attribute or the 'next', 'end' or 'fail' elements. + +``` + + + + + + + + + + +``` + +Java based configuration lets you configure splits through the provided builders. As the +following example shows, the 'split' element contains one or more 'flow' elements, where +entire separate flows can be defined. A 'split' element may also contain any of the +previously discussed transition elements, such as the 'next' attribute or the 'next', +'end' or 'fail' elements. + +``` +@Bean +public Flow flow1() { + return new FlowBuilder("flow1") + .start(step1()) + .next(step2()) + .build(); +} + +@Bean +public Flow flow2() { + return new FlowBuilder("flow2") + .start(step3()) + .build(); +} + +@Bean +public Job job(Flow flow1, Flow flow2) { + return this.jobBuilderFactory.get("job") + .start(flow1) + .split(new SimpleAsyncTaskExecutor()) + .add(flow2) + .next(step4()) + .end() + .build(); +} +``` + +#### Externalizing Flow Definitions and Dependencies Between Jobs + +Part of the flow in a job can be externalized as a separate bean definition and then +re-used. There are two ways to do so. The first is to simply declare the flow as a +reference to one defined elsewhere. + +The following example shows how to declare a flow as a reference to a flow defined +elsewhere in XML: + +XML Configuration + +``` + + + + + + + + + +``` + +The following example shows how to declare a flow as a reference to a flow defined +elsewhere in Java: + +Java Configuration + +``` +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(flow1()) + .next(step3()) + .end() + .build(); +} + +@Bean +public Flow flow1() { + return new FlowBuilder("flow1") + .start(step1()) + .next(step2()) + .build(); +} +``` + +The effect of defining an external flow as shown in the preceding example is to insert +the steps from the external flow into the job as if they had been declared inline. In +this way, many jobs can refer to the same template flow and compose such templates into +different logical flows. This is also a good way to separate the integration testing of +the individual flows. + +The other form of an externalized flow is to use a `JobStep`. A `JobStep` is similar to a`FlowStep` but actually creates and launches a separate job execution for the steps in +the flow specified. + +The following example hows an example of a `JobStep` in XML: + +XML Configuration + +``` + + + + + + +... + + + + +``` + +The following example shows an example of a `JobStep` in Java: + +Java Configuration + +``` +@Bean +public Job jobStepJob() { + return this.jobBuilderFactory.get("jobStepJob") + .start(jobStepJobStep1(null)) + .build(); +} + +@Bean +public Step jobStepJobStep1(JobLauncher jobLauncher) { + return this.stepBuilderFactory.get("jobStepJobStep1") + .job(job()) + .launcher(jobLauncher) + .parametersExtractor(jobParametersExtractor()) + .build(); +} + +@Bean +public Job job() { + return this.jobBuilderFactory.get("job") + .start(step1()) + .build(); +} + +@Bean +public DefaultJobParametersExtractor jobParametersExtractor() { + DefaultJobParametersExtractor extractor = new DefaultJobParametersExtractor(); + + extractor.setKeys(new String[]{"input.file"}); + + return extractor; +} +``` + +The job parameters extractor is a strategy that determines how the `ExecutionContext` for +the `Step` is converted into `JobParameters` for the `Job` that is run. 
The `JobStep` is +useful when you want to have some more granular options for monitoring and reporting on +jobs and steps. Using `JobStep` is also often a good answer to the question: "How do I +create dependencies between jobs?" It is a good way to break up a large system into +smaller modules and control the flow of jobs. + +### Late Binding of `Job` and `Step` Attributes + +Both the XML and flat file examples shown earlier use the Spring `Resource` abstraction +to obtain a file. This works because `Resource` has a `getFile` method, which returns a`java.io.File`. Both XML and flat file resources can be configured using standard Spring +constructs: + +The following example shows late binding in XML: + +XML Configuration + +``` + + + +``` + +The following example shows late binding in Java: + +Java Configuration + +``` +@Bean +public FlatFileItemReader flatFileItemReader() { + FlatFileItemReader reader = new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource("file://outputs/file.txt")) + ... +} +``` + +The preceding `Resource` loads the file from the specified file system location. Note +that absolute locations have to start with a double slash (`//`). In most Spring +applications, this solution is good enough, because the names of these resources are +known at compile time. However, in batch scenarios, the file name may need to be +determined at runtime as a parameter to the job. This can be solved using '-D' parameters +to read a system property. + +The following example shows how to read a file name from a property in XML: + +XML Configuration + +``` + + + +``` + +The following shows how to read a file name from a property in Java: + +Java Configuration + +``` +@Bean +public FlatFileItemReader flatFileItemReader(@Value("${input.file.name}") String name) { + return new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource(name)) + ... +} +``` + +All that would be required for this solution to work would be a system argument (such as`-Dinput.file.name="file://outputs/file.txt"`). + +| |Although a `PropertyPlaceholderConfigurer` can be used here, it is not
necessary if the system property is always set because the `ResourceEditor` in Spring
already filters and does placeholder replacement on system properties.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Often, in a batch setting, it is preferable to parametrize the file name in the`JobParameters` of the job, instead of through system properties, and access them that +way. To accomplish this, Spring Batch allows for the late binding of various `Job` and`Step` attributes. + +The following example shows how to parameterize a file name in XML: + +XML Configuration + +``` + + + +``` + +The following example shows how to parameterize a file name in Java: + +Java Configuration + +``` +@StepScope +@Bean +public FlatFileItemReader flatFileItemReader(@Value("#{jobParameters['input.file.name']}") String name) { + return new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource(name)) + ... +} +``` + +Both the `JobExecution` and `StepExecution` level `ExecutionContext` can be accessed in +the same way. + +The following example shows how to access the `ExecutionContext` in XML: + +XML Configuration + +``` + + + +``` + +XML Configuration + +``` + + + +``` + +The following example shows how to access the `ExecutionContext` in Java: + +Java Configuration + +``` +@StepScope +@Bean +public FlatFileItemReader flatFileItemReader(@Value("#{jobExecutionContext['input.file.name']}") String name) { + return new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource(name)) + ... +} +``` + +Java Configuration + +``` +@StepScope +@Bean +public FlatFileItemReader flatFileItemReader(@Value("#{stepExecutionContext['input.file.name']}") String name) { + return new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource(name)) + ... +} +``` + +| |Any bean that uses late-binding must be declared with scope="step". See[Step Scope](#step-scope) for more information. It should be noted
that a `Step` bean should not be step-scoped. If late-binding is needed in a step
definition, the components of that step (that is, the tasklet, item reader, or item writer)
are the ones that should be scoped instead.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If you are using Spring 3.0 (or above), the expressions in step-scoped beans are in the
Spring Expression Language, a powerful general purpose language with many interesting
features. To provide backward compatibility, if Spring Batch detects the presence of
older versions of Spring, it uses a native expression language that is less powerful and
that has slightly different parsing rules. The main difference is that the map keys in
the example above do not need to be quoted with Spring 2.5, but the quotes are mandatory
in Spring 3.0.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### Step Scope + +All of the late binding examples shown earlier have a scope of “step” declared on the +bean definition. + +The following example shows an example of binding to step scope in XML: + +XML Configuration + +``` + + + +``` + +The following example shows an example of binding to step scope in Java: + +Java Configuration + +``` +@StepScope +@Bean +public FlatFileItemReader flatFileItemReader(@Value("#{jobParameters[input.file.name]}") String name) { + return new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource(name)) + ... +} +``` + +Using a scope of `Step` is required in order to use late binding, because the bean cannot +actually be instantiated until the `Step` starts, to allow the attributes to be found. +Because it is not part of the Spring container by default, the scope must be added +explicitly, by using the `batch` namespace or by including a bean definition explicitly +for the `StepScope`, or by using the `@EnableBatchProcessing` annotation. Use only one of +those methods. The following example uses the `batch` namespace: + +``` + + +... + +``` + +The following example includes the bean definition explicitly: + +``` + +``` + +#### Job Scope + +`Job` scope, introduced in Spring Batch 3.0, is similar to `Step` scope in configuration +but is a Scope for the `Job` context, so that there is only one instance of such a bean +per running job. Additionally, support is provided for late binding of references +accessible from the `JobContext` using `#{..}` placeholders. Using this feature, bean +properties can be pulled from the job or job execution context and the job parameters. + +The following example shows an example of binding to job scope in XML: + +XML Configuration + +``` + + + +``` + +XML Configuration + +``` + + + +``` + +The following example shows an example of binding to job scope in Java: + +Java Configurtation + +``` +@JobScope +@Bean +public FlatFileItemReader flatFileItemReader(@Value("#{jobParameters[input]}") String name) { + return new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource(name)) + ... +} +``` + +Java Configuration + +``` +@JobScope +@Bean +public FlatFileItemReader flatFileItemReader(@Value("#{jobExecutionContext['input.name']}") String name) { + return new FlatFileItemReaderBuilder() + .name("flatFileItemReader") + .resource(new FileSystemResource(name)) + ... +} +``` + +Because it is not part of the Spring container by default, the scope must be added +explicitly, by using the `batch` namespace, by including a bean definition explicitly for +the JobScope, or using the `@EnableBatchProcessing` annotation (but not all of them). +The following example uses the `batch` namespace: + +``` + + + +... + +``` + +The following example includes a bean that explicitly defines the `JobScope`: + +``` + +``` + +| |There are some practical limitations of using job-scoped beans in multi-threaded
or partitioned steps. Spring Batch does not control the threads spawned in these
use cases, so it is not possible to set them up correctly to use such beans. Hence,
it is not recommended to use job-scoped beans in multi-threaded or partitioned steps.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| \ No newline at end of file diff --git a/docs/en/spring-batch/testing.md b/docs/en/spring-batch/testing.md new file mode 100644 index 0000000000000000000000000000000000000000..9a563a9c40f9955268d9261dda8356737d65314a --- /dev/null +++ b/docs/en/spring-batch/testing.md @@ -0,0 +1,324 @@ +# Unit Testing + +## Unit Testing + +XMLJavaBoth + +As with other application styles, it is extremely important to unit test any code written +as part of a batch job. The Spring core documentation covers how to unit and integration +test with Spring in great detail, so it is not be repeated here. It is important, however, +to think about how to 'end to end' test a batch job, which is what this chapter covers. +The spring-batch-test project includes classes that facilitate this end-to-end test +approach. + +### Creating a Unit Test Class + +In order for the unit test to run a batch job, the framework must load the job’s +ApplicationContext. Two annotations are used to trigger this behavior: + +* `@RunWith(SpringJUnit4ClassRunner.class)`: Indicates that the class should use Spring’s + JUnit facilities + +* `@ContextConfiguration(…​)`: Indicates which resources to configure the`ApplicationContext` with. + +Starting from v4.1, it is also possible to inject Spring Batch test utilities +like the `JobLauncherTestUtils` and `JobRepositoryTestUtils` in the test context +using the `@SpringBatchTest` annotation. + +| |It should be noted that `JobLauncherTestUtils` requires a `Job` bean and that`JobRepositoryTestUtils` requires a `DataSource` bean. Since `@SpringBatchTest`registers a `JobLauncherTestUtils` and a `JobRepositoryTestUtils` in the test
context, it is expected that the test context contains a single autowire candidate
for a `Job` and a `DataSource` (either a single bean definition or one that is
annotated with `org.springframework.context.annotation.Primary`).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following Java example shows the annotations in use: + +Using Java Configuration + +``` +@SpringBatchTest +@RunWith(SpringRunner.class) +@ContextConfiguration(classes=SkipSampleConfiguration.class) +public class SkipSampleFunctionalTests { ... } +``` + +The following XML example shows the annotations in use: + +Using XML Configuration + +``` +@SpringBatchTest +@RunWith(SpringRunner.class) +@ContextConfiguration(locations = { "/simple-job-launcher-context.xml", + "/jobs/skipSampleJob.xml" }) +public class SkipSampleFunctionalTests { ... } +``` + +### End-To-End Testing of Batch Jobs + +'End To End' testing can be defined as testing the complete run of a batch job from +beginning to end. This allows for a test that sets up a test condition, executes the job, +and verifies the end result. + +Consider an example of a batch job that reads from the database and writes to a flat file. +The test method begins by setting up the database with test data. It clears the CUSTOMER +table and then inserts 10 new records. The test then launches the `Job` by using the`launchJob()` method. The `launchJob()` method is provided by the `JobLauncherTestUtils`class. The `JobLauncherTestUtils` class also provides the `launchJob(JobParameters)`method, which allows the test to give particular parameters. The `launchJob()` method +returns the `JobExecution` object, which is useful for asserting particular information +about the `Job` run. In the following case, the test verifies that the `Job` ended with +status "COMPLETED". 
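+
+Where a test needs to supply specific parameters instead, the `launchJob(JobParameters)`
+overload mentioned above can be used. The following is a minimal sketch (the parameter
+name and value are illustrative only):
+
+```
+JobParameters jobParameters = new JobParametersBuilder()
+        .addString("input.file.name", "file://test-inputs/customers.txt")
+        .toJobParameters();
+
+JobExecution jobExecution = jobLauncherTestUtils.launchJob(jobParameters);
+```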
+ +The following listing shows the example in XML: + +XML Based Configuration + +``` +@SpringBatchTest +@RunWith(SpringRunner.class) +@ContextConfiguration(locations = { "/simple-job-launcher-context.xml", + "/jobs/skipSampleJob.xml" }) +public class SkipSampleFunctionalTests { + + @Autowired + private JobLauncherTestUtils jobLauncherTestUtils; + + private SimpleJdbcTemplate simpleJdbcTemplate; + + @Autowired + public void setDataSource(DataSource dataSource) { + this.simpleJdbcTemplate = new SimpleJdbcTemplate(dataSource); + } + + @Test + public void testJob() throws Exception { + simpleJdbcTemplate.update("delete from CUSTOMER"); + for (int i = 1; i <= 10; i++) { + simpleJdbcTemplate.update("insert into CUSTOMER values (?, 0, ?, 100000)", + i, "customer" + i); + } + + JobExecution jobExecution = jobLauncherTestUtils.launchJob(); + + Assert.assertEquals("COMPLETED", jobExecution.getExitStatus().getExitCode()); + } +} +``` + +The following listing shows the example in Java: + +Java Based Configuration + +``` +@SpringBatchTest +@RunWith(SpringRunner.class) +@ContextConfiguration(classes=SkipSampleConfiguration.class) +public class SkipSampleFunctionalTests { + + @Autowired + private JobLauncherTestUtils jobLauncherTestUtils; + + private SimpleJdbcTemplate simpleJdbcTemplate; + + @Autowired + public void setDataSource(DataSource dataSource) { + this.simpleJdbcTemplate = new SimpleJdbcTemplate(dataSource); + } + + @Test + public void testJob() throws Exception { + simpleJdbcTemplate.update("delete from CUSTOMER"); + for (int i = 1; i <= 10; i++) { + simpleJdbcTemplate.update("insert into CUSTOMER values (?, 0, ?, 100000)", + i, "customer" + i); + } + + JobExecution jobExecution = jobLauncherTestUtils.launchJob(); + + Assert.assertEquals("COMPLETED", jobExecution.getExitStatus().getExitCode()); + } +} +``` + +### Testing Individual Steps + +For complex batch jobs, test cases in the end-to-end testing approach may become +unmanageable. It these cases, it may be more useful to have test cases to test individual +steps on their own. The `AbstractJobTests` class contains a method called `launchStep`, +which takes a step name and runs just that particular `Step`. This approach allows for +more targeted tests letting the test set up data for only that step and to validate its +results directly. The following example shows how to use the `launchStep` method to load a`Step` by name: + +``` +JobExecution jobExecution = jobLauncherTestUtils.launchStep("loadFileStep"); +``` + +### Testing Step-Scoped Components + +Often, the components that are configured for your steps at runtime use step scope and +late binding to inject context from the step or job execution. These are tricky to test as +standalone components, unless you have a way to set the context as if they were in a step +execution. That is the goal of two components in Spring Batch:`StepScopeTestExecutionListener` and `StepScopeTestUtils`. + +The listener is declared at the class level, and its job is to create a step execution +context for each test method, as shown in the following example: + +``` +@ContextConfiguration +@TestExecutionListeners( { DependencyInjectionTestExecutionListener.class, + StepScopeTestExecutionListener.class }) +@RunWith(SpringRunner.class) +public class StepScopeTestExecutionListenerIntegrationTests { + + // This component is defined step-scoped, so it cannot be injected unless + // a step is active... 
+ @Autowired + private ItemReader reader; + + public StepExecution getStepExecution() { + StepExecution execution = MetaDataInstanceFactory.createStepExecution(); + execution.getExecutionContext().putString("input.data", "foo,bar,spam"); + return execution; + } + + @Test + public void testReader() { + // The reader is initialized and bound to the input data + assertNotNull(reader.read()); + } + +} +``` + +There are two `TestExecutionListeners`. One is the regular Spring Test framework, which +handles dependency injection from the configured application context to inject the reader. +The other is the Spring Batch `StepScopeTestExecutionListener`. It works by looking for a +factory method in the test case for a `StepExecution`, using that as the context for the +test method, as if that execution were active in a `Step` at runtime. The factory method +is detected by its signature (it must return a `StepExecution`). If a factory method is +not provided, then a default `StepExecution` is created. + +Starting from v4.1, the `StepScopeTestExecutionListener` and`JobScopeTestExecutionListener` are imported as test execution listeners +if the test class is annotated with `@SpringBatchTest`. The preceding test +example can be configured as follows: + +``` +@SpringBatchTest +@RunWith(SpringRunner.class) +@ContextConfiguration +public class StepScopeTestExecutionListenerIntegrationTests { + + // This component is defined step-scoped, so it cannot be injected unless + // a step is active... + @Autowired + private ItemReader reader; + + public StepExecution getStepExecution() { + StepExecution execution = MetaDataInstanceFactory.createStepExecution(); + execution.getExecutionContext().putString("input.data", "foo,bar,spam"); + return execution; + } + + @Test + public void testReader() { + // The reader is initialized and bound to the input data + assertNotNull(reader.read()); + } + +} +``` + +The listener approach is convenient if you want the duration of the step scope to be the +execution of the test method. For a more flexible but more invasive approach, you can use +the `StepScopeTestUtils`. The following example counts the number of items available in +the reader shown in the previous example: + +``` +int count = StepScopeTestUtils.doInStepScope(stepExecution, + new Callable() { + public Integer call() throws Exception { + + int count = 0; + + while (reader.read() != null) { + count++; + } + return count; + } +}); +``` + +### Validating Output Files + +When a batch job writes to the database, it is easy to query the database to verify that +the output is as expected. However, if the batch job writes to a file, it is equally +important that the output be verified. Spring Batch provides a class called `AssertFile`to facilitate the verification of output files. The method called `assertFileEquals` takes +two `File` objects (or two `Resource` objects) and asserts, line by line, that the two +files have the same content. Therefore, it is possible to create a file with the expected +output and to compare it to the actual result, as shown in the following example: + +``` +private static final String EXPECTED_FILE = "src/main/resources/data/input.txt"; +private static final String OUTPUT_FILE = "target/test-outputs/output.txt"; + +AssertFile.assertFileEquals(new FileSystemResource(EXPECTED_FILE), + new FileSystemResource(OUTPUT_FILE)); +``` + +### Mocking Domain Objects + +Another common issue encountered while writing unit and integration tests for Spring Batch +components is how to mock domain objects. 
A good example is a `StepExecutionListener`, as +illustrated in the following code snippet: + +``` +public class NoWorkFoundStepExecutionListener extends StepExecutionListenerSupport { + + public ExitStatus afterStep(StepExecution stepExecution) { + if (stepExecution.getReadCount() == 0) { + return ExitStatus.FAILED; + } + return null; + } +} +``` + +The preceding listener example is provided by the framework and checks a `StepExecution`for an empty read count, thus signifying that no work was done. While this example is +fairly simple, it serves to illustrate the types of problems that may be encountered when +attempting to unit test classes that implement interfaces requiring Spring Batch domain +objects. Consider the following unit test for the listener’s in the preceding example: + +``` +private NoWorkFoundStepExecutionListener tested = new NoWorkFoundStepExecutionListener(); + +@Test +public void noWork() { + StepExecution stepExecution = new StepExecution("NoProcessingStep", + new JobExecution(new JobInstance(1L, new JobParameters(), + "NoProcessingJob"))); + + stepExecution.setExitStatus(ExitStatus.COMPLETED); + stepExecution.setReadCount(0); + + ExitStatus exitStatus = tested.afterStep(stepExecution); + assertEquals(ExitStatus.FAILED.getExitCode(), exitStatus.getExitCode()); +} +``` + +Because the Spring Batch domain model follows good object-oriented principles, the`StepExecution` requires a `JobExecution`, which requires a `JobInstance` and`JobParameters`, to create a valid `StepExecution`. While this is good in a solid domain +model, it does make creating stub objects for unit testing verbose. To address this issue, +the Spring Batch test module includes a factory for creating domain objects:`MetaDataInstanceFactory`. Given this factory, the unit test can be updated to be more +concise, as shown in the following example: + +``` +private NoWorkFoundStepExecutionListener tested = new NoWorkFoundStepExecutionListener(); + +@Test +public void testAfterStep() { + StepExecution stepExecution = MetaDataInstanceFactory.createStepExecution(); + + stepExecution.setExitStatus(ExitStatus.COMPLETED); + stepExecution.setReadCount(0); + + ExitStatus exitStatus = tested.afterStep(stepExecution); + assertEquals(ExitStatus.FAILED.getExitCode(), exitStatus.getExitCode()); +} +``` + +The preceding method for creating a simple `StepExecution` is just one convenience method +available within the factory. A full method listing can be found in its[Javadoc](https://docs.spring.io/spring-batch/apidocs/org/springframework/batch/test/MetaDataInstanceFactory.html). \ No newline at end of file diff --git a/docs/en/spring-batch/transaction-appendix.md b/docs/en/spring-batch/transaction-appendix.md new file mode 100644 index 0000000000000000000000000000000000000000..17ecb1280f2d80f63ac59d1195e0faf32d1b41d6 --- /dev/null +++ b/docs/en/spring-batch/transaction-appendix.md @@ -0,0 +1,310 @@ +# Batch Processing and Transactions + +## Appendix A: Batch Processing and Transactions + +### Simple Batching with No Retry + +Consider the following simple example of a nested batch with no retries. It shows a +common scenario for batch processing: An input source is processed until exhausted, and +we commit periodically at the end of a "chunk" of processing. 
+ +``` +1 | REPEAT(until=exhausted) { +| +2 | TX { +3 | REPEAT(size=5) { +3.1 | input; +3.2 | output; +| } +| } +| +| } +``` + +The input operation (3.1) could be a message-based receive (such as from JMS), or a +file-based read, but to recover and continue processing with a chance of completing the +whole job, it must be transactional. The same applies to the operation at 3.2. It must +be either transactional or idempotent. + +If the chunk at `REPEAT` (3) fails because of a database exception at 3.2, then `TX` (2) +must roll back the whole chunk. + +### Simple Stateless Retry + +It is also useful to use a retry for an operation which is not transactional, such as a +call to a web-service or other remote resource, as shown in the following example: + +``` +0 | TX { +1 | input; +1.1 | output; +2 | RETRY { +2.1 | remote access; +| } +| } +``` + +This is actually one of the most useful applications of a retry, since a remote call is +much more likely to fail and be retryable than a database update. As long as the remote +access (2.1) eventually succeeds, the transaction, `TX` (0), commits. If the remote +access (2.1) eventually fails, then the transaction, `TX` (0), is guaranteed to roll +back. + +### Typical Repeat-Retry Pattern + +The most typical batch processing pattern is to add a retry to the inner block of the +chunk, as shown in the following example: + +``` +1 | REPEAT(until=exhausted, exception=not critical) { +| +2 | TX { +3 | REPEAT(size=5) { +| +4 | RETRY(stateful, exception=deadlock loser) { +4.1 | input; +5 | } PROCESS { +5.1 | output; +6 | } SKIP and RECOVER { +| notify; +| } +| +| } +| } +| +| } +``` + +The inner `RETRY` (4) block is marked as "stateful". See [the +typical use case](#transactionsNoRetry) for a description of a stateful retry. This means that if the +retry `PROCESS` (5) block fails, the behavior of the `RETRY` (4) is as follows: + +1. Throw an exception, rolling back the transaction, `TX` (2), at the chunk level, and + allowing the item to be re-presented to the input queue. + +2. When the item re-appears, it might be retried depending on the retry policy in place, + executing `PROCESS` (5) again. The second and subsequent attempts might fail again and + re-throw the exception. + +3. Eventually, the item reappears for the final time. The retry policy disallows another + attempt, so `PROCESS` (5) is never executed. In this case, we follow the `RECOVER` (6) + path, effectively "skipping" the item that was received and is being processed. + +Note that the notation used for the `RETRY` (4) in the plan above explicitly shows that +the input step (4.1) is part of the retry. It also makes clear that there are two +alternate paths for processing: the normal case, as denoted by `PROCESS` (5), and the +recovery path, as denoted in a separate block by `RECOVER` (6). The two alternate paths +are completely distinct. Only one is ever taken in normal circumstances. + +In special cases (such as a special `TransactionValidException` type), the retry policy +might be able to determine that the `RECOVER` (6) path can be taken on the last attempt +after `PROCESS` (5) has just failed, instead of waiting for the item to be re-presented. +This is not the default behavior, because it requires detailed knowledge of what has +happened inside the `PROCESS` (5) block, which is not usually available. For example, if +the output included write access before the failure, then the exception should be +re-thrown to ensure transactional integrity. 
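+
+For concreteness, this repeat-retry plan roughly corresponds to what a fault-tolerant, chunk-oriented step expresses in Spring Batch configuration. The following is a minimal sketch (the bean name, item type, and retry/skip limits are illustrative assumptions, not prescribed by the plan above) mapping the plan's labels onto the step builder calls:
+
+```
+import org.springframework.batch.core.Step;
+import org.springframework.batch.core.configuration.annotation.StepBuilderFactory;
+import org.springframework.batch.item.ItemReader;
+import org.springframework.batch.item.ItemWriter;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.dao.DeadlockLoserDataAccessException;
+
+@Configuration
+public class RepeatRetryStepConfiguration {
+
+    @Bean
+    public Step chunkStep(StepBuilderFactory steps,
+            ItemReader<String> reader, ItemWriter<String> writer) {
+        return steps.get("chunkStep")
+                // REPEAT(size=5): commit interval of five items per chunk transaction (TX)
+                .<String, String>chunk(5)
+                .reader(reader)   // input (4.1)
+                .writer(writer)   // output (5.1)
+                .faultTolerant()
+                // RETRY(stateful, exception=deadlock loser): roll back, re-present the item, retry
+                .retry(DeadlockLoserDataAccessException.class)
+                .retryLimit(3)
+                // SKIP and RECOVER: once retries are exhausted, skip the offending item
+                .skip(DeadlockLoserDataAccessException.class)
+                .skipLimit(10)
+                .build();
+    }
+
+}
+```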
+
+The completion policy in the outer `REPEAT` (1) is crucial to the success of the above
+plan. If the output (5.1) fails, it may throw an exception (it usually does, as
+described), in which case the transaction, `TX` (2), fails, and the exception could
+propagate up through the outer batch `REPEAT` (1). We do not want the whole batch to
+stop, because the `RETRY` (4) might still be successful if we try again, so we add `exception=not critical` to the outer `REPEAT` (1).
+
+Note, however, that if the `TX` (2) fails and we *do* try again, by virtue of the outer
+completion policy, the item that is next processed in the inner `REPEAT` (3) is not
+guaranteed to be the one that just failed. It might be, but it depends on the
+implementation of the input (4.1). Thus, the output (5.1) might fail again on either a
+new item or the old one. The client of the batch should not assume that each `RETRY` (4)
+attempt is going to process the same items as the last one that failed. For example, if
+the termination policy for `REPEAT` (1) is to fail after 10 attempts, it fails after 10
+consecutive attempts but not necessarily at the same item. This is consistent with the
+overall retry strategy. The inner `RETRY` (4) is aware of the history of each item and
+can decide whether or not to have another attempt at it.
+
+### Asynchronous Chunk Processing
+
+The inner batches or chunks in the [typical example](#repeatRetry) can be executed
+concurrently by configuring the outer batch to use an `AsyncTaskExecutor`. The outer
+batch waits for all the chunks to complete before completing. The following example shows
+asynchronous chunk processing:
+
+```
+1 | REPEAT(until=exhausted, concurrent, exception=not critical) {
+|
+2 | TX {
+3 | REPEAT(size=5) {
+|
+4 | RETRY(stateful, exception=deadlock loser) {
+4.1 | input;
+5 | } PROCESS {
+| output;
+6 | } RECOVER {
+| recover;
+| }
+|
+| }
+| }
+|
+| }
+```
+
+### Asynchronous Item Processing
+
+The individual items in chunks in the [typical example](#repeatRetry) can also, in
+principle, be processed concurrently. In this case, the transaction boundary has to move
+to the level of the individual item, so that each transaction is on a single thread, as
+shown in the following example:
+
+```
+1 | REPEAT(until=exhausted, exception=not critical) {
+|
+2 | REPEAT(size=5, concurrent) {
+|
+3 | TX {
+4 | RETRY(stateful, exception=deadlock loser) {
+4.1 | input;
+5 | } PROCESS {
+| output;
+6 | } RECOVER {
+| recover;
+| }
+| }
+|
+| }
+|
+| }
+```
+
+This plan sacrifices the optimization benefit, which the simple plan had, of having all
+the transactional resources chunked together. It is only useful if the cost of the
+processing (5) is much higher than the cost of transaction management (3).
+
+### Interactions Between Batching and Transaction Propagation
+
+There is a tighter coupling between batch-retry and transaction management than we would
+ideally like. In particular, a stateless retry cannot be used to retry database
+operations with a transaction manager that does not support NESTED propagation.
+
+The following example uses retry without repeat:
+
+```
+1 | TX {
+|
+1.1 | input;
+1.2 | database access;
+2 | RETRY {
+3 | TX {
+3.1 | database access;
+| }
+| }
+|
+| }
+```
+
+Again, and for the same reason, the inner transaction, `TX` (3), can cause the outer
+transaction, `TX` (1), to fail, even if the `RETRY` (2) is eventually successful.
+
+Unfortunately, the same effect percolates from the retry block up to the surrounding
+repeat batch if there is one, as shown in the following example:
+
+```
+1 | TX {
+|
+2 | REPEAT(size=5) {
+2.1 | input;
+2.2 | database access;
+3 | RETRY {
+4 | TX {
+4.1 | database access;
+| }
+| }
+| }
+|
+| }
+```
+
+Now, if `TX` (4) rolls back, it can pollute the whole batch at `TX` (1) and force it to roll
+back at the end.
+
+What about non-default propagation?
+
+* In the preceding example, `PROPAGATION_REQUIRES_NEW` at `TX` (3) prevents the outer `TX` (1) from being polluted if both transactions are eventually successful. But if `TX` (3) commits and `TX` (1) rolls back, then `TX` (3) stays committed, so we violate the
+  transaction contract for `TX` (1). If `TX` (3) rolls back, `TX` (1) does not necessarily roll back
+  (but it probably does in practice, because the retry throws a rollback exception).
+
+* `PROPAGATION_NESTED` at `TX` (3) works as we require in the retry case (and for a
+  batch with skips): `TX` (3) can commit but subsequently be rolled back by the outer
+  transaction, `TX` (1). If `TX` (3) rolls back, `TX` (1) rolls back in practice. This
+  option is only available on some platforms, not including Hibernate or
+  JTA, but it is the only one that consistently works.
+
+Consequently, the `NESTED` pattern is best if the retry block contains any database
+access.
+
+### Special Case: Transactions with Orthogonal Resources
+
+Default propagation is always OK for simple cases where there are no nested database
+transactions. Consider the following example, where the `SESSION` and `TX` are not
+global `XA` resources, so their resources are orthogonal:
+
+```
+0 | SESSION {
+1 | input;
+2 | RETRY {
+3 | TX {
+3.1 | database access;
+| }
+| }
+| }
+```
+
+Here there is a transactional message `SESSION` (0), but it does not participate in other
+transactions with `PlatformTransactionManager`, so it does not propagate when `TX` (3)
+starts. There is no database access outside the `RETRY` (2) block. If `TX` (3) fails and
+then eventually succeeds on a retry, `SESSION` (0) can commit (independently of a `TX` block). This is similar to the vanilla "best-efforts-one-phase-commit" scenario. The
+worst that can happen is a duplicate message when the `RETRY` (2) succeeds and the `SESSION` (0) cannot commit (for example, because the message system is unavailable).
+
+### Stateless Retry Cannot Recover
+
+The distinction between a stateless and a stateful retry in the typical example above is
+important. It is ultimately a transactional constraint that forces the
+distinction, and this constraint also makes it obvious why the distinction exists.
+
+We start with the observation that there is no way to skip an item that failed and
+successfully commit the rest of the chunk unless we wrap the item processing in a
+transaction. Consequently, we simplify the typical batch execution plan to be as
+follows:
+
+```
+0 | REPEAT(until=exhausted) {
+|
+1 | TX {
+2 | REPEAT(size=5) {
+|
+3 | RETRY(stateless) {
+4 | TX {
+4.1 | input;
+4.2 | database access;
+| }
+5 | } RECOVER {
+5.1 | skip;
+| }
+|
+| }
+| }
+|
+| }
+```
+
+The preceding example shows a stateless `RETRY` (3) with a `RECOVER` (5) path that kicks
+in after the final attempt fails. The `stateless` label means that the block is repeated
+without re-throwing any exception up to some limit. This only works if the transaction, `TX` (4), has propagation NESTED.
+
+If the inner `TX` (4) has default propagation properties and rolls back, it pollutes the
+outer `TX` (1). The inner transaction is assumed by the transaction manager to have
+corrupted the transactional resource, so it cannot be used again.
+
+Support for NESTED propagation is sufficiently rare that we choose not to support
+recovery with stateless retries in the current versions of Spring Batch. The same effect
+can always be achieved (at the expense of repeating more processing) by using the
+typical pattern above.
\ No newline at end of file
diff --git a/docs/en/spring-batch/whatsnew.md b/docs/en/spring-batch/whatsnew.md
new file mode 100644
index 0000000000000000000000000000000000000000..b0ce201f64ccfa192c53e21159bc27c5531911b5
--- /dev/null
+++ b/docs/en/spring-batch/whatsnew.md
@@ -0,0 +1,145 @@
+# What’s New in Spring Batch 4.3
+
+## What’s New in Spring Batch 4.3
+
+This release comes with a number of new features, performance improvements,
+dependency updates and API deprecations. This section describes the most
+important changes. For a complete list of changes, please refer to the [release notes](https://github.com/spring-projects/spring-batch/releases/tag/4.3.0).
+
+### New features
+
+#### New synchronized ItemStreamWriter
+
+Similar to the `SynchronizedItemStreamReader`, this release introduces a `SynchronizedItemStreamWriter`. This feature is useful in multi-threaded steps
+where concurrent threads need to be synchronized to avoid overriding each other’s writes.
+
+#### New JpaQueryProvider for named queries
+
+This release introduces a new `JpaNamedQueryProvider` next to the `JpaNativeQueryProvider` to ease the configuration of JPA named queries when
+using the `JpaPagingItemReader`:
+
+```
+JpaPagingItemReader<Foo> reader = new JpaPagingItemReaderBuilder<Foo>()
+    .name("fooReader")
+    .queryProvider(new JpaNamedQueryProvider<Foo>("allFoos", Foo.class))
+    // set other properties on the reader
+    .build();
+```
+
+#### New JpaCursorItemReader Implementation
+
+JPA 2.2 added the ability to stream results as a cursor instead of only paging.
+This release introduces a new JPA item reader that uses this feature to
+stream results in a cursor-based fashion similar to the `JdbcCursorItemReader` and `HibernateCursorItemReader`.
+
+#### New JobParametersIncrementer implementation
+
+Similar to the `RunIdIncrementer`, this release adds a new `JobParametersIncrementer` that is based on a `DataFieldMaxValueIncrementer` from Spring Framework.
+
+#### GraalVM Support
+
+This release adds initial support to run Spring Batch applications on GraalVM.
+The support is still experimental and will be improved in future releases.
+
+#### Java records Support
+
+This release adds support to use Java records as items in chunk-oriented steps.
+The newly added `RecordFieldSetMapper` supports data mapping from flat files to
+Java records, as shown in the following example:
+
+```
+@Bean
+public FlatFileItemReader<Person> itemReader() {
+    return new FlatFileItemReaderBuilder<Person>()
+            .name("personReader")
+            .resource(new FileSystemResource("persons.csv"))
+            .delimited()
+            .names("id", "name")
+            .fieldSetMapper(new RecordFieldSetMapper<>(Person.class))
+            .build();
+}
+```
+
+In this example, the `Person` type is a Java record defined as follows:
+
+```
+public record Person(int id, String name) { }
+```
+
+The `FlatFileItemReader` uses the new `RecordFieldSetMapper` to map data from
+the `persons.csv` file to records of type `Person`.
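+
+As a quick check of the mapping, the reader above can be driven directly (for instance, from a test method that declares `throws Exception`). The `open`/`close` calls come from Spring Batch's `ItemStream` contract, and the contents of `persons.csv` are assumed for illustration:
+
+```
+// Assumed contents of persons.csv:
+//   1,foo
+//   2,bar
+FlatFileItemReader<Person> reader = itemReader();
+reader.open(new ExecutionContext());
+Person person;
+while ((person = reader.read()) != null) {
+    System.out.println(person);   // e.g. Person[id=1, name=foo]
+}
+reader.close();
+```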
+
+### Performance improvements
+
+#### Use bulk writes in RepositoryItemWriter
+
+Up to version 4.2, in order to use `CrudRepository#saveAll` in `RepositoryItemWriter`,
+it was required to extend the writer and override `write(List)`.
+
+In this release, the `RepositoryItemWriter` has been updated to use `CrudRepository#saveAll` by default.
+
+#### Use bulk writes in MongoItemWriter
+
+The `MongoItemWriter` used `MongoOperations#save()` in a for loop
+to save items to the database. In this release, this writer has been
+updated to use `org.springframework.data.mongodb.core.BulkOperations` instead.
+
+#### Job start/restart time improvement
+
+The implementation of `JobRepository#getStepExecutionCount()` used to load
+all job executions and step executions in-memory to do the count on the framework
+side. In this release, the implementation has been changed to do a single call to
+the database with a SQL count query in order to count step executions.
+
+### Dependency updates
+
+This release updates dependent Spring projects to the following versions:
+
+* Spring Framework 5.3
+
+* Spring Data 2020.0
+
+* Spring Integration 5.4
+
+* Spring AMQP 2.3
+
+* Spring for Apache Kafka 2.6
+
+* Micrometer 1.5
+
+### Deprecations
+
+#### API deprecation
+
+The following is a list of APIs that have been deprecated in this release:
+
+* `org.springframework.batch.core.repository.support.MapJobRepositoryFactoryBean`
+
+* `org.springframework.batch.core.explore.support.MapJobExplorerFactoryBean`
+
+* `org.springframework.batch.core.repository.dao.MapJobInstanceDao`
+
+* `org.springframework.batch.core.repository.dao.MapJobExecutionDao`
+
+* `org.springframework.batch.core.repository.dao.MapStepExecutionDao`
+
+* `org.springframework.batch.core.repository.dao.MapExecutionContextDao`
+
+* `org.springframework.batch.item.data.AbstractNeo4jItemReader`
+
+* `org.springframework.batch.item.file.transform.Alignment`
+
+* `org.springframework.batch.item.xml.StaxUtils`
+
+* `org.springframework.batch.core.launch.support.ScheduledJobParametersFactory`
+
+* `org.springframework.batch.item.file.MultiResourceItemReader#getCurrentResource()`
+
+* `org.springframework.batch.core.JobExecution#stop()`
+
+Suggested replacements can be found in the Javadoc of each deprecated API.
+
+#### SQLFire support deprecation
+
+SQLFire has been [EOL](https://www.vmware.com/latam/products/pivotal-sqlfire.html) since November 1st, 2014. This release deprecates the support of using SQLFire
+as a job repository and schedules it for removal in version 5.0.
\ No newline at end of file
diff --git a/docs/en/spring-integration/READEME.md b/docs/en/spring-cloud-data-flow/README.md
similarity index 100%
rename from docs/en/spring-integration/READEME.md
rename to docs/en/spring-cloud-data-flow/README.md
diff --git a/docs/en/spring-security/READEME copy.md b/docs/en/spring-credhub/README.md
similarity index 100%
rename from docs/en/spring-security/READEME copy.md
rename to docs/en/spring-credhub/README.md
diff --git a/docs/en/spring-credhub/spring-credhub.md b/docs/en/spring-credhub/spring-credhub.md
new file mode 100644
index 0000000000000000000000000000000000000000..709810672a44cd39c36f68a5d3ddd58c706f7cd7
--- /dev/null
+++ b/docs/en/spring-credhub/spring-credhub.md
@@ -0,0 +1,534 @@
+# Spring CredHub
+
+Spring CredHub provides client-side support for storing, retrieving, and deleting credentials from a [CredHub](https://docs.cloudfoundry.org/credhub/) server running in a [Cloud Foundry](https://www.cloudfoundry.org/) platform.
+
+CredHub provides an [HTTP API](https://docs.cloudfoundry.org/api/credhub/) to securely store, generate, retrieve, and delete credentials of various types. Spring CredHub provides a Java binding for the CredHub API, making it easy to integrate Spring applications with CredHub.
+
+## 1. Getting started
+
+Spring CredHub supports CredHub server version 1.x and 2.x.
+This library is intended to provide full coverage of the CredHub API - all operations on all credential types.
+
+Spring CredHub has been optimized to work with Spring Boot applications.
+To include Spring CredHub in a Spring Boot application, add some dependencies to the project build file.
+
+### 1.1. Maven Dependencies
+
+Add the Spring CredHub starter to the `dependencies` section of the build file:
+
+```
+<dependencies>
+    <dependency>
+        <groupId>org.springframework.credhub</groupId>
+        <artifactId>spring-credhub-starter</artifactId>
+        <version>2.2.0</version>
+    </dependency>
+</dependencies>
+```
+
+To enable reactive support in Spring CredHub, add the following [Spring WebFlux](https://docs.spring.io/spring-framework/docs/5.3.13/reference/html/web-reactive.html#spring-webflux) dependency to the build file:
+
+```
+<dependencies>
+    <dependency>
+        <groupId>org.springframework.boot</groupId>
+        <artifactId>spring-boot-starter-webflux</artifactId>
+        <version>5.3.13</version>
+    </dependency>
+</dependencies>
+```
+
+To use OAuth2 authentication to CredHub, add the following [Spring Security](https://spring.io/projects/spring-security) dependencies to the build file:
+
+```
+<dependencies>
+    <dependency>
+        <groupId>org.springframework.security</groupId>
+        <artifactId>spring-security-config</artifactId>
+        <version>5.5.3</version>
+    </dependency>
+    <dependency>
+        <groupId>org.springframework.security</groupId>
+        <artifactId>spring-security-oauth2-client</artifactId>
+        <version>5.5.3</version>
+    </dependency>
+</dependencies>
+```
+
+### 1.2. Gradle Dependencies
+
+Add the Spring CredHub starter to the `dependencies` section of the build file:
+
+```
+    dependencies {
+        compile('org.springframework.credhub:spring-credhub-starter:2.2.0')
+    }
+```
+
+To enable reactive support in Spring CredHub, add the following [Spring WebFlux](https://docs.spring.io/spring-framework/docs/5.3.13/reference/html/web-reactive.html#spring-webflux) dependency to the build file:
+
+```
+    dependencies {
+        compile("org.springframework.boot:spring-boot-starter-webflux:5.3.13")
+    }
+```
+
+To use OAuth2 authentication to CredHub, add the following [Spring Security](https://spring.io/projects/spring-security) dependencies to the build file:
+
+```
+    dependencies {
+        compile("org.springframework.security:spring-security-config:5.5.3")
+        compile("org.springframework.security:spring-security-oauth2-client:5.5.3")
+    }
+```
+
+## 2. Spring Boot Configuration
+
+When using the Spring CredHub starter dependency, Spring CredHub can be configured with [Spring Boot application properties](https://docs.spring.io/spring-boot/docs/current/reference/html/boot-features-external-config.html#boot-features-external-config-application-property-files).
+With the proper configuration properties, Spring CredHub will auto-configure a connection to a CredHub server.
+
+### 2.1. Mutual TLS Authentication
+
+An application running on Cloud Foundry can authenticate to a CredHub server deployed to the same platform using mutual TLS.
+Mutual TLS is the default authentication scheme when no other authentication credentials are provided.
+To use mutual TLS authentication to a CredHub server, provide the URL of the CredHub server as an application property:
+
+```
+spring:
+  credhub:
+    url: [CredHub server URL]
+```
+
+See the [CredHub documentation](https://docs.cloudfoundry.org/api/credhub/version/main/#_mutual_tls) for more information on mutual TLS authentication.
+
+An application running on Cloud Foundry can use the internal address `https://credhub.service.cf.internal:8844` to communicate with a CredHub server deployed to the same platform.
+
+### 2.2. OAuth2 Authentication
+
+OAuth2 can be used to authenticate via UAA to any CredHub server.
+Spring CredHub supports client credentials grant tokens for authentication using the following Spring CredHub and Spring Security configuration:
+
+```
+spring:
+  credhub:
+    url: [CredHub server URL]
+    oauth2:
+      registration-id: credhub-client
+  security:
+    oauth2:
+      client:
+        registration:
+          credhub-client:
+            provider: uaa
+            client-id: [OAuth2 client ID]
+            client-secret: [OAuth2 client secret]
+            authorization-grant-type: client_credentials
+        provider:
+          uaa:
+            token-uri: [UAA token server endpoint]
+```
+
+The ID provided in `spring.credhub.oauth2.registration-id` must refer to a client configured under `spring.security.oauth2.client.registration`.
+See the [Spring Boot documentation](https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/#boot-features-security-oauth2) for more information on Spring Boot OAuth2 client configuration.
+
+The OAuth2 client specified in the Spring Security client registration must have CredHub scopes such as `credhub.read` or `credhub.write` to perform most operations.
+See the [CredHub documentation](https://docs.cloudfoundry.org/api/credhub/version/main/#_uaa_oauth2) for more information on OAuth2 authentication with UAA.
+
+#### 2.2.1. Auto-configuration of Spring Security OAuth2
+
+When `spring.credhub.oauth2` properties are set and Spring Security is on the application classpath, Spring CredHub will auto-configure the Spring Security beans required for OAuth2 authentication.
+An application can provide the required Spring Security OAuth2 beans to override the auto-configuration if necessary.
+
+##### Servlet and Non-reactive Applications
+
+Spring CredHub requires beans of the following types, provided by Spring Security, in order to authenticate using OAuth2.
+
+| Required Bean Type | Auto-configured Type |
+|---|---|
+| [`ClientRegistrationRepository`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/registration/ClientRegistrationRepository.html) | [`InMemoryClientRegistrationRepository`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/registration/InMemoryClientRegistrationRepository.html) |
+| [`OAuth2AuthorizedClientRepository`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/web/OAuth2AuthorizedClientRepository.html) | [`AuthenticatedPrincipalOAuth2AuthorizedClientRepository`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/web/AuthenticatedPrincipalOAuth2AuthorizedClientRepository.html) |
+| [`OAuth2AuthorizedClientManager`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/OAuth2AuthorizedClientManager.html) | [`DefaultOAuth2AuthorizedClientManager`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/web/DefaultOAuth2AuthorizedClientManager.html) |
+
+The auto-configured `DefaultOAuth2AuthorizedClientManager` assumes the application is running in a servlet container and has an active `HttpServletRequest`.
+An application might need to provide an alternate implementation of the `OAuth2AuthorizedClientManager` bean such as [`AuthorizedClientServiceOAuth2AuthorizedClientManager`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/AuthorizedClientServiceOAuth2AuthorizedClientManager.html) to process requests outside of an `HttpServletRequest`, as shown in the following example:
+
+```
+/*
+ * Copyright 2016-2020 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.credhub;
+
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.security.oauth2.client.AuthorizedClientServiceOAuth2AuthorizedClientManager;
+import org.springframework.security.oauth2.client.ClientCredentialsOAuth2AuthorizedClientProvider;
+import org.springframework.security.oauth2.client.OAuth2AuthorizedClientService;
+import org.springframework.security.oauth2.client.registration.ClientRegistrationRepository;
+
+@Configuration
+public class CredHubSecurityConfiguration {
+
+    @Bean
+    public AuthorizedClientServiceOAuth2AuthorizedClientManager reactiveClientManager(
+            ClientRegistrationRepository clientRegistrationRepository,
+            OAuth2AuthorizedClientService authorizedClientService) {
+        AuthorizedClientServiceOAuth2AuthorizedClientManager clientManager = new AuthorizedClientServiceOAuth2AuthorizedClientManager(
+                clientRegistrationRepository, authorizedClientService);
+        clientManager.setAuthorizedClientProvider(new ClientCredentialsOAuth2AuthorizedClientProvider());
+        return clientManager;
+    }
+
+}
+```
+
+Refer to the [Spring Security documentation](https://docs.spring.io/spring-security/site/docs/5.5.3/reference/html5/#oauth2login-override-boot-autoconfig) for more information and examples of configuring other beans.
+
+##### Reactive Applications
+
+Spring CredHub requires beans of the following types, provided by Spring Security, in order to authenticate using OAuth2.
+
+| Required Bean Type | Auto-configured Type |
+|---|---|
+| [`ReactiveClientRegistrationRepository`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/registration/ReactiveClientRegistrationRepository.html) | [`InMemoryReactiveClientRegistrationRepository`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/registration/InMemoryReactiveClientRegistrationRepository.html) |
+| [`ServerOAuth2AuthorizedClientRepository`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/web/server/ServerOAuth2AuthorizedClientRepository.html) | [`UnAuthenticatedServerOAuth2AuthorizedClientRepository`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/web/server/UnAuthenticatedServerOAuth2AuthorizedClientRepository.html) |
+| [`ReactiveOAuth2AuthorizedClientManager`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/ReactiveOAuth2AuthorizedClientManager.html) | [`DefaultReactiveOAuth2AuthorizedClientManager`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/web/DefaultReactiveOAuth2AuthorizedClientManager.html) |
+
+The auto-configured `DefaultReactiveOAuth2AuthorizedClientManager` requires an active `ServerHttpRequest` context.
+An application might need to provide an alternate implementation of the `ReactiveOAuth2AuthorizedClientManager` bean such as [`AuthorizedClientServiceReactiveOAuth2AuthorizedClientManager`](https://docs.spring.io/spring-security/site/docs/5.5.3/api/org/springframework/security/oauth2/client/AuthorizedClientServiceReactiveOAuth2AuthorizedClientManager.html) to process requests outside of a `ServerHttpRequest`, as shown in the following example:
+
+```
+/*
+ * Copyright 2016-2020 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.credhub;
+
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.security.oauth2.client.AuthorizedClientServiceReactiveOAuth2AuthorizedClientManager;
+import org.springframework.security.oauth2.client.ClientCredentialsReactiveOAuth2AuthorizedClientProvider;
+import org.springframework.security.oauth2.client.ReactiveOAuth2AuthorizedClientService;
+import org.springframework.security.oauth2.client.registration.ReactiveClientRegistrationRepository;
+
+@Configuration
+public class CredHubReactiveSecurityConfiguration {
+
+    @Bean
+    public AuthorizedClientServiceReactiveOAuth2AuthorizedClientManager reactiveClientManager(
+            ReactiveClientRegistrationRepository clientRegistrationRepository,
+            ReactiveOAuth2AuthorizedClientService authorizedClientService) {
+        AuthorizedClientServiceReactiveOAuth2AuthorizedClientManager clientManager = new AuthorizedClientServiceReactiveOAuth2AuthorizedClientManager(
+                clientRegistrationRepository, authorizedClientService);
+        clientManager.setAuthorizedClientProvider(new ClientCredentialsReactiveOAuth2AuthorizedClientProvider());
+        return clientManager;
+    }
+
+}
+```
+
+Refer to the [Spring Security documentation](https://docs.spring.io/spring-security/site/docs/5.5.3/reference/html5/#oauth2login-override-boot-autoconfig) for more information and examples of configuring other beans.
+
+## 3. Introduction to CredHubOperations
+
+The interface `org.springframework.credhub.core.CredHubOperations` and the implementation `org.springframework.credhub.core.CredHubTemplate` are the central classes in Spring CredHub. `CredHubOperations` provides access to additional operations interfaces that model the full CredHub API:
+
+```
+/**
+ * Get the operations for saving, retrieving, and deleting credentials.
+ */
+CredHubCredentialOperations credentials();
+
+/**
+ * Get the operations for adding, retrieving, and deleting credential permissions.
+ */
+CredHubPermissionOperations permissions();
+
+/**
+ * Get the operations for adding, retrieving, and deleting credential permissions.
+ */
+CredHubPermissionV2Operations permissionsV2();
+
+/**
+ * Get the operations for retrieving, regenerating, and updating certificates.
+ */
+CredHubCertificateOperations certificates();
+
+/**
+ * Get the operations for interpolating service binding credentials.
+ */ +CredHubInterpolationOperations interpolation(); + +/** + * Get the operations for retrieving CredHub server information. + */ +CredHubInfoOperations info(); +``` + +### 3.1. Mapping to CredHub API + +Each method of the `Operations` interfaces maps directly to one endpoint of the CredHub HTTP API. +The following table shows the mapping between the CredHub API and the appropriate Spring CredHub `Operations` interface. + +| [CredHub Credentials API](https://docs.cloudfoundry.org/api/credhub/version/main/#_credentials_endpoint) | [CredHubCredentialOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/credential/CredHubCredentialOperations.html) | +|------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|[CredHub Permissions API](https://docs.cloudfoundry.org/api/credhub/version/main/#_permissions_v1_deprecated) (v1)| [CredHubPermissionOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/permission/CredHubPermissionOperations.html) | +| [CredHub Permissions API](https://docs.cloudfoundry.org/api/credhub/version/main/#_permissions_v2_endpoint) (v2) | [CredHubPermissionV2Operations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/permissionV2/CredHubPermissionV2Operations.html) | +| [CredHub Certificates API](https://docs.cloudfoundry.org/api/credhub/version/main/#_certificates_endpoint) | [CredHubCertificateOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/certificate/CredHubCertificateOperations.html) | +| [CredHub Interpolation API](https://docs.cloudfoundry.org/api/credhub/version/main/#_interpolation_endpoint) |[CredHubInterpolationOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/interpolation/CredHubInterpolationOperations.html)| +| [CredHub Information API](https://docs.cloudfoundry.org/api/credhub/version/main/#_info_endpoint) | [CredHubInfoOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/info/CredHubInfoOperations.html) | + +### 3.2. CredHubOperations Auto-configuration + +A `CredHubOperations` Spring bean is created using Spring Boot auto-configuration when application properties are properly configured. +Application classes can autowire an instance of this bean to interact with a CredHub server. + +``` +/* + * Copyright 2016-2020 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.example.credhub;
+
+import org.springframework.credhub.core.CredHubOperations;
+import org.springframework.credhub.support.CredentialDetails;
+import org.springframework.credhub.support.SimpleCredentialName;
+import org.springframework.credhub.support.password.PasswordCredential;
+import org.springframework.credhub.support.password.PasswordParameters;
+import org.springframework.credhub.support.password.PasswordParametersRequest;
+import org.springframework.stereotype.Component;
+
+@Component
+public class CredHubService {
+
+    private final CredHubOperations credHubOperations;
+
+    private final SimpleCredentialName credentialName;
+
+    public CredHubService(CredHubOperations credHubOperations) {
+        this.credHubOperations = credHubOperations;
+
+        this.credentialName = new SimpleCredentialName("example", "password");
+    }
+
+    public String generatePassword() {
+        PasswordParameters parameters = PasswordParameters.builder().length(12).excludeLower(false).excludeUpper(false)
+                .excludeNumber(false).includeSpecial(true).build();
+
+        CredentialDetails<PasswordCredential> password = this.credHubOperations.credentials()
+                .generate(PasswordParametersRequest.builder().name(this.credentialName).parameters(parameters).build());
+
+        return password.getValue().getPassword();
+    }
+
+    public String getPassword() {
+        CredentialDetails<PasswordCredential> password = this.credHubOperations.credentials()
+                .getByName(this.credentialName, PasswordCredential.class);
+
+        return password.getValue().getPassword();
+    }
+
+}
+```
+
+## 4. Introduction to ReactiveCredHubOperations
+
+The interface `org.springframework.credhub.core.ReactiveCredHubOperations` and the implementation `org.springframework.credhub.core.ReactiveCredHubTemplate` are the central classes in Spring CredHub reactive support. `ReactiveCredHubOperations` provides access to additional operations interfaces that model the full CredHub API:
+
+```
+/**
+ * Get the operations for saving, retrieving, and deleting credentials.
+ */
+ReactiveCredHubCredentialOperations credentials();
+
+/**
+ * Get the operations for adding, retrieving, and deleting credential permissions.
+ */
+ReactiveCredHubPermissionOperations permissions();
+
+/**
+ * Get the operations for adding, retrieving, and deleting credential permissions.
+ */
+ReactiveCredHubPermissionV2Operations permissionsV2();
+
+/**
+ * Get the operations for retrieving, regenerating, and updating certificates.
+ */
+ReactiveCredHubCertificateOperations certificates();
+
+/**
+ * Get the operations for interpolating service binding credentials.
+ */
+ReactiveCredHubInterpolationOperations interpolation();
+
+/**
+ * Get the operations for retrieving CredHub server information.
+ */
+ReactiveCredHubInfoOperations info();
+```
+
+### 4.1. Mapping to CredHub API
+
+Each method of the `Reactive…​Operations` interfaces maps directly to one endpoint of the CredHub HTTP API.
+The following table shows the mapping between the CredHub API and the appropriate Spring CredHub `Reactive…​Operations` interface.
+ +| [CredHub Credentials API](https://docs.cloudfoundry.org/api/credhub/version/main/#_credentials_endpoint) | [ReactiveCredHubCredentialOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/credential/ReactiveCredHubCredentialOperations.html) | +|------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|[CredHub Permissions API](https://docs.cloudfoundry.org/api/credhub/version/main/#_permissions_v1_deprecated) (v1)| [ReactiveCredHubPermissionOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/permission/ReactiveCredHubPermissionOperations.html) | +| [CredHub Permissions API](https://docs.cloudfoundry.org/api/credhub/version/main/#_permissions_v2_endpoint) (v2) | [ReactiveCredHubPermissionV2Operations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/permissionV2/ReactiveCredHubPermissionV2Operations.html) | +| [CredHub Certificates API](https://docs.cloudfoundry.org/api/credhub/version/main/#_certificates_endpoint) | [ReactiveCredHubCertificateOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/certificate/ReactiveCredHubCertificateOperations.html) | +| [CredHub Interpolation API](https://docs.cloudfoundry.org/api/credhub/version/main/#_interpolation_endpoint) |[ReactiveCredHubInterpolationOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/interpolation/ReactiveCredHubInterpolationOperations.html)| +| [CredHub Information API](https://docs.cloudfoundry.org/api/credhub/version/main/#_info_endpoint) | [ReactiveCredHubInfoOperations](https://docs.spring.io/spring-credhub/docs/2.2.0/api/index.html?org/springframework/credhub/core/info/ReactiveCredHubInfoOperations.html) | + +### 4.2. ReactiveCredHubOperations Auto-configuration + +A `ReactiveCredHubOperations` Spring bean is created using Spring Boot auto-configuration when application properties are properly configured and the Spring WebFlux library is on the classpath. +Application classes can autowire an instance of this bean to interact with a CredHub server. + +``` +/* + * Copyright 2016-2020 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.example.credhub;
+
+import reactor.core.publisher.Mono;
+
+import org.springframework.credhub.core.ReactiveCredHubOperations;
+import org.springframework.credhub.support.SimpleCredentialName;
+import org.springframework.credhub.support.password.PasswordCredential;
+import org.springframework.credhub.support.password.PasswordParameters;
+import org.springframework.credhub.support.password.PasswordParametersRequest;
+import org.springframework.stereotype.Component;
+
+@Component
+public class ReactiveCredHubService {
+
+    private final ReactiveCredHubOperations credHubOperations;
+
+    private final SimpleCredentialName credentialName;
+
+    public ReactiveCredHubService(ReactiveCredHubOperations credHubOperations) {
+        this.credHubOperations = credHubOperations;
+
+        this.credentialName = new SimpleCredentialName("example", "password");
+    }
+
+    public Mono<String> generatePassword() {
+        PasswordParameters parameters = PasswordParameters.builder().length(12).excludeLower(false).excludeUpper(false)
+                .excludeNumber(false).includeSpecial(true).build();
+
+        return this.credHubOperations.credentials()
+                .generate(PasswordParametersRequest.builder().name(this.credentialName).parameters(parameters).build(),
+                        PasswordCredential.class)
+                .map((password) -> password.getValue().getPassword());
+    }
+
+    public Mono<String> getPassword() {
+        return this.credHubOperations.credentials().getByName(this.credentialName, PasswordCredential.class)
+                .map((password) -> password.getValue().getPassword());
+    }
+
+}
+```
+
+## 5. HTTP Client Support
+
+Spring CredHub `CredHubOperations` supports multiple HTTP client libraries to communicate with the CredHub API. The following libraries are supported:
+
+* Java’s built-in `HttpURLConnection` (default)
+
+* [Apache HttpComponents](https://hc.apache.org/)
+
+* [OkHttp 3](https://square.github.io/okhttp/)
+
+* [Netty](https://netty.io/)
+
+Choosing a specific client library requires the appropriate dependency to be available on the application classpath.
+The application classpath will be inspected for each client library in the order listed above.
+
+Spring CredHub `ReactiveCredHubOperations` only supports the Netty HTTP client library.
+
+### 5.1. Apache HttpComponents
+
+To use Apache HttpComponents to communicate with CredHub, add the following dependency to the application:
+
+```
+<dependency>
+    <groupId>org.apache.httpcomponents</groupId>
+    <artifactId>httpclient</artifactId>
+</dependency>
+```
+
+| |Apache HttpClient’s [wire logging](https://hc.apache.org/httpcomponents-client-4.5.x/logging.html) can be enabled through logging configuration. Make sure to not accidentally enable wire logging as logs may expose traffic (including tokens and secrets) between your application and CredHub in plain text.|
+|---|---|
+
+### 5.2. OkHttp 3
+
+To use OkHttp 3 to communicate with CredHub, add the following dependency to the application:
+
+```
+<dependency>
+    <groupId>com.squareup.okhttp3</groupId>
+    <artifactId>okhttp</artifactId>
+</dependency>
+```
+
+### 5.3. Netty
+
+To use Netty to communicate with CredHub, add the following dependency to the application:
+
+```
+<dependency>
+    <groupId>io.netty</groupId>
+    <artifactId>netty-all</artifactId>
+</dependency>
+```
\ No newline at end of file
diff --git a/docs/en/spring-security/READEME.md b/docs/en/spring-flo/README.md
similarity index 100%
rename from docs/en/spring-security/READEME.md
rename to docs/en/spring-flo/README.md
diff --git a/docs/en/spring-flo/spring-flo.md b/docs/en/spring-flo/spring-flo.md
new file mode 100644
index 0000000000000000000000000000000000000000..c3d6a0ca910a8ee92cb63287412c17d0155b7585
--- /dev/null
+++ b/docs/en/spring-flo/spring-flo.md
@@ -0,0 +1,378 @@
+# Welcome to the Spring Flo wiki!
+Spring Flo is a set of [Angular JS](https://angularjs.org/) directives for a diagram editor able to represent a DSL graphically and synchronize the graphical and textual representations of that DSL. Graphical representation is done with a [Joint JS](http://jointjs.com/) graph object; the textual representation can be a plain HTML element (such as a `textarea`).
+
+The `flo-editor` directive markup translates into a page with a toolbar for buttons (Layout and Show/Hide Palette), a text area for the DSL, and the Flo editor for the graph representation of the DSL.
+
+## All the extension points:
+
+### Metamodel Service
+This service enables the domain in which Flo is being used to specify what kinds of elements are being connected together in the graph and also how the graph should be converted to-and-from a textual representation. [Sample metamodel service is here](https://github.com/spring-projects/spring-flo/blob/master/samples/spring-flo-sample/src/main/resources/static/js/metamodel-service.js).
+#### textToGraph(flo, definition)
+Sets the graph contents for the `flo` object based on the textual representation of the DSL from the `definition` object. Text is transformed into the corresponding Joint JS graph content. The graph is to be populated via `flo` object functions such as `flo.createLink()` and `flo.createNode()` and cleared with `flo.clearGraph()`.
+#### graphToText(flo, definition)
+Converts the current graph available from the `flo` object into a textual representation which is then set (as the `text` property) on the `definition` object.
+#### load()
+Returns a promise that resolves to a `metamodel` object. The `metamodel` object layout is a map of element `group` names to a map of elements that belong to this `group`. The map of elements that belong to the `group` is a mapping between an element's `name` and the element's [Metadata Object](https://github.com/spring-projects/spring-flo/wiki#element-metadata).
+#### refresh() _(Optional)_
+Refreshes the meta-model and returns a promise that is resolved to the same result as [load()](#load). Refresh should also fire an event to `metamodel` change listeners.
+#### encodeTextToDSL(text) _(Optional)_
+Encodes DSL element property value text to the format required by the DSL. An example is converting multiline text into the single line required by the DSL format. Used to display the property value in a human readable format.
+#### decodeTextFromDSL(dsl) _(Optional)_
+Decodes DSL element property value text from the DSL format. An example is converting single line text into multiline text, i.e. replacing escaped line breaks. Used to set a property value for a DSL element entered by the user via the UI.
+#### subscribe(listener) _(Optional)_
+Adds a listener to `metamodel` events. (See [Metamodel Listener](#metamodel-listener))
+#### unsubscribe(listener) _(Optional)_
+Removes a `metamodel` events listener.
(See [Metamodel Listener](#metamodel-listener))
+#### isValidPropertyValue(element, key, value) _(Optional)_
+Check if the value being specified for the key on the specified element is allowed. For example: if the key takes an integer, don't allow alphabetic characters.
+
+### Render Service
+The service is responsible for the visual representation of graph elements based on the metadata (coming from the [Metamodel Service](#metamodel-service)). This service is **optional**. [Sample render service is here](https://github.com/spring-projects/spring-flo/blob/master/samples/spring-flo-sample/src/main/resources/static/js/render-service.js).
+#### createNode(metadata, properties) _(Optional)_
+Creates an instance of a Joint JS graph node model object (`joint.dia.Element`). Parameters that may affect the kind of node model object are the element's [metadata](#element-metadata) and a map of properties (if any passed in).
+#### createLink(source, target, metadata, properties) _(Optional)_
+Creates an instance of a Joint JS graph link model object (`joint.dia.Link`). Parameters that may affect the kind of link model object are the element's [metadata](#element-metadata), a map of properties (if any passed in), and the source and target elements.
+#### createHandle(kind, parent) _(Optional)_
+Creates an instance of a Joint JS graph node model object (`joint.dia.Element`). An example of a handle is a shape shown next to the parent shape; interacting with it results in some editing action over the parent shape. Parameters that may affect the kind of handle model object are `kind` of type `string` (user defined, i.e. `delete`, `resize`, etc.) and the handle's `parent` element. This function is only called by the framework if the Editor Service `createHandles()` function is implemented.
+#### createDecoration(kind, parent) _(Optional)_
+Creates an instance of a Joint JS graph node model object (`joint.dia.Element`). An example of a decoration is a validation marker displayed over the parent shape. Parameters that may affect the kind of decoration model object are `kind` of type `string` and the decoration's `parent` element. Note that the `kind` parameter is coming from the framework (unlike for the `createHandle` function). This function is only called by the framework if the Editor Service `validateNode()` function is implemented. (At the moment decorations are only the validation error markers.)
+#### initializeNewNode(node, context) _(Optional)_
+Performs any additional initialization of a newly created graph `node` when the `node` is already added to the Joint JS graph and rendered on the canvas, e.g. the element's SVG DOM structure is available. The `context` parameter is an object with `paper` and `graph` properties applicable for the `node`. Useful to perform any kind of initialization on a node when its SVG DOM is appended to the page DOM. Examples: fit a string label inside a shape, use an angular directive on a shape, add DOM listeners, etc.
+#### initializeNewLink(link, context) _(Optional)_
+Performs any additional initialization of a newly created graph `link` when the `link` is already added to the Joint JS graph and rendered on the canvas, e.g. the element's SVG DOM structure is available. The `context` parameter is an object with `paper` and `graph` properties applicable for the `link`. Useful to perform any kind of initialization on a link when its SVG DOM is appended to the page DOM. Examples: use an angular directive on a shape, add DOM listeners, etc.
+#### initializeNewHandle(handle, context) _(Optional)_
+Performs any additional initialization of a newly created graph `handle` when the `handle` is already added to the Joint JS graph and rendered on the canvas, e.g. the element's SVG DOM structure is available. The `context` parameter is an object with `paper` and `graph` properties applicable for the `handle`. Useful to perform any kind of initialization on a handle shape when its SVG DOM is appended to the page DOM. Examples: fit a string label inside a shape, use an angular directive on a shape, add DOM listeners, etc.
+#### initializeNewDecoration(decoration, context) _(Optional)_
+Performs any additional initialization of a newly created graph `decoration` when the `decoration` is already added to the Joint JS graph and rendered on the canvas, e.g. the element's SVG DOM structure is available. The `context` parameter is an object with `paper` and `graph` properties applicable for the `decoration`. Useful to perform any kind of initialization on a decoration shape when its SVG DOM is appended to the page DOM. Examples: fit a string label inside a shape, use an angular directive on a shape, add DOM listeners, etc.
+#### getNodeView() _(Optional)_
+Returns an instance of `joint.dia.ElementView`. It can also be a function of the form `function(element)` that takes an element model and should return an object responsible for rendering that model onto the screen. Under normal circumstances this function does not need to be implemented and the Joint JS view object created by the framework should be enough. Implement this function if different nodes require different Joint JS views or the view has some special rendering (i.e. embedded HTML elements). See [Joint JS Paper Options](http://jointjs.com/api#joint.dia.Paper:options)
+#### getLinkView() _(Optional)_
+Returns an instance of Joint JS `joint.dia.LinkView`. Default is `joint.dia.LinkView`. It can also be a function of the form `function(link)` that takes a link model and should return an object responsible for rendering that model onto the screen. Under normal circumstances this function does not need to be implemented and the Joint JS view object created by the framework should be enough. Implement this function if different links require different Joint JS views or the view has some special rendering (i.e. a pattern applied to a line - `joint.shapes.flo.PatternLinkView`). See [Joint JS Paper Options](http://jointjs.com/api#joint.dia.Paper:options)
+#### layout(paper) _(Optional)_
+Responsible for laying out the Joint JS graph that can be derived from the passed in `paper` parameter (`paper.model`).
+#### handleLinkEvent(paper, event, link) _(Optional)_
+Responsible for handling an `event` that occurred on the `link` that belongs to the passed in Joint JS `paper` object. The `event` parameter is a `string` with possible values: `'add'`, `'remove'` or Joint JS native link change events such as `'change:source'`, `'change:target'`, etc. See [Joint JS Link Events](http://jointjs.com/api#joint.dia.Link:events)
+#### isSemanticProperty(propertyPath, element) _(Optional)_
+Returns `true` for a `string` property attribute path `propertyPath` on an `element` if the graph needs to perform some visual update based on the `propertyPath` value change (not needed for properties under `props` on an `element`). The visual update is performed by [refreshVisuals()](#refreshVisuals).
The property path `propertyPath` is relative to the Joint JS element `attrs` property.
+#### refreshVisuals(element, propertyPath, paper) _(Optional)_
+Performs some visual update of the graph or, which is more likely, of the passed in `element` displayed on the Joint JS `paper`, based on the changed property specified by `propertyPath`.
+#### getLinkAnchorPoint(linkView, view, port, reference) _(Optional)_
+This function allows you to customize what the anchor points of links are. The function must return a point (with `x` and `y` properties) where the link anchors to the element. The function takes the link view, the element view, the `port` (SVG element) the link should stick to, and a reference point (either the closest vertex or the anchor point on the other side of the link).
+
+### Editor Service
+The service responsible for providing the Flo editor with rich editing capabilities such as handles around selected shapes, custom drag-and-drop behaviour, and live and static validation. This service is **optional**. [Sample editor service is here](https://github.com/spring-projects/spring-flo/blob/master/samples/spring-flo-sample/src/main/resources/static/js/editor-service.js)
+#### createHandles(flo, createHandle, selected) _(Optional)_
+Called when a node is selected and handles can be displayed. Handles are usually small shapes around the `selected` Joint JS node in the `flo` editor; interacting with them modifies the `selected` node (i.e. resize or delete handles). Call the `createHandle(selected, kind, clickHandlerFunction, coordinate)` function to create a handle. The `kind` parameter is a `string` kind of handle, `clickHandlerFunction` is performed when the handle has been clicked on, and `coordinate` is the place to put the handle shape. Note that if this function is implemented then the Render Service `createHandle(...)` function must be implemented as well. The framework will remove handles automatically when needed, hence no need to worry about this on the client side.
+#### validatePort(paper, view, portView) _(Optional)_
+Decide whether to create a link if the user clicks a port. The `portView` is the DOM element representing the port, `view` is the port's parent Joint JS view object shown on the Joint JS `paper`.
+#### validateLink(flo, cellViewS, portS, cellViewT, portT, end, linkView) _(Optional)_
+Decide whether to allow or disallow a connection between the source view/port (`cellViewS`/`portS`) and target view/port (`cellViewT`/`portT`). The `end` is either `'source'` or `'target'` and tells which end of the link is being dragged. This is useful for defining whether, for example, a link starting in a port POut of element A can lead to a port PIn of element B.
+#### calculateDragDescriptor(flo, draggedView, targetUnderMouse, coordinate, context) _(Optional)_
+Called when dragging of a node `draggedView` is in progress over the `targetUnderMouse` Joint JS graph element (node or link) at `coordinate`. There are also the `flo` object parameter and the `context` object, which currently just has a `boolean` property `palette` to denote whether the drag and drop is occurring on the palette or the canvas. The function should return a [Drag Descriptor Object](#drag-descriptor).
+#### handleNodeDropping(flo, dragDescriptor) _(Optional)_
+Performs necessary graph manipulations when the node being dragged is dropped. The `dragDescriptor` [Drag Descriptor](#drag-descriptor) should have the mandatory information on what is being dragged and where it's being dropped.
The `flo` object parameter helps to make the necessary graph modifications +#### showDragFeedback(flo, dragDescriptor) _(Optional)_ +Any custom visual feedback shown while dragging a node over some graph element (node or link) can be drawn by this function. The `dragDescriptor` parameter is a [Drag Descriptor Object](#drag-descriptor) with complete information about the dragging in progress, and the `flo` object helps with drawing the feedback using Joint JS +#### hideDragFeedback(flo, dragDescriptor) _(Optional)_ +Removes any custom visual feedback drawn by [showDragFeedback()](#show-drag-feedback). Has the same parameters. +#### validateNode(flo, node) _(Optional)_ +Returns a JavaScript array of `string` error messages resulting from validating the `node` Joint JS graph node on the canvas in the `flo` editor +#### preDelete(flo, deletedElement) _(Optional)_ +Called prior to removal of the specified `deletedElement`, allowing extra tidy-up before that happens. For example: removing any dependent Joint JS graph elements related to the element about to be deleted. +#### interactive _(Optional)_ +If set to `false`, interaction with elements and links is disabled. If it is a function, it is called with the cell view in action and the name of the method it is evaluated in (`'pointerdown'`, `'pointermove'`, ...). If the returned value of such a function is `false`, interaction is disabled for that action. For links, there are special properties of the interaction object that are useful for disabling the default behaviour. These properties are `vertexAdd`, `vertexMove`, `vertexRemove` and `arrowheadMove`. By setting any of these properties to `false`, you can disable the related default action on links. +#### allowLinkVertexEdit _(Optional)_ +If set to `false`, link vertex (or bend point) creation and editing (e.g. movement) is not allowed in the editor. + +## Data structure reference +### Flo +This object is created by the `flo-editor` directive controller and contains various editor-specific properties and functions. +#### scheduleUpdateGraphRepresentation() +Schedules an asynchronous update of the graph DSL representation based on the text DSL representation. +#### updateGraphRepresentation() +Asynchronously updates the graph DSL representation based on the text DSL representation. A promise is returned which gets resolved when the update completes. +#### updateTextRepresentation() +Asynchronously updates the text DSL representation (`definition` object) based on the graph DSL representation. A promise is returned which gets resolved when the update completes. +#### performLayout() +Arranges the nodes and links of the graph on the canvas. +#### clearGraph() +Clears the canvas of all nodes and links. With syncing on, this also causes the text DSL representation to be cleared. +#### getGraph() +Returns a reference to the `joint.dia.Graph` object instance of the canvas contents (the graph model; see the [Joint JS Graph API](http://jointjs.com/api#joint.dia.Graph)) +#### getPaper() +Returns a reference to the `joint.dia.Paper` object instance of the canvas (the graph view object; see the [Joint JS Paper API](http://jointjs.com/api#joint.dia.Paper)) +#### enableSyncing(enable) +Enables or disables the textual and graph DSL representation synchronization mechanism based on the passed `boolean` parameter `enable`. Useful when the textual DSL representation UI is collapsed. 
+#### getSelection() +Returns the currently selected graph model element (node or link) on the canvas +#### zoomPercent(percent) +Angular getter/setter function for the zoom value on the canvas. Sets the zoom percent value if the integer `number` parameter is supplied. Returns the integer percent value if the parameter is missing (getter mode) +#### gridSize(gridSize) +Angular getter/setter function for the canvas grid size in pixels. Sets the grid width value if the integer `number` parameter `gridSize` is supplied. Returns the current grid size value if the parameter is missing (getter mode). Note that setting the grid width to `1` turns the grid off. Invalid values for `gridSize` are ignored +#### getMinZoom() +Returns the integer `number` minimum allowed value for the zoom percent. Useful for setting the proper range for zoom controls. Needed by the zoom control on the canvas (if it is set to be shown). The value equals `5` by default (5%). +#### getMaxZoom() +Returns the integer `number` maximum allowed value for the zoom percent. Useful for setting the proper range for zoom controls. Needed by the zoom control on the canvas (if it is set to be shown). The value equals `400` by default (400%). +#### getZoomStep() +Returns the integer `number` zoom percent increment/decrement step. Needed by the zoom control on the canvas (if it is set to be shown). The value equals `5` by default (5% increment/decrement value). +#### fitToPage() +Fits the whole graph into the canvas's viewport (i.e. no need to scroll to look for content on the canvas). Adjusts the zoom level and scroll position appropriately +#### readOnlyCanvas(newValue) +Angular getter/setter function for the canvas "read-only" property. A read-only canvas does not allow any user editing interaction with shapes on the canvas. Sets the read-only property based on the passed-in `newValue` parameter; as a result, the canvas toggles its read-only behaviour right away. Returns the current "read-only" state value if the parameter is missing (getter mode). +#### createNode(metadata, properties, location) +Creates and returns a newly created Joint JS graph node (instance of `joint.dia.Element`) based on the graph node `metadata` object (see [Element Metadata](#element-metadata)), the `properties` key-value pair map, and the location on the canvas (an object with `x` and `y` properties). The new node is also added to the Flo canvas Joint JS `graph`, and hence to the Joint JS `paper`, and appears on the canvas right away, before this function returns the result. +#### createLink(source, target, metadata, properties) +Creates and returns a newly created Joint JS graph link (instance of `joint.dia.Link`) between the `source` and `target` nodes (of type `joint.dia.Element`) based on the graph link `metadata` object (see [Element Metadata](#element-metadata)) and the `properties` key-value pair map. The new link is also added to the Flo canvas Joint JS `graph`, and hence to the Joint JS `paper`, and appears on the canvas right away, before this function returns the result. + +### Definition +This object holds data related to the DSL's textual representation. Typically this object should at least have a `text` property of type `string` for the DSL text, but it can also have other properties that might be added by the client's Metamodel Service graph-text conversion functions. + +### Metamodel Listener +Typically the Metamodel object is loaded asynchronously via an HTTP request. If metadata is cached by the service, then it might be useful to register listeners. 
The Flo editor palette automatically rebuilds itself if the metamodel has changed. +```javascript +{ + metadataError: function(data) { + /* Error loading metadata has occurred */ + }, + metadataRefresh: function() { + /* Metadata is about to be refreshed */ + }, + metadataChanged: function(data) { + /* New metadata is available */ + } +} +``` +### Drag Descriptor +The API client is free to add extra properties to this object (e.g. they may help when drawing visual feedback) +```javascript +{ + context: context, /* String 'palette' or 'canvas' */ + source: { + cell: draggedNode, /* Joint JS graph node being dragged */ + selector: selector, /* Optional. Joint JS CSS class selector for the subelement of the dragged node */ + port: portType /* Optional. Involved port DOM element type attribute value == port Joint JS markup 'type' property */ + }, + target: { + cell: targetNode, /* Joint JS graph node target under mouse element */ + selector: selector, /* Optional. Joint JS CSS class selector for the element under mouse within the targetNode */ + port: portType /* Optional. Sub-element under mouse is a port. Port DOM element type attribute value == port Joint JS markup 'type' property */ + } +}; +``` + +### Joint JS Graph Node Markup +```javascript +model: /* Joint JS model object for a module shape */ + ... + attributes: + ... + angle: 0, /* Joint JS property - rotation angle */ + + id: "02be8001-ea1e-4f30-a94e-9503da5964b5" /* Joint JS property - element model UUID */ + + position: /* Joint JS property - coordinates of the shape's bounding rectangle */ + x: 119 + y: 46 + + size: /* Joint JS property - size of the shape's bounding rectangle */ + height: 40 + width: 120 + + type: "sinspctr.IntNode" /* Flo property - internal, type (node, link, handle, decoration, etc.) */ + + z: 1 /* Joint JS property - z-index of the shape */ + + ports: /* Joint JS property - internal, ports available on the shape */ + input: + id: "input" + output: + id: "output" + tap: + id: "tap" + + attrs: /* Joint JS property - user defined rendering constructs and semantic properties */ + + . /*\ */ + .border /* \ */ + .box /* \ */ + .input-port /* \ */ + .label1 /* \___User defined rendering constructs implied by the markup */ + .label2 /* / */ + .output-port /* / */ + .shape /* / */ + .stream-label /* / */ + .tap-port /*/ */ + + metadata: /* Flo property. Node metadata supplied by Metamodel Service */ + + props: /* Flo property. Semantic properties of the element. Name <-> value pair map */ + dir: "/Users/x/tmp" + file: "temp.tmp" + debug: true + + ... + ... +... +``` + +### Element Metadata +Graphical element metadata supplied by the Metamodel Service +```javascript +metadata: { + + get: function(), /* function taking property key string as a parameter */ + /* Returns promise that resolves to the metadata object of the property */ + /* See snippet below showing the format of a property metadata */ + + group: "source", /* Category/Group of an element. 
Translates into palette groups of elements */ + + name: "file", /* Name or Type of an element (should be unique within its group) */ + + metadata: { /* Additional metadata for the element */ + titleProperty: 'props/title', /* Property to be displayed at the top of all properties in properties Div */ + noEditableProps: false, /* If true then element doesn't have properties to edit and properties Div is not shown */ + 'allow-additional-properties': true, /* Allows user to create new properties for element in the properties Div */ + } + +} +``` +An element's property metadata is expected to be as follows: +```javascript + properties: { + info: { + defaultValue: null, + description: "General information about the file", + id: "info", + name: "info", + shortDescription: "File Info" + }, + + language: { + defaultValue: "English", + description: "Language of the file contents", + id: "language", + name: "language", + shortDescription: "Text Language" + }, + ... +``` diff --git a/docs/en/spring-session/READEME.md b/docs/en/spring-for-apache-kafka/README.md similarity index 100% rename from docs/en/spring-session/READEME.md rename to docs/en/spring-for-apache-kafka/README.md diff --git a/docs/en/spring-for-apache-kafka/spring-kafka.md b/docs/en/spring-for-apache-kafka/spring-kafka.md new file mode 100644 index 0000000000000000000000000000000000000000..f8f4ad8c9ac116cf3ab384413136953f735f872e --- /dev/null +++ b/docs/en/spring-for-apache-kafka/spring-kafka.md @@ -0,0 +1,7929 @@ +# Spring for Apache Kafka + +## 1. Preface + +The Spring for Apache Kafka project applies core Spring concepts to the development of Kafka-based messaging solutions. +We provide a “template” as a high-level abstraction for sending messages. +We also provide support for Message-driven POJOs. + +## 2. What’s new? + +### 2.1. What’s New in 2.8 Since 2.7 + +This section covers the changes made from version 2.7 to version 2.8. +For changes in earlier versions, see [[history]](#history). + +#### 2.1.1. Kafka Client Version + +This version requires the 3.0.0 `kafka-clients`. + +| |When using transactions, `kafka-clients` 3.0.0 and later no longer support `EOSMode.V2` (aka `BETA`) (and automatic fallback to `V1` - aka `ALPHA`) with brokers earlier than 2.5; you must therefore override the default `EOSMode` (`V2`) with `V1` if your brokers are older (or upgrade your brokers).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See [Exactly Once Semantics](#exactly-once) and [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics) for more information. + +#### 2.1.2. Package Changes + +Classes and interfaces related to type mapping have been moved from `…​support.converter` to `…​support.mapping`. + +* `AbstractJavaTypeMapper` + +* `ClassMapper` + +* `DefaultJackson2JavaTypeMapper` + +* `Jackson2JavaTypeMapper` + +#### 2.1.3. Out of Order Manual Commits + +The listener container can now be configured to accept manual offset commits out of order (usually asynchronously). +The container will defer the commit until the missing offset is acknowledged. +See [Manually Committing Offsets](#ooo-commits) for more information. + +#### 2.1.4. 
`@KafkaListener` Changes + +It is now possible to specify whether the listener method is a batch listener on the method itself. +This allows the same container factory to be used for both record and batch listeners. + +See [Batch Listeners](#batch-listeners) for more information. + +Batch listeners can now handle conversion exceptions. + +See [Conversion Errors with Batch Error Handlers](#batch-listener-conv-errors) for more information. + +`RecordFilterStrategy`, when used with batch listeners, can now filter the entire batch in one call. +See the note at the end of [Batch Listeners](#batch-listeners) for more information. + +#### 2.1.5. `KafkaTemplate` Changes + +You can now receive a single record, given the topic, partition and offset. +See [Using `KafkaTemplate` to Receive](#kafka-template-receive) for more information. + +#### 2.1.6. `CommonErrorHandler` Added + +The legacy `GenericErrorHandler` and its sub-interface hierarchies for record and batch listeners have been replaced by a new single interface `CommonErrorHandler` with implementations corresponding to most legacy implementations of `GenericErrorHandler`. +See [Container Error Handlers](#error-handlers) for more information. + +#### 2.1.7. Listener Container Changes + +The `interceptBeforeTx` container property is now `true` by default. + +The `authorizationExceptionRetryInterval` property has been renamed to `authExceptionRetryInterval` and now applies to `AuthenticationException` s in addition to the `AuthorizationException` s covered previously. +Both exceptions are considered fatal and the container will stop by default, unless this property is set. + +See [Using `KafkaMessageListenerContainer`](#kafka-container) and [Listener Container Properties](#container-props) for more information. + +#### 2.1.8. Serializer/Deserializer Changes + +The `DelegatingByTopicSerializer` and `DelegatingByTopicDeserializer` are now provided. +See [Delegating Serializer and Deserializer](#delegating-serialization) for more information. + +#### 2.1.9. `DeadLetterPublishingRecoverer` Changes + +The property `stripPreviousExceptionHeaders` is now `true` by default. + +See [Managing Dead Letter Record Headers](#dlpr-headers) for more information. + +#### 2.1.10. Retryable Topics Changes + +Now you can use the same factory for retryable and non-retryable topics. +See [Specifying a ListenerContainerFactory](#retry-topic-lcf) for more information. + +There’s now a manageable global list of fatal exceptions that will make the failed record go straight to the DLT. +Refer to [Exception Classifier](#retry-topic-ex-classifier) to see how to manage it. + +The `KafkaBackOffException` thrown when using the retryable topics feature is now logged at DEBUG level. +See [[change-kboe-logging-level]](#change-kboe-logging-level) if you need to change the logging level back to WARN or set it to any other level. + +## 3. Introduction + +This first part of the reference documentation is a high-level overview of Spring for Apache Kafka, the underlying concepts, and some code snippets that can help you get up and running as quickly as possible. + +### 3.1. Quick Tour + +Prerequisites: You must install and run Apache Kafka. +Then you must put the Spring for Apache Kafka (`spring-kafka`) JAR and all of its dependencies on your class path. +The easiest way to do that is to declare a dependency in your build tool. + +If you are not using Spring Boot, declare the `spring-kafka` jar as a dependency in your project. 
+ +Maven + +``` +<dependency> + <groupId>org.springframework.kafka</groupId> + <artifactId>spring-kafka</artifactId> + <version>2.8.3</version> +</dependency> +``` + +Gradle + +``` +compile 'org.springframework.kafka:spring-kafka:2.8.3' +``` + +| |When using Spring Boot (and you haven’t used start.spring.io to create your project), omit the version and Boot will automatically bring in the correct version that is compatible with your Boot version:| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Maven + +``` +<dependency> + <groupId>org.springframework.kafka</groupId> + <artifactId>spring-kafka</artifactId> +</dependency> +``` + +Gradle + +``` +compile 'org.springframework.kafka:spring-kafka' +``` + +However, the quickest way to get started is to use [start.spring.io](https://start.spring.io) (or the wizards in Spring Tool Suite and IntelliJ IDEA) and create a project, selecting 'Spring for Apache Kafka' as a dependency. + +#### 3.1.1. Compatibility + +This quick tour works with the following versions: + +* Apache Kafka Clients 3.0.0 + +* Spring Framework 5.3.x + +* Minimum Java version: 8 + +#### 3.1.2. Getting Started + +The simplest way to get started is to use [start.spring.io](https://start.spring.io) (or the wizards in Spring Tool Suite and IntelliJ IDEA) and create a project, selecting 'Spring for Apache Kafka' as a dependency. +Refer to the [Spring Boot documentation](https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-features.html#boot-features-kafka) for more information about its opinionated auto configuration of the infrastructure beans. + +Here is a minimal consumer application. + +##### Spring Boot Consumer App + +Example 1. Application + +Java + +``` +@SpringBootApplication +public class Application { + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + + @Bean + public NewTopic topic() { + return TopicBuilder.name("topic1") + .partitions(10) + .replicas(1) + .build(); + } + + @KafkaListener(id = "myId", topics = "topic1") + public void listen(String in) { + System.out.println(in); + } + +} +``` + +Kotlin + +``` +@SpringBootApplication +class Application { + + @Bean + fun topic() = NewTopic("topic1", 10, 1) + + @KafkaListener(id = "myId", topics = ["topic1"]) + fun listen(value: String?) { + println(value) + } + +} + +fun main(args: Array<String>) = runApplication<Application>(*args) +``` + +Example 2. application.properties + +``` +spring.kafka.consumer.auto-offset-reset=earliest +``` + +The `NewTopic` bean causes the topic to be created on the broker; it is not needed if the topic already exists. + +##### Spring Boot Producer App + +Example 3. Application + +Java + +``` +@SpringBootApplication +public class Application { + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + + @Bean + public NewTopic topic() { + return TopicBuilder.name("topic1") + .partitions(10) + .replicas(1) + .build(); + } + + @Bean + public ApplicationRunner runner(KafkaTemplate<String, String> template) { + return args -> { + template.send("topic1", "test"); + }; + } + +} +``` + +Kotlin + +``` +@SpringBootApplication +class Application { + + @Bean + fun topic() = NewTopic("topic1", 10, 1) + + @Bean + fun runner(template: KafkaTemplate<String, String>) = + ApplicationRunner { template.send("topic1", "test") } + + companion object { + @JvmStatic + fun main(args: Array<String>) = runApplication<Application>(*args) + } + +} +``` + +| |Spring for Apache Kafka is designed to be used in a Spring Application Context.
For example, if you create the listener container yourself outside of a Spring context, not all functions will work unless you satisfy all of the `…​Aware` interfaces that the container implements.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Here is an example of an application that does not use Spring Boot; it has both a `Consumer` and `Producer`. + +Example 4. Without Boot + +Java + +``` +public class Sender { + + public static void main(String[] args) { + AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(Config.class); + context.getBean(Sender.class).send("test", 42); + } + + private final KafkaTemplate template; + + public Sender(KafkaTemplate template) { + this.template = template; + } + + public void send(String toSend, int key) { + this.template.send("topic1", key, toSend); + } + +} + +public class Listener { + + @KafkaListener(id = "listen1", topics = "topic1") + public void listen1(String in) { + System.out.println(in); + } + +} + +@Configuration +@EnableKafka +public class Config { + + @Bean + ConcurrentKafkaListenerContainerFactory + kafkaListenerContainerFactory(ConsumerFactory consumerFactory) { + ConcurrentKafkaListenerContainerFactory factory = + new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(consumerFactory); + return factory; + } + + @Bean + public ConsumerFactory consumerFactory() { + return new DefaultKafkaConsumerFactory<>(consumerProps()); + } + + private Map consumerProps() { + Map props = new HashMap<>(); + props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + props.put(ConsumerConfig.GROUP_ID_CONFIG, "group"); + props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class); + props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); + props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + // ... + return props; + } + + @Bean + public Sender sender(KafkaTemplate template) { + return new Sender(template); + } + + @Bean + public Listener listener() { + return new Listener(); + } + + @Bean + public ProducerFactory producerFactory() { + return new DefaultKafkaProducerFactory<>(senderProps()); + } + + private Map senderProps() { + Map props = new HashMap<>(); + props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + props.put(ProducerConfig.LINGER_MS_CONFIG, 10); + props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class); + props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); + //... 
+ return props; + } + + @Bean + public KafkaTemplate kafkaTemplate(ProducerFactory producerFactory) { + return new KafkaTemplate(producerFactory); + } + +} +``` + +Kotlin + +``` +class Sender(private val template: KafkaTemplate) { + + fun send(toSend: String, key: Int) { + template.send("topic1", key, toSend) + } + +} + +class Listener { + + @KafkaListener(id = "listen1", topics = ["topic1"]) + fun listen1(`in`: String) { + println(`in`) + } + +} + +@Configuration +@EnableKafka +class Config { + + @Bean + fun kafkaListenerContainerFactory(consumerFactory: ConsumerFactory) = + ConcurrentKafkaListenerContainerFactory().also { it.consumerFactory = consumerFactory } + + @Bean + fun consumerFactory() = DefaultKafkaConsumerFactory(consumerProps) + + val consumerProps = mapOf( + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG to "localhost:9092", + ConsumerConfig.GROUP_ID_CONFIG to "group", + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG to IntegerDeserializer::class.java, + ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG to StringDeserializer::class.java, + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG to "earliest" + ) + + @Bean + fun sender(template: KafkaTemplate) = Sender(template) + + @Bean + fun listener() = Listener() + + @Bean + fun producerFactory() = DefaultKafkaProducerFactory(senderProps) + + val senderProps = mapOf( + ProducerConfig.BOOTSTRAP_SERVERS_CONFIG to "localhost:9092", + ProducerConfig.LINGER_MS_CONFIG to 10, + ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG to IntegerSerializer::class.java, + ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG to StringSerializer::class.java + ) + + @Bean + fun kafkaTemplate(producerFactory: ProducerFactory) = KafkaTemplate(producerFactory) + +} +``` + +As you can see, you have to define several infrastructure beans when not using Spring Boot. + +## 4. Reference + +This part of the reference documentation details the various components that comprise Spring for Apache Kafka. +The [main chapter](#kafka) covers the core classes to develop a Kafka application with Spring. + +### 4.1. Using Spring for Apache Kafka + +This section offers detailed explanations of the various concerns that impact using Spring for Apache Kafka. +For a quick but less detailed introduction, see [Quick Tour](#quick-tour). + +#### 4.1.1. Connecting to Kafka + +* `KafkaAdmin` - see [Configuring Topics](#configuring-topics) + +* `ProducerFactory` - see [Sending Messages](#sending-messages) + +* `ConsumerFactory` - see [Receiving Messages](#receiving-messages) + +Starting with version 2.5, each of these extends `KafkaResourceFactory`. +This allows changing the bootstrap servers at runtime by adding a `Supplier` to their configuration: `setBootstrapServersSupplier(() → …​)`. +This will be called for all new connections to get the list of servers. +Consumers and Producers are generally long-lived. +To close existing Producers, call `reset()` on the `DefaultKafkaProducerFactory`. +To close existing Consumers, call `stop()` (and then `start()`) on the `KafkaListenerEndpointRegistry` and/or `stop()` and `start()` on any other listener container beans. + +For convenience, the framework also provides an `ABSwitchCluster` which supports two sets of bootstrap servers; one of which is active at any time. +Configure the `ABSwitchCluster` and add it to the producer and consumer factories, and the `KafkaAdmin`, by calling `setBootstrapServersSupplier()`. 
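+ +For example, a minimal sketch of wiring an `ABSwitchCluster` into a producer factory; the broker addresses and bean names here are illustrative, and `producerConfigs()` stands for a producer properties map like the one shown under [Using `KafkaTemplate`](#kafka-template): + +``` +@Bean +public ABSwitchCluster switchCluster() { + // Two comma-delimited bootstrap server lists; the primary is active initially (illustrative addresses) + return new ABSwitchCluster("primary1:9092,primary2:9092", "secondary1:9092"); +} + +@Bean +public DefaultKafkaProducerFactory<String, String> producerFactory(ABSwitchCluster switchCluster) { + DefaultKafkaProducerFactory<String, String> pf = new DefaultKafkaProducerFactory<>(producerConfigs()); + // ABSwitchCluster is a Supplier<String> for the currently active server list + pf.setBootstrapServersSupplier(switchCluster); + return pf; +} +``` +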
+When you want to switch, call `primary()` or `secondary()` and call `reset()` on the producer factory to establish new connection(s); for consumers, `stop()` and `start()` all listener containers. +When using `@KafkaListener` s, `stop()` and `start()` the `KafkaListenerEndpointRegistry` bean. + +See the Javadocs for more information. + +##### Factory Listeners + +Starting with version 2.5, the `DefaultKafkaProducerFactory` and `DefaultKafkaConsumerFactory` can be configured with a `Listener` to receive notifications whenever a producer or consumer is created or closed. + +Producer Factory Listener + +``` +interface Listener { + + default void producerAdded(String id, Producer producer) { + } + + default void producerRemoved(String id, Producer producer) { + } + +} +``` + +Consumer Factory Listener + +``` +interface Listener { + + default void consumerAdded(String id, Consumer consumer) { + } + + default void consumerRemoved(String id, Consumer consumer) { + } + +} +``` + +In each case, the `id` is created by appending the `client-id` property (obtained from the `metrics()` after creation) to the factory `beanName` property, separated by `.`. + +These listeners can be used, for example, to create and bind a Micrometer `KafkaClientMetrics` instance when a new client is created (and close it when the client is closed). + +The framework provides listeners that do exactly that; see [Micrometer Native Metrics](#micrometer-native). + +#### 4.1.2. Configuring Topics + +If you define a `KafkaAdmin` bean in your application context, it can automatically add topics to the broker. +To do so, you can add a `NewTopic` `@Bean` for each topic to the application context. +Version 2.3 introduced a new class `TopicBuilder` to make creation of such beans more convenient. +The following example shows how to do so: + +Java + +``` +@Bean +public KafkaAdmin admin() { + Map configs = new HashMap<>(); + configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + return new KafkaAdmin(configs); +} + +@Bean +public NewTopic topic1() { + return TopicBuilder.name("thing1") + .partitions(10) + .replicas(3) + .compact() + .build(); +} + +@Bean +public NewTopic topic2() { + return TopicBuilder.name("thing2") + .partitions(10) + .replicas(3) + .config(TopicConfig.COMPRESSION_TYPE_CONFIG, "zstd") + .build(); +} + +@Bean +public NewTopic topic3() { + return TopicBuilder.name("thing3") + .assignReplicas(0, Arrays.asList(0, 1)) + .assignReplicas(1, Arrays.asList(1, 2)) + .assignReplicas(2, Arrays.asList(2, 0)) + .config(TopicConfig.COMPRESSION_TYPE_CONFIG, "zstd") + .build(); +} +``` + +Kotlin + +``` +@Bean +fun admin() = KafkaAdmin(mapOf(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG to "localhost:9092")) + +@Bean +fun topic1() = + TopicBuilder.name("thing1") + .partitions(10) + .replicas(3) + .compact() + .build() + +@Bean +fun topic2() = + TopicBuilder.name("thing2") + .partitions(10) + .replicas(3) + .config(TopicConfig.COMPRESSION_TYPE_CONFIG, "zstd") + .build() + +@Bean +fun topic3() = + TopicBuilder.name("thing3") + .assignReplicas(0, Arrays.asList(0, 1)) + .assignReplicas(1, Arrays.asList(1, 2)) + .assignReplicas(2, Arrays.asList(2, 0)) + .config(TopicConfig.COMPRESSION_TYPE_CONFIG, "zstd") + .build() +``` + +Starting with version 2.6, you can omit `.partitions()` and/or `replicas()` and the broker defaults will be applied to those properties. 
+The broker version must be at least 2.4.0 to support this feature - see [KIP-464](https://cwiki.apache.org/confluence/display/KAFKA/KIP-464%3A+Defaults+for+AdminClient%23createTopic). + +Java + +``` +@Bean +public NewTopic topic4() { + return TopicBuilder.name("defaultBoth") + .build(); +} + +@Bean +public NewTopic topic5() { + return TopicBuilder.name("defaultPart") + .replicas(1) + .build(); +} + +@Bean +public NewTopic topic6() { + return TopicBuilder.name("defaultRepl") + .partitions(3) + .build(); +} +``` + +Kotlin + +``` +@Bean +fun topic4() = TopicBuilder.name("defaultBoth").build() + +@Bean +fun topic5() = TopicBuilder.name("defaultPart").replicas(1).build() + +@Bean +fun topic6() = TopicBuilder.name("defaultRepl").partitions(3).build() +``` + +Starting with version 2.7, you can declare multiple `NewTopic` s in a single `KafkaAdmin.NewTopics` bean definition: + +Java + +``` +@Bean +public KafkaAdmin.NewTopics topics456() { + return new NewTopics( + TopicBuilder.name("defaultBoth") + .build(), + TopicBuilder.name("defaultPart") + .replicas(1) + .build(), + TopicBuilder.name("defaultRepl") + .partitions(3) + .build()); +} +``` + +Kotlin + +``` +@Bean +fun topics456() = KafkaAdmin.NewTopics( + TopicBuilder.name("defaultBoth") + .build(), + TopicBuilder.name("defaultPart") + .replicas(1) + .build(), + TopicBuilder.name("defaultRepl") + .partitions(3) + .build() +) +``` + +| |When using Spring Boot, a `KafkaAdmin` bean is automatically registered so you only need the `NewTopic` (and/or `NewTopics`) `@Bean` s.| +|---|---------------------------------------------------------------------------------------------------------------------------------------| + +By default, if the broker is not available, a message is logged, but the context continues to load. +You can programmatically invoke the admin’s `initialize()` method to try again later. +If you wish this condition to be considered fatal, set the admin’s `fatalIfBrokerNotAvailable` property to `true`. +The context then fails to initialize. + +| |If the broker supports it (1.0.0 or higher), the admin increases the number of partitions if it is found that an existing topic has fewer partitions than the `NewTopic.numPartitions`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.7, the `KafkaAdmin` provides methods to create and examine topics at runtime. + +* `createOrModifyTopics` + +* `describeTopics` + +For more advanced features, you can use the `AdminClient` directly. +The following example shows how to do so: + +``` +@Autowired +private KafkaAdmin admin; + +... + + AdminClient client = AdminClient.create(admin.getConfigurationProperties()); + ... + client.close(); +``` + +#### 4.1.3. Sending Messages + +This section covers how to send messages. + +##### Using `KafkaTemplate` + +This section covers how to use `KafkaTemplate` to send messages. + +###### Overview + +The `KafkaTemplate` wraps a producer and provides convenience methods to send data to Kafka topics. 
+The following listing shows the relevant methods from `KafkaTemplate`: + +``` +ListenableFuture<SendResult<K, V>> sendDefault(V data); + +ListenableFuture<SendResult<K, V>> sendDefault(K key, V data); + +ListenableFuture<SendResult<K, V>> sendDefault(Integer partition, K key, V data); + +ListenableFuture<SendResult<K, V>> sendDefault(Integer partition, Long timestamp, K key, V data); + +ListenableFuture<SendResult<K, V>> send(String topic, V data); + +ListenableFuture<SendResult<K, V>> send(String topic, K key, V data); + +ListenableFuture<SendResult<K, V>> send(String topic, Integer partition, K key, V data); + +ListenableFuture<SendResult<K, V>> send(String topic, Integer partition, Long timestamp, K key, V data); + +ListenableFuture<SendResult<K, V>> send(ProducerRecord<K, V> record); + +ListenableFuture<SendResult<K, V>> send(Message<?> message); + +Map<MetricName, ? extends Metric> metrics(); + +List<PartitionInfo> partitionsFor(String topic); + +<T> T execute(ProducerCallback<K, V, T> callback); + +// Flush the producer. + +void flush(); + +interface ProducerCallback<K, V, T> { + + T doInKafka(Producer<K, V> producer); + +} +``` + +See the [Javadoc](https://docs.spring.io/spring-kafka/api/org/springframework/kafka/core/KafkaTemplate.html) for more detail. + +The `sendDefault` API requires that a default topic has been provided to the template. + +The API takes in a `timestamp` as a parameter and stores this timestamp in the record. +How the user-provided timestamp is stored depends on the timestamp type configured on the Kafka topic. +If the topic is configured to use `CREATE_TIME`, the user-specified timestamp is recorded (or generated if not specified). +If the topic is configured to use `LOG_APPEND_TIME`, the user-specified timestamp is ignored and the broker adds in the local broker time. + +The `metrics` and `partitionsFor` methods delegate to the same methods on the underlying [`Producer`](https://kafka.apache.org/20/javadoc/org/apache/kafka/clients/producer/Producer.html). +The `execute` method provides direct access to the underlying [`Producer`](https://kafka.apache.org/20/javadoc/org/apache/kafka/clients/producer/Producer.html). + +To use the template, you can configure a producer factory and provide it in the template’s constructor. +The following example shows how to do so: + +``` +@Bean +public ProducerFactory<String, String> producerFactory() { + return new DefaultKafkaProducerFactory<>(producerConfigs()); +} + +@Bean +public Map<String, Object> producerConfigs() { + Map<String, Object> props = new HashMap<>(); + props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); + props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); + props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); + // See https://kafka.apache.org/documentation/#producerconfigs for more properties + return props; +} + +@Bean +public KafkaTemplate<String, String> kafkaTemplate() { + return new KafkaTemplate<String, String>(producerFactory()); +} +``` + +Starting with version 2.5, you can now override the factory’s `ProducerConfig` properties to create templates with different producer configurations from the same factory. + +``` +@Bean +public KafkaTemplate<String, String> stringTemplate(ProducerFactory<String, String> pf) { + return new KafkaTemplate<>(pf); +} + +@Bean +public KafkaTemplate<String, byte[]> bytesTemplate(ProducerFactory<String, byte[]> pf) { + return new KafkaTemplate<>(pf, + Collections.singletonMap(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class)); +} +``` + +Note that a bean of type `ProducerFactory` (such as the one auto-configured by Spring Boot) can be referenced with different narrowed generic types. + +You can also configure the template by using standard `<bean/>` definitions. + +Then, to use the template, you can invoke one of its methods. 
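+ +For instance, a minimal sketch of the plain send methods (the topic name and values are illustrative, and `template` is assumed to be a `KafkaTemplate<String, String>` bean like the one above): + +``` +// Each call returns a ListenableFuture<SendResult<String, String>> +template.send("myTopic", "some-value"); // topic and value +template.send("myTopic", "some-key", "some-value"); // topic, key and value +template.sendDefault("some-value"); // requires a default topic to be set on the template +``` +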
+ +When you use the methods with a `Message<?>` parameter, the topic, partition, and key information is provided in a message header that includes the following items: + +* `KafkaHeaders.TOPIC` + +* `KafkaHeaders.PARTITION_ID` + +* `KafkaHeaders.MESSAGE_KEY` + +* `KafkaHeaders.TIMESTAMP` + +The message payload is the data. + +Optionally, you can configure the `KafkaTemplate` with a `ProducerListener` to get an asynchronous callback with the results of the send (success or failure) instead of waiting for the `Future` to complete. +The following listing shows the definition of the `ProducerListener` interface: + +``` +public interface ProducerListener<K, V> { + + void onSuccess(ProducerRecord<K, V> producerRecord, RecordMetadata recordMetadata); + + void onError(ProducerRecord<K, V> producerRecord, RecordMetadata recordMetadata, + Exception exception); + +} +``` + +By default, the template is configured with a `LoggingProducerListener`, which logs errors and does nothing when the send is successful. + +For convenience, default method implementations are provided in case you want to implement only one of the methods. + +Notice that the send methods return a `ListenableFuture<SendResult>`. +You can register a callback with the listener to receive the result of the send asynchronously. +The following example shows how to do so: + +``` +ListenableFuture<SendResult<Integer, String>> future = template.send("myTopic", "something"); +future.addCallback(new ListenableFutureCallback<SendResult<Integer, String>>() { + + @Override + public void onSuccess(SendResult<Integer, String> result) { + ... + } + + @Override + public void onFailure(Throwable ex) { + ... + } + +}); +``` + +`SendResult` has two properties, a `ProducerRecord` and `RecordMetadata`. +See the Kafka API documentation for information about those objects. + +The `Throwable` in `onFailure` can be cast to a `KafkaProducerException`; its `failedProducerRecord` property contains the failed record. + +Starting with version 2.5, you can use a `KafkaSendCallback` instead of a `ListenableFutureCallback`, making it easier to extract the failed `ProducerRecord`, avoiding the need to cast the `Throwable`: + +``` +ListenableFuture<SendResult<Integer, String>> future = template.send("topic", 1, "thing"); +future.addCallback(new KafkaSendCallback<Integer, String>() { + + @Override + public void onSuccess(SendResult<Integer, String> result) { + ... + } + + @Override + public void onFailure(KafkaProducerException ex) { + ProducerRecord<Integer, String> failed = ex.getFailedProducerRecord(); + ... + } + +}); +``` + +You can also use a pair of lambdas: + +``` +ListenableFuture<SendResult<Integer, String>> future = template.send("topic", 1, "thing"); +future.addCallback(result -> { + ... + }, (KafkaFailureCallback<Integer, String>) ex -> { + ProducerRecord<Integer, String> failed = ex.getFailedProducerRecord(); + ... + }); +``` + +If you wish to block the sending thread to await the result, you can invoke the future’s `get()` method; using the method with a timeout is recommended. +You may wish to invoke `flush()` before waiting or, for convenience, the template has a constructor with an `autoFlush` parameter that causes the template to `flush()` on each send. +Flushing is only needed if you have set the `linger.ms` producer property and want to immediately send a partial batch. + +###### Examples + +This section shows examples of sending messages to Kafka: + +Example 5. 
Non Blocking (Async) + +``` +public void sendToKafka(final MyOutputData data) { + final ProducerRecord record = createRecord(data); + + ListenableFuture> future = template.send(record); + future.addCallback(new KafkaSendCallback() { + + @Override + public void onSuccess(SendResult result) { + handleSuccess(data); + } + + @Override + public void onFailure(KafkaProducerException ex) { + handleFailure(data, record, ex); + } + + }); +} +``` + +Blocking (Sync) + +``` +public void sendToKafka(final MyOutputData data) { + final ProducerRecord record = createRecord(data); + + try { + template.send(record).get(10, TimeUnit.SECONDS); + handleSuccess(data); + } + catch (ExecutionException e) { + handleFailure(data, record, e.getCause()); + } + catch (TimeoutException | InterruptedException e) { + handleFailure(data, record, e); + } +} +``` + +Note that the cause of the `ExecutionException` is `KafkaProducerException` with the `failedProducerRecord` property. + +##### Using `RoutingKafkaTemplate` + +Starting with version 2.5, you can use a `RoutingKafkaTemplate` to select the producer at runtime, based on the destination `topic` name. + +| |The routing template does **not** support transactions, `execute`, `flush`, or `metrics` operations because the topic is not known for those operations.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------| + +The template requires a map of `java.util.regex.Pattern` to `ProducerFactory` instances. +This map should be ordered (e.g. a `LinkedHashMap`) because it is traversed in order; you should add more specific patterns at the beginning. + +The following simple Spring Boot application provides an example of how to use the same template to send to different topics, each using a different value serializer. + +``` +@SpringBootApplication +public class Application { + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } + + @Bean + public RoutingKafkaTemplate routingTemplate(GenericApplicationContext context, + ProducerFactory pf) { + + // Clone the PF with a different Serializer, register with Spring for shutdown + Map configs = new HashMap<>(pf.getConfigurationProperties()); + configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class); + DefaultKafkaProducerFactory bytesPF = new DefaultKafkaProducerFactory<>(configs); + context.registerBean(DefaultKafkaProducerFactory.class, "bytesPF", bytesPF); + + Map> map = new LinkedHashMap<>(); + map.put(Pattern.compile("two"), bytesPF); + map.put(Pattern.compile(".+"), pf); // Default PF with StringSerializer + return new RoutingKafkaTemplate(map); + } + + @Bean + public ApplicationRunner runner(RoutingKafkaTemplate routingTemplate) { + return args -> { + routingTemplate.send("one", "thing1"); + routingTemplate.send("two", "thing2".getBytes()); + }; + } + +} +``` + +The corresponding `@KafkaListener` s for this example are shown in [Annotation Properties](#annotation-properties). + +For another technique to achieve similar results, but with the additional capability of sending different types to the same topic, see [Delegating Serializer and Deserializer](#delegating-serialization). + +##### Using `DefaultKafkaProducerFactory` + +As seen in [Using `KafkaTemplate`](#kafka-template), a `ProducerFactory` is used to create the producer. 
+ +When not using [Transactions](#transactions), by default, the `DefaultKafkaProducerFactory` creates a singleton producer used by all clients, as recommended in the `KafkaProducer` javadocs. +However, if you call `flush()` on the template, this can cause delays for other threads using the same producer. +Starting with version 2.3, the `DefaultKafkaProducerFactory` has a new property `producerPerThread`. +When set to `true`, the factory will create (and cache) a separate producer for each thread, to avoid this issue. + +| |When `producerPerThread` is `true`, user code **must** call `closeThreadBoundProducer()` on the factory when the producer is no longer needed.
This will physically close the producer and remove it from the `ThreadLocal`.
Calling `reset()` or `destroy()` will not clean up these producers.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Also see [`KafkaTemplate` Transactional and non-Transactional Publishing](#tx-template-mixed). + +When creating a `DefaultKafkaProducerFactory`, key and/or value `Serializer` classes can be picked up from configuration by calling the constructor that only takes in a Map of properties (see example in [Using `KafkaTemplate`](#kafka-template)), or `Serializer` instances may be passed to the `DefaultKafkaProducerFactory` constructor (in which case all `Producer` s share the same instances). +Alternatively you can provide `Supplier` s (starting with version 2.3) that will be used to obtain separate `Serializer` instances for each `Producer`: + +``` +@Bean +public ProducerFactory producerFactory() { + return new DefaultKafkaProducerFactory<>(producerConfigs(), null, () -> new CustomValueSerializer()); +} + +@Bean +public KafkaTemplate kafkaTemplate() { + return new KafkaTemplate(producerFactory()); +} +``` + +Starting with version 2.5.10, you can now update the producer properties after the factory is created. +This might be useful, for example, if you have to update SSL key/trust store locations after a credentials change. +The changes will not affect existing producer instances; call `reset()` to close any existing producers so that new producers will be created using the new properties. +NOTE: You cannot change a transactional producer factory to non-transactional, and vice-versa. + +Two new methods are now provided: + +``` +void updateConfigs(Map updates); + +void removeConfig(String configKey); +``` + +Starting with version 2.8, if you provide serializers as objects (in the constructor or via the setters), the factory will invoke the `configure()` method to configure them with the configuration properties. + +##### Using `ReplyingKafkaTemplate` + +Version 2.1.3 introduced a subclass of `KafkaTemplate` to provide request/reply semantics. +The class is named `ReplyingKafkaTemplate` and has two additional methods; the following shows the method signatures: + +``` +RequestReplyFuture sendAndReceive(ProducerRecord record); + +RequestReplyFuture sendAndReceive(ProducerRecord record, + Duration replyTimeout); +``` + +(Also see [Request/Reply with `Message` s](#exchanging-messages)). + +The result is a `ListenableFuture` that is asynchronously populated with the result (or an exception, for a timeout). +The result also has a `sendFuture` property, which is the result of calling `KafkaTemplate.send()`. +You can use this future to determine the result of the send operation. + +If the first method is used, or the `replyTimeout` argument is `null`, the template’s `defaultReplyTimeout` property is used (5 seconds by default). 
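+ +For example, a minimal sketch of overriding that timeout; here `record` stands for a prepared `ProducerRecord`, and `setDefaultReplyTimeout` is the setter for the `defaultReplyTimeout` property: + +``` +// Per-call timeout, using the second method signature shown above +RequestReplyFuture<String, String, String> future = + template.sendAndReceive(record, Duration.ofSeconds(10)); + +// Or change the template-wide default used by the single-argument method +template.setDefaultReplyTimeout(Duration.ofSeconds(10)); +``` +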
+ +The following Spring Boot application shows an example of how to use the feature: + +``` +@SpringBootApplication +public class KRequestingApplication { + + public static void main(String[] args) { + SpringApplication.run(KRequestingApplication.class, args).close(); + } + + @Bean + public ApplicationRunner runner(ReplyingKafkaTemplate<String, String, String> template) { + return args -> { + ProducerRecord<String, String> record = new ProducerRecord<>("kRequests", "foo"); + RequestReplyFuture<String, String, String> replyFuture = template.sendAndReceive(record); + SendResult<String, String> sendResult = replyFuture.getSendFuture().get(10, TimeUnit.SECONDS); + System.out.println("Sent ok: " + sendResult.getRecordMetadata()); + ConsumerRecord<String, String> consumerRecord = replyFuture.get(10, TimeUnit.SECONDS); + System.out.println("Return value: " + consumerRecord.value()); + }; + } + + @Bean + public ReplyingKafkaTemplate<String, String, String> replyingTemplate( + ProducerFactory<String, String> pf, + ConcurrentMessageListenerContainer<String, String> repliesContainer) { + + return new ReplyingKafkaTemplate<>(pf, repliesContainer); + } + + @Bean + public ConcurrentMessageListenerContainer<String, String> repliesContainer( + ConcurrentKafkaListenerContainerFactory<String, String> containerFactory) { + + ConcurrentMessageListenerContainer<String, String> repliesContainer = + containerFactory.createContainer("kReplies"); + repliesContainer.getContainerProperties().setGroupId("repliesGroup"); + repliesContainer.setAutoStartup(false); + return repliesContainer; + } + + @Bean + public NewTopic kRequests() { + return TopicBuilder.name("kRequests") + .partitions(10) + .replicas(2) + .build(); + } + + @Bean + public NewTopic kReplies() { + return TopicBuilder.name("kReplies") + .partitions(10) + .replicas(2) + .build(); + } + +} +``` + +Note that we can use Boot’s auto-configured container factory to create the reply container. + +If a non-trivial deserializer is being used for replies, consider using an [`ErrorHandlingDeserializer`](#error-handling-deserializer) that delegates to your configured deserializer. +When so configured, the `RequestReplyFuture` will be completed exceptionally and you can catch the `ExecutionException`, with the `DeserializationException` in its `cause` property. + +Starting with version 2.6.7, in addition to detecting `DeserializationException` s, the template will call the `replyErrorChecker` function, if provided. +If it returns an exception, the future will be completed exceptionally. + +Here is an example: + +``` +template.setReplyErrorChecker(record -> { + Header error = record.headers().lastHeader("serverSentAnError"); + if (error != null) { + return new MyException(new String(error.value())); + } + else { + return null; + } +}); + +... + +RequestReplyFuture<String, String, String> future = template.sendAndReceive(record); +try { + future.getSendFuture().get(10, TimeUnit.SECONDS); // send ok + ConsumerRecord<String, String> consumerRecord = future.get(10, TimeUnit.SECONDS); + ... +} +catch (InterruptedException e) { + ... +} +catch (ExecutionException e) { + if (e.getCause() instanceof MyException) { + ... + } +} +catch (TimeoutException e) { + ... +} +``` + +The template sets a header (named `KafkaHeaders.CORRELATION_ID` by default), which must be echoed back by the server side. 
+ +In this case, the following `@KafkaListener` application responds: + +``` +@SpringBootApplication +public class KReplyingApplication { + + public static void main(String[] args) { + SpringApplication.run(KReplyingApplication.class, args); + } + + @KafkaListener(id="server", topics = "kRequests") + @SendTo // use default replyTo expression + public String listen(String in) { + System.out.println("Server received: " + in); + return in.toUpperCase(); + } + + @Bean + public NewTopic kRequests() { + return TopicBuilder.name("kRequests") + .partitions(10) + .replicas(2) + .build(); + } + + @Bean // not required if Jackson is on the classpath + public MessagingMessageConverter simpleMapperConverter() { + MessagingMessageConverter messagingMessageConverter = new MessagingMessageConverter(); + messagingMessageConverter.setHeaderMapper(new SimpleKafkaHeaderMapper()); + return messagingMessageConverter; + } + +} +``` + +The `@KafkaListener` infrastructure echoes the correlation ID and determines the reply topic. + +See [Forwarding Listener Results using `@SendTo`](#annotation-send-to) for more information about sending replies. +The template uses the default header `KafKaHeaders.REPLY_TOPIC` to indicate the topic to which the reply goes. + +Starting with version 2.2, the template tries to detect the reply topic or partition from the configured reply container. +If the container is configured to listen to a single topic or a single `TopicPartitionOffset`, it is used to set the reply headers. +If the container is configured otherwise, the user must set up the reply headers. +In this case, an `INFO` log message is written during initialization. +The following example uses `KafkaHeaders.REPLY_TOPIC`: + +``` +record.headers().add(new RecordHeader(KafkaHeaders.REPLY_TOPIC, "kReplies".getBytes())); +``` + +When you configure with a single reply `TopicPartitionOffset`, you can use the same reply topic for multiple templates, as long as each instance listens on a different partition. +When configuring with a single reply topic, each instance must use a different `group.id`. +In this case, all instances receive each reply, but only the instance that sent the request finds the correlation ID. +This may be useful for auto-scaling, but with the overhead of additional network traffic and the small cost of discarding each unwanted reply. +When you use this setting, we recommend that you set the template’s `sharedReplyTopic` to `true`, which reduces the logging level of unexpected replies to DEBUG instead of the default ERROR. + +The following is an example of configuring the reply container to use the same shared reply topic: + +``` +@Bean +public ConcurrentMessageListenerContainer replyContainer( + ConcurrentKafkaListenerContainerFactory containerFactory) { + + ConcurrentMessageListenerContainer container = containerFactory.createContainer("topic2"); + container.getContainerProperties().setGroupId(UUID.randomUUID().toString()); // unique + Properties props = new Properties(); + props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest"); // so the new group doesn't get old replies + container.getContainerProperties().setKafkaConsumerProperties(props); + return container; +} +``` + +| |If you have multiple client instances and you do not configure them as discussed in the preceding paragraph, each instance needs a dedicated reply topic.
An alternative is to set the `KafkaHeaders.REPLY_PARTITION` and use a dedicated partition for each instance.
The `Header` contains a four-byte int (big-endian).
The server must use this header to route the reply to the correct partition (`@KafkaListener` does this).
In this case, though, the reply container must not use Kafka’s group management feature and must be configured to listen on a fixed partition (by using a `TopicPartitionOffset` in its `ContainerProperties` constructor).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The `DefaultKafkaHeaderMapper` requires Jackson to be on the classpath (for the `@KafkaListener`).
If it is not available, the message converter has no header mapper, so you must configure a `MessagingMessageConverter` with a `SimpleKafkaHeaderMapper`, as shown earlier.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +By default, 3 headers are used: + +* `KafkaHeaders.CORRELATION_ID` - used to correlate the reply to a request + +* `KafkaHeaders.REPLY_TOPIC` - used to tell the server where to reply + +* `KafkaHeaders.REPLY_PARTITION` - (optional) used to tell the server which partition to reply to + +These header names are used by the `@KafkaListener` infrastructure to route the reply. + +Starting with version 2.3, you can customize the header names - the template has 3 properties `correlationHeaderName`, `replyTopicHeaderName`, and `replyPartitionHeaderName`. +This is useful if your server is not a Spring application (or does not use the `@KafkaListener`). + +###### Request/Reply with `Message` s + +Version 2.7 added methods to the `ReplyingKafkaTemplate` to send and receive `spring-messaging` 's `Message` abstraction: + +``` +RequestReplyMessageFuture sendAndReceive(Message message); + +

RequestReplyTypedMessageFuture sendAndReceive(Message message, + ParameterizedTypeReference
returnType); +``` + +These will use the template’s default `replyTimeout`, there are also overloaded versions that can take a timeout in the method call. + +Use the first method if the consumer’s `Deserializer` or the template’s `MessageConverter` can convert the payload without any additional information, either via configuration or type metadata in the reply message. + +Use the second method if you need to provide type information for the return type, to assist the message converter. +This also allows the same template to receive different types, even if there is no type metadata in the replies, such as when the server side is not a Spring application. +The following is an example of the latter: + +Example 6. Template Bean + +Java + +``` +@Bean +ReplyingKafkaTemplate template( + ProducerFactory pf, + ConcurrentKafkaListenerContainerFactory factory) { + + ConcurrentMessageListenerContainer replyContainer = + factory.createContainer("replies"); + replyContainer.getContainerProperties().setGroupId("request.replies"); + ReplyingKafkaTemplate template = + new ReplyingKafkaTemplate<>(pf, replyContainer); + template.setMessageConverter(new ByteArrayJsonMessageConverter()); + template.setDefaultTopic("requests"); + return template; +} +``` + +Kotlin + +``` +@Bean +fun template( + pf: ProducerFactory?, + factory: ConcurrentKafkaListenerContainerFactory +): ReplyingKafkaTemplate { + val replyContainer = factory.createContainer("replies") + replyContainer.containerProperties.groupId = "request.replies" + val template = ReplyingKafkaTemplate(pf, replyContainer) + template.messageConverter = ByteArrayJsonMessageConverter() + template.defaultTopic = "requests" + return template +} +``` + +Example 7. Using the template + +Java + +``` +RequestReplyTypedMessageFuture future1 = + template.sendAndReceive(MessageBuilder.withPayload("getAThing").build(), + new ParameterizedTypeReference() { }); +log.info(future1.getSendFuture().get(10, TimeUnit.SECONDS).getRecordMetadata().toString()); +Thing thing = future1.get(10, TimeUnit.SECONDS).getPayload(); +log.info(thing.toString()); + +RequestReplyTypedMessageFuture> future2 = + template.sendAndReceive(MessageBuilder.withPayload("getThings").build(), + new ParameterizedTypeReference>() { }); +log.info(future2.getSendFuture().get(10, TimeUnit.SECONDS).getRecordMetadata().toString()); +List things = future2.get(10, TimeUnit.SECONDS).getPayload(); +things.forEach(thing1 -> log.info(thing1.toString())); +``` + +Kotlin + +``` +val future1: RequestReplyTypedMessageFuture? = + template.sendAndReceive(MessageBuilder.withPayload("getAThing").build(), + object : ParameterizedTypeReference() {}) +log.info(future1?.sendFuture?.get(10, TimeUnit.SECONDS)?.recordMetadata?.toString()) +val thing = future1?.get(10, TimeUnit.SECONDS)?.payload +log.info(thing.toString()) + +val future2: RequestReplyTypedMessageFuture?>? = + template.sendAndReceive(MessageBuilder.withPayload("getThings").build(), + object : ParameterizedTypeReference?>() {}) +log.info(future2?.sendFuture?.get(10, TimeUnit.SECONDS)?.recordMetadata.toString()) +val things = future2?.get(10, TimeUnit.SECONDS)?.payload +things?.forEach(Consumer { thing1: Thing? -> log.info(thing1.toString()) }) +``` + +##### Reply Type Message\ + +When the `@KafkaListener` returns a `Message`, with versions before 2.5, it was necessary to populate the reply topic and correlation id headers. 

##### Reply Type `Message<?>`

When the `@KafkaListener` returns a `Message<?>`, with versions before 2.5, it was necessary to populate the reply topic and correlation id headers.
In this example, we use the reply topic header from the request:

```
@KafkaListener(id = "requestor", topics = "request")
@SendTo
public Message<?> messageReturn(String in) {
    return MessageBuilder.withPayload(in.toUpperCase())
            .setHeader(KafkaHeaders.TOPIC, replyTo)
            .setHeader(KafkaHeaders.MESSAGE_KEY, 42)
            .setHeader(KafkaHeaders.CORRELATION_ID, correlation)
            .build();
}
```

This also shows how to set a key on the reply record.

Starting with version 2.5, the framework detects if these headers are missing and populates them with the topic - either the topic determined from the `@SendTo` value or the incoming `KafkaHeaders.REPLY_TOPIC` header (if present).
It also echoes the incoming `KafkaHeaders.CORRELATION_ID` and `KafkaHeaders.REPLY_PARTITION`, if present.

```
@KafkaListener(id = "requestor", topics = "request")
@SendTo  // default REPLY_TOPIC header
public Message<?> messageReturn(String in) {
    return MessageBuilder.withPayload(in.toUpperCase())
            .setHeader(KafkaHeaders.MESSAGE_KEY, 42)
            .build();
}
```

##### Aggregating Multiple Replies

The template in [Using `ReplyingKafkaTemplate`](#replying-template) is strictly for a single request/reply scenario.
For cases where multiple receivers of a single message return a reply, you can use the `AggregatingReplyingKafkaTemplate`.
This is an implementation of the client-side of the [Scatter-Gather Enterprise Integration Pattern](https://www.enterpriseintegrationpatterns.com/patterns/messaging/BroadcastAggregate.html).

Like the `ReplyingKafkaTemplate`, the `AggregatingReplyingKafkaTemplate` constructor takes a producer factory and a listener container to receive the replies; it has a third parameter `BiPredicate<List<ConsumerRecord<K, R>>, Boolean> releaseStrategy`, which is consulted each time a reply is received; when the predicate returns `true`, the collection of `ConsumerRecord`s is used to complete the `Future` returned by the `sendAndReceive` method.

There is an additional property `returnPartialOnTimeout` (default `false`).
When this is set to `true`, instead of completing the future with a `KafkaReplyTimeoutException`, a partial result completes the future normally (as long as at least one reply record has been received).

Starting with version 2.3.5, the predicate is also called after a timeout (if `returnPartialOnTimeout` is `true`).
The first argument is the current list of records; the second is `true` if this call is due to a timeout.
The predicate can modify the list of records.

```
AggregatingReplyingKafkaTemplate<Integer, String, String> template =
        new AggregatingReplyingKafkaTemplate<>(producerFactory, container,
                        coll -> coll.size() == releaseSize);
...
RequestReplyFuture<Integer, String, Collection<ConsumerRecord<Integer, String>>> future =
        template.sendAndReceive(record);
future.getSendFuture().get(10, TimeUnit.SECONDS); // send ok
ConsumerRecord<Integer, Collection<ConsumerRecord<Integer, String>>> consumerRecord =
        future.get(30, TimeUnit.SECONDS);
```

Notice that the return type is a `ConsumerRecord` with a value that is a collection of `ConsumerRecord`s.
The "outer" `ConsumerRecord` is not a "real" record; it is synthesized by the template, as a holder for the actual reply records received for the request.
When a normal release occurs (the release strategy returns `true`), the topic is set to `aggregatedResults`; if `returnPartialOnTimeout` is `true` and a timeout occurs (and at least one reply record has been received), the topic is set to `partialResultsAfterTimeout`.
+The template provides constant static variables for these "topic" names: + +``` +/** + * Pseudo topic name for the "outer" {@link ConsumerRecords} that has the aggregated + * results in its value after a normal release by the release strategy. + */ +public static final String AGGREGATED_RESULTS_TOPIC = "aggregatedResults"; + +/** + * Pseudo topic name for the "outer" {@link ConsumerRecords} that has the aggregated + * results in its value after a timeout. + */ +public static final String PARTIAL_RESULTS_AFTER_TIMEOUT_TOPIC = "partialResultsAfterTimeout"; +``` + +The real `ConsumerRecord` s in the `Collection` contain the actual topic(s) from which the replies are received. + +| |The listener container for the replies MUST be configured with `AckMode.MANUAL` or `AckMode.MANUAL_IMMEDIATE`; the consumer property `enable.auto.commit` must be `false` (the default since version 2.3).
To avoid any possibility of losing messages, the template only commits offsets when there are zero requests outstanding, i.e. when the last outstanding request is released by the release strategy.
After a rebalance, duplicate reply deliveries are possible; these will be ignored for any in-flight requests; you may see error log messages when duplicate replies are received for already released requests.|
|---|---|

| |If you use an [`ErrorHandlingDeserializer`](#error-handling-deserializer) with this aggregating template, the framework will not automatically detect `DeserializationException`s.
Instead, the record (with a `null` value) will be returned intact, with the deserialization exception(s) in headers.
It is recommended that applications call the utility method `ReplyingKafkaTemplate.checkDeserialization()` to determine if a deserialization exception occurred.
See its javadocs for more information.
The `replyErrorChecker` is also not called for this aggregating template; you should perform the checks on each element of the reply.|
|---|---|

#### 4.1.4. Receiving Messages

You can receive messages by configuring a `MessageListenerContainer` and providing a message listener or by using the `@KafkaListener` annotation.

##### Message Listeners

When you use a [message listener container](#message-listener-container), you must provide a listener to receive data.
There are currently eight supported interfaces for message listeners.
The following listing shows these interfaces:

```
public interface MessageListener<K, V> { (1)

    void onMessage(ConsumerRecord<K, V> data);

}

public interface AcknowledgingMessageListener<K, V> { (2)

    void onMessage(ConsumerRecord<K, V> data, Acknowledgment acknowledgment);

}

public interface ConsumerAwareMessageListener<K, V> extends MessageListener<K, V> { (3)

    void onMessage(ConsumerRecord<K, V> data, Consumer<?, ?> consumer);

}

public interface AcknowledgingConsumerAwareMessageListener<K, V> extends MessageListener<K, V> { (4)

    void onMessage(ConsumerRecord<K, V> data, Acknowledgment acknowledgment, Consumer<?, ?> consumer);

}

public interface BatchMessageListener<K, V> { (5)

    void onMessage(List<ConsumerRecord<K, V>> data);

}

public interface BatchAcknowledgingMessageListener<K, V> { (6)

    void onMessage(List<ConsumerRecord<K, V>> data, Acknowledgment acknowledgment);

}

public interface BatchConsumerAwareMessageListener<K, V> extends BatchMessageListener<K, V> { (7)

    void onMessage(List<ConsumerRecord<K, V>> data, Consumer<?, ?> consumer);

}

public interface BatchAcknowledgingConsumerAwareMessageListener<K, V> extends BatchMessageListener<K, V> { (8)

    void onMessage(List<ConsumerRecord<K, V>> data, Acknowledgment acknowledgment, Consumer<?, ?> consumer);

}
```

|**1**|Use this interface for processing individual `ConsumerRecord` instances received from the Kafka consumer `poll()` operation when using auto-commit or one of the container-managed [commit methods](#committing-offsets).|
|-----|-----|
|**2**|Use this interface for processing individual `ConsumerRecord` instances received from the Kafka consumer `poll()` operation when using one of the manual [commit methods](#committing-offsets).|
|**3**|Use this interface for processing individual `ConsumerRecord` instances received from the Kafka consumer `poll()` operation when using auto-commit or one of the container-managed [commit methods](#committing-offsets). Access to the `Consumer` object is provided.|
|**4**|Use this interface for processing individual `ConsumerRecord` instances received from the Kafka consumer `poll()` operation when using one of the manual [commit methods](#committing-offsets). Access to the `Consumer` object is provided.|
|**5**|Use this interface for processing all `ConsumerRecord` instances received from the Kafka consumer `poll()` operation when using auto-commit or one of the container-managed [commit methods](#committing-offsets). `AckMode.RECORD` is not supported when you use this interface, since the listener is given the complete batch.|
|**6**|Use this interface for processing all `ConsumerRecord` instances received from the Kafka consumer `poll()` operation when using one of the manual [commit methods](#committing-offsets).|
|**7**|Use this interface for processing all `ConsumerRecord` instances received from the Kafka consumer `poll()` operation when using auto-commit or one of the container-managed [commit methods](#committing-offsets). `AckMode.RECORD` is not supported when you use this interface, since the listener is given the complete batch. Access to the `Consumer` object is provided.|
|**8**|Use this interface for processing all `ConsumerRecord` instances received from the Kafka consumer `poll()` operation when using one of the manual [commit methods](#committing-offsets). Access to the `Consumer` object is provided.|
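
For illustration, the following is a minimal sketch of interface (2), a manual-ack record listener; the `process()` call is a hypothetical stand-in for your business logic:

```
public class MyAckingListener implements AcknowledgingMessageListener<String, String> {

    @Override
    public void onMessage(ConsumerRecord<String, String> data, Acknowledgment acknowledgment) {
        process(data.value()); // hypothetical business logic
        acknowledgment.acknowledge(); // commit semantics depend on the container's AckMode
    }

}
```

This assumes the container is configured with one of the manual `AckMode`s described in [Committing Offsets](#committing-offsets).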

| |The `Consumer` object is not thread-safe. You must only invoke its methods on the thread that calls the listener.|
|---|---|

| |You should not execute any `Consumer<?, ?>` methods that affect the consumer's positions and/or committed offsets in your listener; the container needs to manage such information.|
|---|---|

##### Message Listener Containers

Two `MessageListenerContainer` implementations are provided:

* `KafkaMessageListenerContainer`

* `ConcurrentMessageListenerContainer`

The `KafkaMessageListenerContainer` receives all messages from all topics or partitions on a single thread.
The `ConcurrentMessageListenerContainer` delegates to one or more `KafkaMessageListenerContainer` instances to provide multi-threaded consumption.

Starting with version 2.2.7, you can add a `RecordInterceptor` to the listener container; it is invoked before calling the listener, allowing inspection or modification of the record.
If the interceptor returns `null`, the listener is not called.
Starting with version 2.7, it has additional methods which are called after the listener exits (normally, or by throwing an exception).
Also, starting with version 2.7, there is now a `BatchInterceptor`, providing similar functionality for [Batch Listeners](#batch-listeners).
In addition, the `ConsumerAwareRecordInterceptor` (and `BatchInterceptor`) provide access to the `Consumer<?, ?>`.
This might be used, for example, to access the consumer metrics in the interceptor.

| |You should not execute any methods that affect the consumer's positions and/or committed offsets in these interceptors; the container needs to manage such information.|
|---|---|

The `CompositeRecordInterceptor` and `CompositeBatchInterceptor` can be used to invoke multiple interceptors.

By default, starting with version 2.8, when using transactions, the interceptor is invoked before the transaction has started.
You can set the listener container's `interceptBeforeTx` property to `false` to invoke the interceptor after the transaction has started instead.

Starting with versions 2.3.8 and 2.4.6, the `ConcurrentMessageListenerContainer` supports [Static Membership](https://kafka.apache.org/documentation/#static_membership) when the concurrency is greater than one.
The `group.instance.id` is suffixed with `-n`, with `n` starting at `1`.
This, together with an increased `session.timeout.ms`, can be used to reduce rebalance events, for example, when application instances are restarted.

###### Using `KafkaMessageListenerContainer`

The following constructor is available:

```
public KafkaMessageListenerContainer(ConsumerFactory<K, V> consumerFactory,
                    ContainerProperties containerProperties)
```

It receives a `ConsumerFactory` and information about topics and partitions, as well as other configuration, in a `ContainerProperties` object.
`ContainerProperties` has the following constructors:

```
public ContainerProperties(TopicPartitionOffset... topicPartitions)

public ContainerProperties(String... topics)

public ContainerProperties(Pattern topicPattern)
```

The first constructor takes an array of `TopicPartitionOffset` arguments to explicitly instruct the container about which partitions to use (using the consumer `assign()` method) and with an optional initial offset.
A positive value is an absolute offset by default.
A negative value is relative to the current last offset within a partition by default.
A constructor for `TopicPartitionOffset` that takes an additional `boolean` argument is provided.
If this is `true`, the initial offsets (positive or negative) are relative to the current position for this consumer.
The offsets are applied when the container is started.
The second constructor takes an array of topics, and Kafka allocates the partitions based on the `group.id` property, distributing partitions across the group.
The third uses a regex `Pattern` to select the topics.

To assign a `MessageListener` to a container, you can use the `ContainerProps.setMessageListener` method when creating the container.
The following example shows how to do so:

```
ContainerProperties containerProps = new ContainerProperties("topic1", "topic2");
containerProps.setMessageListener(new MessageListener<Integer, String>() {
    ...
});
DefaultKafkaConsumerFactory<Integer, String> cf =
        new DefaultKafkaConsumerFactory<>(consumerProps());
KafkaMessageListenerContainer<Integer, String> container =
        new KafkaMessageListenerContainer<>(cf, containerProps);
return container;
```

Note that when creating a `DefaultKafkaConsumerFactory` using the constructor that just takes in the properties, as above, key and value `Deserializer` classes are picked up from configuration.
Alternatively, `Deserializer` instances may be passed to the `DefaultKafkaConsumerFactory` constructor for key and/or value, in which case all consumers share the same instances.
Another option is to provide `Supplier<Deserializer>`s (starting with version 2.3) that are used to obtain separate `Deserializer` instances for each `Consumer`:

```
DefaultKafkaConsumerFactory<Integer, CustomValue> cf =
        new DefaultKafkaConsumerFactory<>(consumerProps(), null, () -> new CustomValueDeserializer());
KafkaMessageListenerContainer<Integer, CustomValue> container =
        new KafkaMessageListenerContainer<>(cf, containerProps);
return container;
```

Refer to the [Javadoc](https://docs.spring.io/spring-kafka/api/org/springframework/kafka/listener/ContainerProperties.html) for `ContainerProperties` for more information about the various properties that you can set.

Since version 2.1.1, a new property called `logContainerConfig` is available.
When `true` and `INFO` logging is enabled, each listener container writes a log message summarizing its configuration properties.

By default, logging of topic offset commits is performed at the `DEBUG` logging level.
Starting with version 2.1.2, a property in `ContainerProperties` called `commitLogLevel` lets you specify the log level for these messages.
For example, to change the log level to `INFO`, you can use `containerProperties.setCommitLogLevel(LogIfLevelEnabled.Level.INFO);`.

Starting with version 2.2, a new container property called `missingTopicsFatal` has been added (default: `false` since 2.3.4).
This prevents the container from starting if any of the configured topics are not present on the broker.
It does not apply if the container is configured to listen to a topic pattern (regex).
Previously, the container threads looped within the `consumer.poll()` method waiting for the topic to appear while logging many messages.
Aside from the logs, there was no indication that there was a problem.
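
As a quick illustration of the first `ContainerProperties` constructor, the following sketch (hypothetical topic name and offsets) assigns two partitions explicitly:

```
// Partition 0 starts at absolute offset 10; partition 1 starts one record
// before the current end (negative offsets are relative to the end by default).
ContainerProperties containerProps = new ContainerProperties(
        new TopicPartitionOffset("topic1", 0, 10L),
        new TopicPartitionOffset("topic1", 1, -1L));
```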

As of version 2.8, a new container property `authExceptionRetryInterval` has been introduced.
This causes the container to retry fetching messages after getting any `AuthenticationException` or `AuthorizationException` from the `KafkaConsumer`.
This can happen when, for example, the configured user is denied access to read a certain topic or the credentials are incorrect.
Defining `authExceptionRetryInterval` allows the container to recover when proper permissions are granted.

| |By default, no interval is configured - authentication and authorization errors are considered fatal, which causes the container to stop.|
|---|---|

Starting with version 2.8, when creating the consumer factory, if you provide deserializers as objects (in the constructor or via the setters), the factory invokes the `configure()` method to configure them with the configuration properties.

###### Using `ConcurrentMessageListenerContainer`

The single constructor is similar to the `KafkaMessageListenerContainer` constructor.
The following listing shows the constructor's signature:

```
public ConcurrentMessageListenerContainer(ConsumerFactory<K, V> consumerFactory,
                            ContainerProperties containerProperties)
```

It also has a `concurrency` property.
For example, `container.setConcurrency(3)` creates three `KafkaMessageListenerContainer` instances.

For the first constructor, Kafka distributes the partitions across the consumers using its group management capabilities.

| |When listening to multiple topics, the default partition distribution may not be what you expect.
For example, if you have three topics with five partitions each and you want to use `concurrency=15`, you see only five active consumers, each assigned one partition from each topic, with the other 10 consumers being idle.
This is because the default Kafka `PartitionAssignor` is the `RangeAssignor` (see its Javadoc).
For this scenario, you may want to consider using the `RoundRobinAssignor` instead, which distributes the partitions across all of the consumers.
Then, each consumer is assigned one partition.
To change the `PartitionAssignor`, you can set the `partition.assignment.strategy` consumer property (`ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG`) in the properties provided to the `DefaultKafkaConsumerFactory`.
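
For example, a plain-Java sketch of setting that property on the consumer properties map might be:

```
// ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG = "partition.assignment.strategy"
consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG,
        RoundRobinAssignor.class.getName());
```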

When using Spring Boot, you can set the strategy as follows:

```
spring.kafka.consumer.properties.partition.assignment.strategy=\
org.apache.kafka.clients.consumer.RoundRobinAssignor
```|
|---|---|

When the container properties are configured with `TopicPartitionOffset`s, the `ConcurrentMessageListenerContainer` distributes the `TopicPartitionOffset` instances across the delegate `KafkaMessageListenerContainer` instances.

If, say, six `TopicPartitionOffset` instances are provided and the `concurrency` is `3`, each container gets two partitions.
For five `TopicPartitionOffset` instances, two containers get two partitions, and the third gets one.
If the `concurrency` is greater than the number of `TopicPartitions`, the `concurrency` is adjusted down such that each container gets one partition.

| |The `client.id` property (if set) is appended with `-n`, where `n` is the consumer instance that corresponds to the concurrency. This is required to provide unique names for MBeans when JMX is enabled.|
|---|---|

Starting with version 1.3, the `MessageListenerContainer` provides access to the metrics of the underlying `KafkaConsumer`.
In the case of `ConcurrentMessageListenerContainer`, the `metrics()` method returns the metrics for all the target `KafkaMessageListenerContainer` instances.
The metrics are grouped into the `Map<MetricName, ? extends Metric>` by the `client-id` provided for the underlying `KafkaConsumer`.

Starting with version 2.3, the `ContainerProperties` provides an `idleBetweenPolls` option to let the main loop in the listener container sleep between `KafkaConsumer.poll()` calls.
The actual sleep interval is selected as the minimum of the provided option and the difference between the `max.poll.interval.ms` consumer config and the current records batch processing time.

###### Committing Offsets

Several options are provided for committing offsets.
If the `enable.auto.commit` consumer property is `true`, Kafka auto-commits the offsets according to its configuration.
If it is `false`, the containers support several `AckMode` settings (described in the next list).
The default `AckMode` is `BATCH`.
Starting with version 2.3, the framework sets `enable.auto.commit` to `false` unless explicitly set in the configuration.
Previously, the Kafka default (`true`) was used if the property was not set.

The consumer `poll()` method returns one or more `ConsumerRecords`.
The `MessageListener` is called for each record.
The following list describes the action taken by the container for each `AckMode` (when transactions are not being used):

* `RECORD`: Commit the offset when the listener returns after processing the record.

* `BATCH`: Commit the offset when all the records returned by the `poll()` have been processed.

* `TIME`: Commit the offset when all the records returned by the `poll()` have been processed, as long as the `ackTime` since the last commit has been exceeded.

* `COUNT`: Commit the offset when all the records returned by the `poll()` have been processed, as long as `ackCount` records have been received since the last commit.

* `COUNT_TIME`: Similar to `TIME` and `COUNT`, but the commit is performed if either condition is `true`.

* `MANUAL`: The message listener is responsible to `acknowledge()` the `Acknowledgment`.
  After that, the same semantics as `BATCH` are applied.

* `MANUAL_IMMEDIATE`: Commit the offset immediately when the `Acknowledgment.acknowledge()` method is called by the listener.

When using [transactions](#transactions), the offset(s) are sent to the transaction and the semantics are equivalent to `RECORD` or `BATCH`, depending on the listener type (record or batch).

| |`MANUAL` and `MANUAL_IMMEDIATE` require the listener to be an `AcknowledgingMessageListener` or a `BatchAcknowledgingMessageListener`. See [Message Listeners](#message-listeners).|
|---|---|

Depending on the `syncCommits` container property, the `commitSync()` or `commitAsync()` method on the consumer is used.
`syncCommits` is `true` by default; also see `setSyncCommitTimeout`.
See `setCommitCallback` to get the results of asynchronous commits; the default callback is the `LoggingCommitCallback`, which logs errors (and successes at debug level).

Because the listener container has its own mechanism for committing offsets, it prefers the Kafka `ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG` to be `false`.
Starting with version 2.3, it unconditionally sets it to `false` unless specifically set in the consumer factory or the container's consumer property overrides.

The `Acknowledgment` has the following method:

```
public interface Acknowledgment {

    void acknowledge();

}
```

This method gives the listener control over when offsets are committed.

Starting with version 2.3, the `Acknowledgment` interface has two additional methods, `nack(long sleep)` and `nack(int index, long sleep)`.
The first one is used with a record listener, the second with a batch listener.
Calling the wrong method for your listener type throws an `IllegalStateException`.

| |If you want to commit a partial batch by using `nack()` while using transactions, set the `AckMode` to `MANUAL`; invoking `nack()` sends the offsets of the successfully processed records to the transaction.|
|---|---|

| |`nack()` can only be called on the consumer thread that invokes your listener.|
|---|---|

With a record listener, when `nack()` is called, any pending offsets are committed, the remaining records from the last poll are discarded, and seeks are performed on their partitions so that the failed record and unprocessed records are redelivered on the next `poll()`.
The consumer thread can be paused before redelivery by setting the `sleep` argument.
This is similar functionality to throwing an exception when the container is configured with a `DefaultErrorHandler`.

When using a batch listener, you can specify the index within the batch where the failure occurred.
When `nack()` is called, offsets are committed for records before the index, and seeks are performed on the partitions for the failed and discarded records so that they are redelivered on the next `poll()`.

See [Container Error Handlers](#error-handlers) for more information.

| |When using partition assignment via group management, it is important to ensure the `sleep` argument (plus the time spent processing records from the previous poll) is less than the consumer `max.poll.interval.ms` property.|
|---|---|
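
For illustration, a sketch of a batch listener using `nack()` to commit the records before a failure and request redelivery of the remainder; the `findFirstFailure()` helper is hypothetical, and a batch-listener container factory with a manual `AckMode` (like the `batchFactory` bean shown later in [Batch Listeners](#batch-listeners)) is assumed:

```
@KafkaListener(id = "nackBatch", topics = "myTopic", containerFactory = "batchFactory")
public void listen(List<String> data, Acknowledgment ack) {
    int failedIndex = findFirstFailure(data); // hypothetical: -1 if all records succeed
    if (failedIndex < 0) {
        ack.acknowledge();
    }
    else {
        ack.nack(failedIndex, 1000); // pause 1s, then redeliver from failedIndex
    }
}
```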

###### Listener Container Auto Startup

The listener containers implement `SmartLifecycle`, and `autoStartup` is `true` by default.
The containers are started in a late phase (`Integer.MAX_VALUE - 100`).
Other components that implement `SmartLifecycle`, to handle data from listeners, should be started in an earlier phase.
The `- 100` leaves room for later phases to enable components to be auto-started after the containers.

##### Manually Committing Offsets

Normally, when using `AckMode.MANUAL` or `AckMode.MANUAL_IMMEDIATE`, the acknowledgments must be acknowledged in order, because Kafka does not maintain state for each record, only a committed offset for each group/partition.
Starting with version 2.8, you can now set the container property `asyncAcks`, which allows the acknowledgments for records returned by the poll to be acknowledged in any order.
The listener container will defer the out-of-order commits until the missing acknowledgments are received.
The consumer will be paused (no new records delivered) until all the offsets for the previous poll have been committed.

| |While this feature allows applications to process records asynchronously, it should be understood that it increases the possibility of duplicate deliveries after a failure.|
|---|---|

##### `@KafkaListener` Annotation

The `@KafkaListener` annotation is used to designate a bean method as a listener for a listener container.
The bean is wrapped in a `MessagingMessageListenerAdapter` configured with various features, such as converters to convert the data, if necessary, to match the method parameters.

You can configure most attributes on the annotation with SpEL by using `#{…}` or property placeholders (`${…}`).
See the [Javadoc](https://docs.spring.io/spring-kafka/api/org/springframework/kafka/annotation/KafkaListener.html) for more information.

###### Record Listeners

The `@KafkaListener` annotation provides a mechanism for simple POJO listeners.
The following example shows how to use it:

```
public class Listener {

    @KafkaListener(id = "foo", topics = "myTopic", clientIdPrefix = "myClientId")
    public void listen(String data) {
        ...
    }

}
```

This mechanism requires an `@EnableKafka` annotation on one of your `@Configuration` classes and a listener container factory, which is used to configure the underlying `ConcurrentMessageListenerContainer`.
By default, a bean with name `kafkaListenerContainerFactory` is expected.
The following example shows how to use `ConcurrentMessageListenerContainer`:

```
@Configuration
@EnableKafka
public class KafkaConfig {

    @Bean
    KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Integer, String>>
                        kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> factory =
                                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(3);
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }

    @Bean
    public ConsumerFactory<Integer, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, embeddedKafka.getBrokersAsString());
        ...
        return props;
    }
}
```

Notice that, to set container properties, you must use the `getContainerProperties()` method on the factory.
It is used as a template for the actual properties injected into the container.
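
Similarly, a sketch of the manual-ack factory referenced by the Manual Acknowledgment example below (the bean name `kafkaManualAckListenerContainerFactory` is just the convention used in these examples) might look like this:

```
@Bean
KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Integer, String>>
                    kafkaManualAckListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<Integer, String> factory =
                            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory());
    // listeners created by this factory can receive an Acknowledgment argument
    factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
    return factory;
}
```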

Starting with version 2.1.1, you can now set the `client.id` property for consumers created by the annotation.
The `clientIdPrefix` is suffixed with `-n`, where `n` is an integer representing the container number when using concurrency.

Starting with version 2.2, you can now override the container factory's `concurrency` and `autoStartup` properties by using properties on the annotation itself.
The properties can be simple values, property placeholders, or SpEL expressions.
The following example shows how to do so:

```
@KafkaListener(id = "myListener", topics = "myTopic",
        autoStartup = "${listen.auto.start:true}", concurrency = "${listen.concurrency:3}")
public void listen(String data) {
    ...
}
```

###### Explicit Partition Assignment

You can also configure POJO listeners with explicit topics and partitions (and, optionally, their initial offsets).
The following example shows how to do so:

```
@KafkaListener(id = "thing2", topicPartitions =
        { @TopicPartition(topic = "topic1", partitions = { "0", "1" }),
          @TopicPartition(topic = "topic2", partitions = "0",
             partitionOffsets = @PartitionOffset(partition = "1", initialOffset = "100"))
        })
public void listen(ConsumerRecord<?, ?> record) {
    ...
}
```

You can specify each partition in the `partitions` or `partitionOffsets` attribute, but not both.

As with most annotation properties, you can use SpEL expressions; for an example of how to generate a large list of partitions, see [[tip-assign-all-parts]](#tip-assign-all-parts).

Starting with version 2.5.5, you can apply an initial offset to all assigned partitions:

```
@KafkaListener(id = "thing3", topicPartitions =
        { @TopicPartition(topic = "topic1", partitions = { "0", "1" },
             partitionOffsets = @PartitionOffset(partition = "*", initialOffset = "0"))
        })
public void listen(ConsumerRecord<?, ?> record) {
    ...
}
```

The `*` wildcard represents all partitions in the `partitions` attribute.
There must only be one `@PartitionOffset` with the wildcard in each `@TopicPartition`.

In addition, when the listener implements `ConsumerSeekAware`, `onPartitionsAssigned` is now called, even when using manual assignment.
This allows, for example, any arbitrary seek operations at that time.

Starting with version 2.6.4, you can specify a comma-delimited list of partitions, or partition ranges:

```
@KafkaListener(id = "pp", autoStartup = "false",
        topicPartitions = @TopicPartition(topic = "topic1",
                partitions = "0-5, 7, 10-15"))
public void process(String in) {
    ...
}
```

The range is inclusive; the example above assigns partitions `0, 1, 2, 3, 4, 5, 7, 10, 11, 12, 13, 14, 15`.

The same technique can be used when specifying initial offsets:

```
@KafkaListener(id = "thing3", topicPartitions =
        { @TopicPartition(topic = "topic1",
             partitionOffsets = @PartitionOffset(partition = "0-5", initialOffset = "0"))
        })
public void listen(ConsumerRecord<?, ?> record) {
    ...
}
```

The initial offset will be applied to all six partitions.

###### Manual Acknowledgment

When using manual `AckMode`, you can also provide the listener with the `Acknowledgment`.
The following example also shows how to use a different container factory:

```
@KafkaListener(id = "cat", topics = "myTopic",
        containerFactory = "kafkaManualAckListenerContainerFactory")
public void listen(String data, Acknowledgment ack) {
    ...
    ack.acknowledge();
}
```

###### Consumer Record Metadata

Finally, metadata about the record is available from message headers.
You can use the following header names to retrieve the headers of the message:

* `KafkaHeaders.OFFSET`

* `KafkaHeaders.RECEIVED_MESSAGE_KEY`

* `KafkaHeaders.RECEIVED_TOPIC`

* `KafkaHeaders.RECEIVED_PARTITION_ID`

* `KafkaHeaders.RECEIVED_TIMESTAMP`

* `KafkaHeaders.TIMESTAMP_TYPE`

Starting with version 2.5, the `RECEIVED_MESSAGE_KEY` is not present if the incoming record has a `null` key; previously the header was populated with a `null` value.
This change is to make the framework consistent with `spring-messaging` conventions, where `null`-valued headers are not present.

The following example shows how to use the headers:

```
@KafkaListener(id = "qux", topicPattern = "myTopic1")
public void listen(@Payload String foo,
        @Header(name = KafkaHeaders.RECEIVED_MESSAGE_KEY, required = false) Integer key,
        @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition,
        @Header(KafkaHeaders.RECEIVED_TOPIC) String topic,
        @Header(KafkaHeaders.RECEIVED_TIMESTAMP) long ts
        ) {
    ...
}
```

Starting with version 2.5, instead of using discrete headers, you can receive record metadata in a `ConsumerRecordMetadata` parameter.

```
@KafkaListener(...)
public void listen(String str, ConsumerRecordMetadata meta) {
    ...
}
```

This contains all the data from the `ConsumerRecord` except the key and value.

###### Batch Listeners

Starting with version 1.1, you can configure `@KafkaListener` methods to receive the entire batch of consumer records received from the consumer poll.
To configure the listener container factory to create batch listeners, you can set the `batchListener` property.
The following example shows how to do so:

```
@Bean
public KafkaListenerContainerFactory<?> batchFactory() {
    ConcurrentKafkaListenerContainerFactory<Integer, String> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory());
    factory.setBatchListener(true);  // <<<<<<<<<<<<<<<<<<<<<<<<<
    return factory;
}
```

| |Starting with version 2.8, you can override the factory's `batchListener` property using the `batch` property on the `@KafkaListener` annotation. This, together with the changes to [Container Error Handlers](#error-handlers), allows the same factory to be used for both record and batch listeners.|
|---|---|

The following example shows how to receive a list of payloads:

```
@KafkaListener(id = "list", topics = "myTopic", containerFactory = "batchFactory")
public void listen(List<String> list) {
    ...
}
```

The topic, partition, offset, and so on are available in headers that parallel the payloads.
The following example shows how to use the headers:

```
@KafkaListener(id = "list", topics = "myTopic", containerFactory = "batchFactory")
public void listen(List<String> list,
        @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) List<Integer> keys,
        @Header(KafkaHeaders.RECEIVED_PARTITION_ID) List<Integer> partitions,
        @Header(KafkaHeaders.RECEIVED_TOPIC) List<String> topics,
        @Header(KafkaHeaders.OFFSET) List<Long> offsets) {
    ...
}
```

Alternatively, you can receive a `List` of `Message<?>` objects with each offset and other details in each message, but it must be the only parameter (aside from optional `Acknowledgment`, when using manual commits, and/or `Consumer<?, ?>` parameters) defined on the method.
The following example shows how to do so:

```
@KafkaListener(id = "listMsg", topics = "myTopic", containerFactory = "batchFactory")
public void listen14(List<Message<?>> list) {
    ...
}

@KafkaListener(id = "listMsgAck", topics = "myTopic", containerFactory = "batchFactory")
public void listen15(List<Message<?>> list, Acknowledgment ack) {
    ...
}

@KafkaListener(id = "listMsgAckConsumer", topics = "myTopic", containerFactory = "batchFactory")
public void listen16(List<Message<?>> list, Acknowledgment ack, Consumer<?, ?> consumer) {
    ...
}
```

No conversion is performed on the payloads in this case.

If the `BatchMessagingMessageConverter` is configured with a `RecordMessageConverter`, you can also add a generic type to the `Message` parameter and the payloads are converted.
See [Payload Conversion with Batch Listeners](#payload-conversion-with-batch) for more information.

You can also receive a list of `ConsumerRecord<?, ?>` objects, but it must be the only parameter (aside from optional `Acknowledgment`, when using manual commits, and `Consumer<?, ?>` parameters) defined on the method.
The following example shows how to do so:

```
@KafkaListener(id = "listCRs", topics = "myTopic", containerFactory = "batchFactory")
public void listen(List<ConsumerRecord<Integer, String>> list) {
    ...
}

@KafkaListener(id = "listCRsAck", topics = "myTopic", containerFactory = "batchFactory")
public void listen(List<ConsumerRecord<Integer, String>> list, Acknowledgment ack) {
    ...
}
```

Starting with version 2.2, the listener can receive the complete `ConsumerRecords<?, ?>` object returned by the `poll()` method, letting the listener access additional methods, such as `partitions()` (which returns the `TopicPartition` instances in the list) and `records(TopicPartition)` (which gets selective records).
Again, this must be the only parameter (aside from optional `Acknowledgment`, when using manual commits, or `Consumer<?, ?>` parameters) on the method.
The following example shows how to do so:

```
@KafkaListener(id = "pollResults", topics = "myTopic", containerFactory = "batchFactory")
public void pollResults(ConsumerRecords<?, ?> records) {
    ...
}
```

| |If the container factory has a `RecordFilterStrategy` configured, it is ignored for `ConsumerRecords<?, ?>` listeners, with a `WARN` log message emitted. Records can only be filtered with a batch listener if the `List<ConsumerRecord<?, ?>>` form of listener is used. By default, records are filtered one-at-a-time; starting with version 2.8, you can override `filterBatch` to filter the entire batch in one call.|
|---|---|
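
As a sketch, a filter strategy on the `batchFactory` shown above that discards records whose value contains "ignore" (assuming a `String` value type; returning `true` from the lambda means the record is discarded) could be configured as:

```
factory.setRecordFilterStrategy(record -> record.value().contains("ignore"));
```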

###### Annotation Properties

Starting with version 2.0, the `id` property (if present) is used as the Kafka consumer `group.id` property, overriding the configured property in the consumer factory, if present.
You can also set `groupId` explicitly or set `idIsGroup` to `false` to restore the previous behavior of using the consumer factory `group.id`.

You can use property placeholders or SpEL expressions within most annotation properties, as the following example shows:

```
@KafkaListener(topics = "${some.property}")

@KafkaListener(topics = "#{someBean.someProperty}",
    groupId = "#{someBean.someProperty}.group")
```

Starting with version 2.1.2, the SpEL expressions support a special token: `__listener`.
It is a pseudo bean name that represents the current bean instance within which this annotation exists.

Consider the following example:

```
@Bean
public Listener listener1() {
    return new Listener("topic1");
}

@Bean
public Listener listener2() {
    return new Listener("topic2");
}
```

Given the beans in the previous example, we can then use the following:

```
public class Listener {

    private final String topic;

    public Listener(String topic) {
        this.topic = topic;
    }

    @KafkaListener(topics = "#{__listener.topic}",
        groupId = "#{__listener.topic}.group")
    public void listen(...) {
        ...
    }

    public String getTopic() {
        return this.topic;
    }

}
```

If, in the unlikely event, you have an actual bean called `__listener`, you can change the expression token by using the `beanRef` attribute.
The following example shows how to do so:

```
@KafkaListener(beanRef = "__x", topics = "#{__x.topic}",
    groupId = "#{__x.topic}.group")
```

Starting with version 2.2.4, you can specify Kafka consumer properties directly on the annotation; these will override any properties with the same name configured in the consumer factory. You **cannot** specify the `group.id` and `client.id` properties this way; they will be ignored; use the `groupId` and `clientIdPrefix` annotation properties for those.

The properties are specified as individual strings with the normal Java `Properties` file format: `foo:bar`, `foo=bar`, or `foo bar`.

```
@KafkaListener(topics = "myTopic", groupId = "group", properties = {
    "max.poll.interval.ms:60000",
    ConsumerConfig.MAX_POLL_RECORDS_CONFIG + "=100"
})
```

The following is an example of the corresponding listeners for the example in [Using `RoutingKafkaTemplate`](#routing-template).

```
@KafkaListener(id = "one", topics = "one")
public void listen1(String in) {
    System.out.println("1: " + in);
}

@KafkaListener(id = "two", topics = "two",
        properties = "value.deserializer:org.apache.kafka.common.serialization.ByteArrayDeserializer")
public void listen2(byte[] in) {
    System.out.println("2: " + new String(in));
}
```

##### Obtaining the Consumer `group.id`

When running the same listener code in multiple containers, it may be useful to be able to determine which container (identified by its `group.id` consumer property) a record came from.

You can call `KafkaUtils.getConsumerGroupId()` on the listener thread to do this.
Alternatively, you can access the group id in a method parameter.

```
@KafkaListener(id = "bar", topicPattern = "${topicTwo:annotated2}", exposeGroupId = "${always:true}")
public void listener(@Payload String foo,
        @Header(KafkaHeaders.GROUP_ID) String groupId) {
    ...
}
```
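
For illustration, a sketch of the `KafkaUtils` approach (the log statement is just a placeholder):

```
@KafkaListener(id = "bar2", topicPattern = "${topicTwo:annotated2}")
public void listener(String in) {
    // returns the group id of the container that invoked this listener
    String groupId = KafkaUtils.getConsumerGroupId();
    log.info("Received {} from group {}", in, groupId);
}
```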

| |This is available in record listeners and batch listeners that receive a `List` of records. It is **not** available in a batch listener that receives a `ConsumerRecords<?, ?>` argument. Use the `KafkaUtils` mechanism in that case.|
|---|---|

##### Container Thread Naming

Listener containers currently use two task executors, one to invoke the consumer and another that is used to invoke the listener when the Kafka consumer property `enable.auto.commit` is `false`.
You can provide custom executors by setting the `consumerExecutor` and `listenerExecutor` properties of the container's `ContainerProperties`.
When using pooled executors, be sure that enough threads are available to handle the concurrency across all the containers in which they are used.
When using the `ConcurrentMessageListenerContainer`, a thread from each is used for each consumer (`concurrency`).

If you do not provide a consumer executor, a `SimpleAsyncTaskExecutor` is used.
This executor creates threads with names similar to `<name>-C-1` (consumer thread).
For the `ConcurrentMessageListenerContainer`, the `<name>` part of the thread name becomes `<name>-m`, where `m` represents the consumer instance.
`n` increments each time the container is started.
So, with a bean name of `container`, threads in this container will be named `container-0-C-1`, `container-1-C-1` etc., after the container is started the first time; `container-0-C-2`, `container-1-C-2` etc., after a stop and subsequent start.

##### `@KafkaListener` as a Meta Annotation

Starting with version 2.2, you can now use `@KafkaListener` as a meta annotation.
The following example shows how to do so:

```
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@KafkaListener
public @interface MyThreeConsumersListener {

    @AliasFor(annotation = KafkaListener.class, attribute = "id")
    String id();

    @AliasFor(annotation = KafkaListener.class, attribute = "topics")
    String[] topics();

    @AliasFor(annotation = KafkaListener.class, attribute = "concurrency")
    String concurrency() default "3";

}
```

You must alias at least one of `topics`, `topicPattern`, or `topicPartitions` (and, usually, `id` or `groupId` unless you have specified a `group.id` in the consumer factory configuration).
The following example shows how to do so:

```
@MyThreeConsumersListener(id = "my.group", topics = "my.topic")
public void listen1(String in) {
    ...
}
```

##### `@KafkaListener` on a Class

When you use `@KafkaListener` at the class-level, you must specify `@KafkaHandler` at the method level.
When messages are delivered, the converted message payload type is used to determine which method to call.
The following example shows how to do so:

```
@KafkaListener(id = "multi", topics = "myTopic")
static class MultiListenerBean {

    @KafkaHandler
    public void listen(String foo) {
        ...
    }

    @KafkaHandler
    public void listen(Integer bar) {
        ...
    }

    @KafkaHandler(isDefault = true)
    public void listenDefault(Object object) {
        ...
    }

}
```

Starting with version 2.1.3, you can designate a `@KafkaHandler` method as the default method that is invoked if there is no match on other methods.
At most, one method can be so designated.
When using `@KafkaHandler` methods, the payload must have already been converted to the domain object (so the match can be performed).
Use a custom deserializer, the `JsonDeserializer`, or the `JsonMessageConverter` with its `TypePrecedence` set to `TYPE_ID`.
See [Serialization, Deserialization, and Message Conversion](#serdes) for more information.

| |Due to some limitations in the way Spring resolves method arguments, a default `@KafkaHandler` cannot receive discrete headers; it must use the `ConsumerRecordMetadata` as discussed in [Consumer Record Metadata](#consumer-record-metadata).|
|---|---|

For example:

```
@KafkaHandler(isDefault = true)
public void listenDefault(Object object, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {
    ...
}
```

This won't work if the object is a `String`; the `topic` parameter will also get a reference to `object`.

If you need metadata about the record in a default method, use this:

```
@KafkaHandler(isDefault = true)
void listen(Object in, @Header(KafkaHeaders.RECORD_METADATA) ConsumerRecordMetadata meta) {
    String topic = meta.topic();
    ...
}
```

##### `@KafkaListener` Attribute Modification

Starting with version 2.7.2, you can now programmatically modify annotation attributes before the container is created.
To do so, add one or more `KafkaListenerAnnotationBeanPostProcessor.AnnotationEnhancer` to the application context.
`AnnotationEnhancer` is a `BiFunction<Map<String, Object>, AnnotatedElement, Map<String, Object>>` and must return a map of attributes.
The attribute values can contain SpEL and/or property placeholders; the enhancer is called before any resolution is performed.
If more than one enhancer is present, and they implement `Ordered`, they will be invoked in order.

| |`AnnotationEnhancer` bean definitions must be declared `static` because they are required very early in the application context's lifecycle.|
|---|---|

An example follows:

```
@Bean
public static AnnotationEnhancer groupIdEnhancer() {
    return (attrs, element) -> {
        attrs.put("groupId", attrs.get("id") + "." + (element instanceof Class
                ? ((Class<?>) element).getSimpleName()
                : ((Method) element).getDeclaringClass().getSimpleName()
                        + "." + ((Method) element).getName()));
        return attrs;
    };
}
```

##### `@KafkaListener` Lifecycle Management

The listener containers created for `@KafkaListener` annotations are not beans in the application context.
Instead, they are registered with an infrastructure bean of type `KafkaListenerEndpointRegistry`.
This bean is automatically declared by the framework and manages the containers' lifecycles; it will auto-start any containers that have `autoStartup` set to `true`.
All containers created by all container factories must be in the same `phase`.
See [Listener Container Auto Startup](#container-auto-startup) for more information.
You can manage the lifecycle programmatically by using the registry.
Starting or stopping the registry will start or stop all the registered containers.
Alternatively, you can get a reference to an individual container by using its `id` attribute.
You can set `autoStartup` on the annotation, which overrides the default setting configured into the container factory.
+You can get a reference to the bean from the application context, such as auto-wiring, to manage its registered containers. +The following examples show how to do so: + +``` +@KafkaListener(id = "myContainer", topics = "myTopic", autoStartup = "false") +public void listen(...) { ... } +``` + +``` +@Autowired +private KafkaListenerEndpointRegistry registry; + +... + + this.registry.getListenerContainer("myContainer").start(); + +... +``` + +The registry only maintains the life cycle of containers it manages; containers declared as beans are not managed by the registry and can be obtained from the application context. +A collection of managed containers can be obtained by calling the registry’s `getListenerContainers()` method. +Version 2.2.5 added a convenience method `getAllListenerContainers()`, which returns a collection of all containers, including those managed by the registry and those declared as beans. +The collection returned will include any prototype beans that have been initialized, but it will not initialize any lazy bean declarations. + +##### `@KafkaListener` `@Payload` Validation + +Starting with version 2.2, it is now easier to add a `Validator` to validate `@KafkaListener` `@Payload` arguments. +Previously, you had to configure a custom `DefaultMessageHandlerMethodFactory` and add it to the registrar. +Now, you can add the validator to the registrar itself. +The following code shows how to do so: + +``` +@Configuration +@EnableKafka +public class Config implements KafkaListenerConfigurer { + + ... + + @Override + public void configureKafkaListeners(KafkaListenerEndpointRegistrar registrar) { + registrar.setValidator(new MyValidator()); + } + +} +``` + +| |When you use Spring Boot with the validation starter, a `LocalValidatorFactoryBean` is auto-configured, as the following example shows:| +|---|---------------------------------------------------------------------------------------------------------------------------------------| + +``` +@Configuration +@EnableKafka +public class Config implements KafkaListenerConfigurer { + + @Autowired + private LocalValidatorFactoryBean validator; + ... + + @Override + public void configureKafkaListeners(KafkaListenerEndpointRegistrar registrar) { + registrar.setValidator(this.validator); + } +} +``` + +The following examples show how to validate: + +``` +public static class ValidatedClass { + + @Max(10) + private int bar; + + public int getBar() { + return this.bar; + } + + public void setBar(int bar) { + this.bar = bar; + } + +} +``` + +``` +@KafkaListener(id="validated", topics = "annotated35", errorHandler = "validationErrorHandler", + containerFactory = "kafkaJsonListenerContainerFactory") +public void validatedListener(@Payload @Valid ValidatedClass val) { + ... +} + +@Bean +public KafkaListenerErrorHandler validationErrorHandler() { + return (m, e) -> { + ... + }; +} +``` + +Starting with version 2.5.11, validation now works on payloads for `@KafkaHandler` methods in a class-level listener. +See [`@KafkaListener` on a Class](#class-level-kafkalistener). + +##### Rebalancing Listeners + +`ContainerProperties` has a property called `consumerRebalanceListener`, which takes an implementation of the Kafka client’s `ConsumerRebalanceListener` interface. +If this property is not provided, the container configures a logging listener that logs rebalance events at the `INFO` level. +The framework also adds a sub-interface `ConsumerAwareRebalanceListener`. 
The following listing shows the `ConsumerAwareRebalanceListener` interface definition:

```
public interface ConsumerAwareRebalanceListener extends ConsumerRebalanceListener {

    void onPartitionsRevokedBeforeCommit(Consumer<?, ?> consumer, Collection<TopicPartition> partitions);

    void onPartitionsRevokedAfterCommit(Consumer<?, ?> consumer, Collection<TopicPartition> partitions);

    void onPartitionsAssigned(Consumer<?, ?> consumer, Collection<TopicPartition> partitions);

    void onPartitionsLost(Consumer<?, ?> consumer, Collection<TopicPartition> partitions);

}
```

Notice that there are two callbacks when partitions are revoked.
The first is called immediately.
The second is called after any pending offsets are committed.
This is useful if you wish to maintain offsets in some external repository, as the following example shows:

```
containerProperties.setConsumerRebalanceListener(new ConsumerAwareRebalanceListener() {

    @Override
    public void onPartitionsRevokedBeforeCommit(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
        // acknowledge any pending Acknowledgments (if using manual acks)
    }

    @Override
    public void onPartitionsRevokedAfterCommit(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
        // ...
        store(consumer.position(partition));
        // ...
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        // ...
        consumer.seek(partition, offsetTracker.getOffset() + 1);
        // ...
    }
});
```

| |Starting with version 2.4, a new method, `onPartitionsLost()`, has been added (similar to a method with the same name in `ConsumerRebalanceListener`).
The default implementation on `ConsumerRebalanceListener` simply calls `onPartitionsRevoked`.
The default implementation on `ConsumerAwareRebalanceListener` does nothing.
When supplying the listener container with a custom listener (of either type), it is important that your implementation not call `onPartitionsRevoked` from `onPartitionsLost`.
If you implement `ConsumerRebalanceListener`, you should override the default method.
This is because the listener container will call its own `onPartitionsRevoked` from its implementation of `onPartitionsLost` after calling the method on your implementation.
If your implementation delegates to the default behavior, `onPartitionsRevoked` will be called twice each time the `Consumer` calls that method on the container’s listener.|
+|---|---|
+
+##### Forwarding Listener Results using `@SendTo`
+
+Starting with version 2.0, if you also annotate a `@KafkaListener` with a `@SendTo` annotation and the method invocation returns a result, the result is forwarded to the topic specified by the `@SendTo`.
+
+The `@SendTo` value can have several forms:
+
+* `@SendTo("someTopic")` routes to the literal topic.
+
+* `@SendTo("#{someExpression}")` routes to the topic determined by evaluating the expression once during application context initialization.
+
+* `@SendTo("!{someExpression}")` routes to the topic determined by evaluating the expression at runtime.
+  The `#root` object for the evaluation has three properties:
+
+  * `request`: The inbound `ConsumerRecord` (or `ConsumerRecords` object for a batch listener)
+
+  * `source`: The `org.springframework.messaging.Message<?>` converted from the `request`.
+
+  * `result`: The method return result.
+
+* `@SendTo` (no properties): This is treated as `!{source.headers['kafka_replyTopic']}` (since version 2.1.3).
+
+Starting with versions 2.1.11 and 2.2.1, property placeholders are resolved within `@SendTo` values.
+
+The result of the expression evaluation must be a `String` that represents the topic name.
+The following examples show the various ways to use `@SendTo`:
+
+```
+@KafkaListener(topics = "annotated21")
+@SendTo("!{request.value()}") // runtime SpEL
+public String replyingListener(String in) {
+    ...
+}
+
+@KafkaListener(topics = "${some.property:annotated22}")
+@SendTo("#{myBean.replyTopic}") // config time SpEL
+public Collection<String> replyingBatchListener(List<String> in) {
+    ...
+}
+
+@KafkaListener(topics = "annotated23", errorHandler = "replyErrorHandler")
+@SendTo("annotated23reply") // static reply topic definition
+public String replyingListenerWithErrorHandler(String in) {
+    ...
+}
+...
+@KafkaListener(topics = "annotated25")
+@SendTo("annotated25reply1")
+public class MultiListenerSendTo {
+
+    @KafkaHandler
+    public String foo(String in) {
+        ...
+    }
+
+    @KafkaHandler
+    @SendTo("!{'annotated25reply2'}")
+    public String bar(@Payload(required = false) KafkaNull nul,
+            @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) int key) {
+        ...
+    }
+
+}
+```
+
+| |In order to support `@SendTo`, the listener container factory must be provided with a `KafkaTemplate` (in its `replyTemplate` property), which is used to send the reply.
This should be a `KafkaTemplate` and not a `ReplyingKafkaTemplate`, which is used on the client-side for request/reply processing.
When using Spring Boot, Boot auto-configures the template into the factory; when configuring your own factory, it must be set as shown in the examples below.|
+|---|---|
+
+Starting with version 2.2, you can add a `ReplyHeadersConfigurer` to the listener container factory.
+This is consulted to determine which headers you want to set in the reply message.
+The following example shows how to add a `ReplyHeadersConfigurer`:
+
+```
+@Bean
+public ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() {
+    ConcurrentKafkaListenerContainerFactory factory =
+            new ConcurrentKafkaListenerContainerFactory<>();
+    factory.setConsumerFactory(cf());
+    factory.setReplyTemplate(template());
+    factory.setReplyHeadersConfigurer((k, v) -> k.equals("cat"));
+    return factory;
+}
+```
+
+You can also add more headers if you wish.
+The following example shows how to do so:
+
+```
+@Bean
+public ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() {
+    ConcurrentKafkaListenerContainerFactory factory =
+            new ConcurrentKafkaListenerContainerFactory<>();
+    factory.setConsumerFactory(cf());
+    factory.setReplyTemplate(template());
+    factory.setReplyHeadersConfigurer(new ReplyHeadersConfigurer() {
+
+        @Override
+        public boolean shouldCopy(String headerName, Object headerValue) {
+            return false;
+        }
+
+        @Override
+        public Map<String, Object> additionalHeaders() {
+            return Collections.singletonMap("qux", "fiz");
+        }
+
+    });
+    return factory;
+}
+```
+
+When you use `@SendTo`, you must configure the `ConcurrentKafkaListenerContainerFactory` with a `KafkaTemplate` in its `replyTemplate` property to perform the send.
+
+| |Unless you use [request/reply semantics](#replying-template), only the simple `send(topic, value)` method is used, so you may wish to create a subclass to generate the partition or key.
The following example shows how to do so:|
+|---|---|
+
+```
+@Bean
+public KafkaTemplate<String, String> myReplyingTemplate() {
+    return new KafkaTemplate<String, String>(producerFactory()) {
+
+        @Override
+        public ListenableFuture<SendResult<String, String>> send(String topic, String data) {
+            return super.send(topic, partitionForData(data), keyForData(data), data);
+        }
+
+        ...
+
+    };
+}
+```
+
+| |If the listener method returns `Message<?>` or `Collection<Message<?>>`, the listener method is responsible for setting up the message headers for the reply.
For example, when handling a request from a `ReplyingKafkaTemplate`, you might do the following:

```
@KafkaListener(id = "messageReturned", topics = "someTopic")
public Message<?> listen(String in, @Header(KafkaHeaders.REPLY_TOPIC) byte[] replyTo,
@Header(KafkaHeaders.CORRELATION_ID) byte[] correlation) {
return MessageBuilder.withPayload(in.toUpperCase())
.setHeader(KafkaHeaders.TOPIC, replyTo)
.setHeader(KafkaHeaders.MESSAGE_KEY, 42)
.setHeader(KafkaHeaders.CORRELATION_ID, correlation)
.setHeader("someOtherHeader", "someValue")
.build();
}
```| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +When using request/reply semantics, the target partition can be requested by the sender. + +| |You can annotate a `@KafkaListener` method with `@SendTo` even if no result is returned.
This is to allow the configuration of an `errorHandler` that can forward information about a failed message delivery to some topic.
The following example shows how to do so:

```
@KafkaListener(id = "voidListenerWithReplyingErrorHandler", topics = "someTopic",
errorHandler = "voidSendToErrorHandler")
@SendTo("failures")
public void voidListenerWithReplyingErrorHandler(String in) {
throw new RuntimeException("fail");
}

@Bean
public KafkaListenerErrorHandler voidSendToErrorHandler() {
return (m, e) -> {
return ... // some information about the failure and input data
};
}
```

See [Handling Exceptions](#annotation-error-handling) for more information.|
+|---|---|
+
+| |If a listener method returns an `Iterable`, by default a record is sent for each element, with the element as the value.
Starting with version 2.3.5, set the `splitIterables` property on `@KafkaListener` to `false` and the entire result will be sent as the value of a single `ProducerRecord`.
This requires a suitable serializer in the reply template’s producer configuration.
However, if the reply is `Iterable<Message<?>>` the property is ignored and each message is sent separately.|
+|---|---|
+
+##### Filtering Messages
+
+In certain scenarios, such as rebalancing, a message that has already been processed may be redelivered.
+The framework cannot know whether such a message has been processed or not.
+That is an application-level function.
+This is known as the [Idempotent Receiver](https://www.enterpriseintegrationpatterns.com/patterns/messaging/IdempotentReceiver.html) pattern and Spring Integration provides an [implementation of it](https://docs.spring.io/spring-integration/reference/html/#idempotent-receiver).
+
+The Spring for Apache Kafka project also provides some assistance by means of the `FilteringMessageListenerAdapter` class, which can wrap your `MessageListener`.
+This class takes an implementation of `RecordFilterStrategy` in which you implement the `filter` method to signal that a message is a duplicate and should be discarded.
+This has an additional property called `ackDiscarded`, which indicates whether the adapter should acknowledge the discarded record.
+It is `false` by default.
+
+When you use `@KafkaListener`, set the `RecordFilterStrategy` (and optionally `ackDiscarded`) on the container factory so that the listener is wrapped in the appropriate filtering adapter (see the example below).
+
+In addition, a `FilteringBatchMessageListenerAdapter` is provided, for when you use a batch [message listener](#message-listeners).
+
+| |The `FilteringBatchMessageListenerAdapter` is ignored if your `@KafkaListener` receives a `ConsumerRecords<?, ?>` instead of `List<ConsumerRecord<?, ?>>`, because `ConsumerRecords` is immutable.|
+|---|---|
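+
+For illustration, the following is a minimal sketch of setting a filter on the container factory; the `cf()` consumer factory and the `alreadyProcessed()` duplicate check are hypothetical placeholders:
+
+```
+@Bean
+public ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() {
+    ConcurrentKafkaListenerContainerFactory factory =
+            new ConcurrentKafkaListenerContainerFactory<>();
+    factory.setConsumerFactory(cf());
+    // return true from the filter to discard the record as a duplicate
+    factory.setRecordFilterStrategy(record -> alreadyProcessed(record));
+    // acknowledge discarded records when using manual acks (optional)
+    factory.setAckDiscarded(true);
+    return factory;
+}
+```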
+
+##### Retrying Deliveries
+
+See the `DefaultErrorHandler` in [Handling Exceptions](#annotation-error-handling).
+
+##### Starting `@KafkaListener` s in Sequence
+
+A common use case is to start a listener after another listener has consumed all the records in a topic.
+For example, you may want to load the contents of one or more compacted topics into memory before processing records from other topics.
+Starting with version 2.7.3, a new component `ContainerGroupSequencer` has been introduced.
+It uses the `@KafkaListener` `containerGroup` property to group containers together and start the containers in the next group when all the containers in the current group have gone idle.
+
+It is best illustrated with an example.
+
+```
+@KafkaListener(id = "listen1", topics = "topic1", containerGroup = "g1", concurrency = "2")
+public void listen1(String in) {
+}
+
+@KafkaListener(id = "listen2", topics = "topic2", containerGroup = "g1", concurrency = "2")
+public void listen2(String in) {
+}
+
+@KafkaListener(id = "listen3", topics = "topic3", containerGroup = "g2", concurrency = "2")
+public void listen3(String in) {
+}
+
+@KafkaListener(id = "listen4", topics = "topic4", containerGroup = "g2", concurrency = "2")
+public void listen4(String in) {
+}
+
+@Bean
+ContainerGroupSequencer sequencer(KafkaListenerEndpointRegistry registry) {
+    return new ContainerGroupSequencer(registry, 5000, "g1", "g2");
+}
+```
+
+Here, we have 4 listeners in two groups, `g1` and `g2`.
+
+During application context initialization, the sequencer sets the `autoStartup` property of all the containers in the provided groups to `false`.
+It also sets the `idleEventInterval` for any containers (that do not already have one set) to the supplied value (5000ms in this case).
+Then, when the sequencer is started by the application context, the containers in the first group are started.
+As `ListenerContainerIdleEvent` s are received, each individual child container in each container is stopped.
+When all child containers in a `ConcurrentMessageListenerContainer` are stopped, the parent container is stopped.
+When all containers in a group have been stopped, the containers in the next group are started.
+There is no limit to the number of groups or containers in a group.
+
+By default, the containers in the final group (`g2` above) are not stopped when they go idle.
+To modify that behavior, set `stopLastGroupWhenIdle` to `true` on the sequencer.
+
+As an aside, previously, containers in each group were added to a bean of type `Collection<MessageListenerContainer>` with the bean name being the `containerGroup`.
+These collections are now deprecated in favor of beans of type `ContainerGroup` with a bean name that is the group name, suffixed with `.group`; in the example above, there would be 2 beans `g1.group` and `g2.group`.
+The `Collection` beans will be removed in a future release.
+
+##### Using `KafkaTemplate` to Receive
+
+This section covers how to use `KafkaTemplate` to receive messages.
+
+Starting with version 2.8, the template has four `receive()` methods:
+
+```
+ConsumerRecord<K, V> receive(String topic, int partition, long offset);
+
+ConsumerRecord<K, V> receive(String topic, int partition, long offset, Duration pollTimeout);
+
+ConsumerRecords<K, V> receive(Collection<TopicPartitionOffset> requested);
+
+ConsumerRecords<K, V> receive(Collection<TopicPartitionOffset> requested, Duration pollTimeout);
+```
+
+As you can see, you need to know the partition and offset of the record(s) you need to retrieve; a new `Consumer` is created (and closed) for each operation.
+
+With the last two methods, each record is retrieved individually and the results assembled into a `ConsumerRecords` object.
+When creating the `TopicPartitionOffset` s for the request, only positive, absolute offsets are supported.
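+
+As a usage sketch (the topic name, partitions, and offsets here are illustrative, and `template` is assumed to be a configured `KafkaTemplate`):
+
+```
+// fetch a single record at a known partition/offset, waiting up to 10 seconds
+ConsumerRecord<String, String> record = template.receive("someTopic", 0, 42L, Duration.ofSeconds(10));
+
+// fetch several specific records in one call; each is retrieved individually
+ConsumerRecords<String, String> records = template.receive(List.of(
+        new TopicPartitionOffset("someTopic", 0, 42L),
+        new TopicPartitionOffset("someTopic", 1, 43L)));
+```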
+#### 4.1.5. Listener Container Properties
+
+| Property | Default | Description |
+|----------|---------|-------------|
+| `ackCount` | 1 | The number of records before committing pending offsets when the `ackMode` is `COUNT` or `COUNT_TIME`. |
+| `ackMode` | BATCH | Controls how often offsets are committed. |
+| `ackTime` | 5000 | The time in milliseconds after which pending offsets are committed when the `ackMode` is `TIME` or `COUNT_TIME`. |
+| `adviceChain` | `null` | A chain of `Advice` objects wrapping the message listener, invoked in order. |
+| `assignmentCommitOption` | `LATEST_ONLY_NO_TX` | Whether or not to commit the initial position on assignment; by default, the initial offset will only be committed if the `ConsumerConfig.AUTO_OFFSET_RESET_CONFIG` is `latest` and it won’t run in a transaction even if there is a transaction manager present. See the javadocs for `ContainerProperties.AssignmentCommitOption` for more information about the available options. |
+| `authExceptionRetryInterval` | `null` | When not null, a `Duration` to sleep between polls when an `AuthenticationException` or `AuthorizationException` is thrown by the Kafka client. When null, such exceptions are considered fatal and the container will stop. |
+| `checkDeserExWhenKeyNull` | `false` | Set to `true` to always check for a `DeserializationException` header when a `null` `key` is received. Useful when the consumer code cannot determine that an `ErrorHandlingDeserializer` has been configured, such as when using a delegating deserializer. |
+| `checkDeserExWhenValueNull` | `false` | Set to `true` to always check for a `DeserializationException` header when a `null` `value` is received. Useful when the consumer code cannot determine that an `ErrorHandlingDeserializer` has been configured, such as when using a delegating deserializer. |
+| `clientId` | (empty string) | A prefix for the `client.id` consumer property. Overrides the consumer factory `client.id` property; in a concurrent container, `-n` is added as a suffix for each consumer instance. |
+| `commitCallback` | `null` | When present and `syncCommits` is `false`, a callback invoked after the commit completes. |
+| `commitLogLevel` | DEBUG | The logging level for logs pertaining to committing offsets. |
+| `consumerStartTimeout` | 30s | The time to wait for the consumer to start before logging an error; this might happen if, say, you use a task executor with insufficient threads. |
+| `consumerTaskExecutor` | `SimpleAsyncTaskExecutor` | A task executor to run the consumer threads. The default executor creates threads named `<name>-C-n`; with the `KafkaMessageListenerContainer`, the name is the bean name; with the `ConcurrentMessageListenerContainer` the name is the bean name suffixed with `-n` where n is incremented for each child container. |
+| `groupId` | `null` | Overrides the consumer `group.id` property; automatically set by the `@KafkaListener` `id` or `groupId` property. |
+| `idleBeforeDataMultiplier` | 5.0 | Multiplier for `idleEventInterval` that is applied before any records are received. After a record is received, the multiplier is no longer applied. Available since version 2.8. |
+| `idleBetweenPolls` | 0 | Used to slow down deliveries by sleeping the thread between polls. The time to process a batch of records plus this value must be less than the `max.poll.interval.ms` consumer property. |
+| `idleEventInterval` | `null` | When set, enables publication of `ListenerContainerIdleEvent` s; see [Application Events](#events). Also see `idleBeforeDataMultiplier`. |
+| `kafkaConsumerProperties` | None | Used to override any arbitrary consumer properties configured on the consumer factory. |
+| `logContainerConfig` | `false` | Set to `true` to log at INFO level all container properties. |
+| `messageListener` | `null` | The message listener. |
+| `micrometerEnabled` | `true` | Whether or not to maintain Micrometer timers for the consumer threads. |
+| `missingTopicsFatal` | `false` | When `true`, prevents the container from starting if the configured topics are not present on the broker. |
+| `monitorInterval` | 30s | How often to check the state of the consumer threads for `NonResponsiveConsumerEvent` s. See `noPollThreshold` and `pollTimeout`. |
+| `noPollThreshold` | 3.0 | Multiplied by `pollTimeOut` to determine whether to publish a `NonResponsiveConsumerEvent`. See `monitorInterval`. |
+| `scheduler` | `ThreadPoolTaskScheduler` | A scheduler on which to run the consumer monitor task. |
+| `shutdownTimeout` | 10000 | The maximum time in ms to block the `stop()` method until all consumers stop and before publishing the container stopped event. |
+| `stopImmediate` | `false` | When the container is stopped, stop processing after the current record instead of after processing all the records from the previous poll. |
+| `syncCommitTimeout` | `null` | The timeout to use when `syncCommits` is `true`. When not set, the container will attempt to determine the `default.api.timeout.ms` consumer property and use that; otherwise it will use 60 seconds. |
+| `syncCommits` | `true` | Whether to use sync or async commits for offsets; see `commitCallback`. |
+| `topics` / `topicPattern` / `topicPartitions` | n/a | The configured topics, topic pattern or explicitly assigned topics/partitions. Mutually exclusive; at least one must be provided; enforced by `ContainerProperties` constructors. |
+
+| Property | Default | Description |
+|----------|---------|-------------|
+| `afterRollbackProcessor` | `DefaultAfterRollbackProcessor` | An `AfterRollbackProcessor` to invoke after a transaction is rolled back. |
+| `applicationEventPublisher` | application context | The event publisher. |
+| `batchErrorHandler` | See desc. | Deprecated - see `commonErrorHandler`. |
+| `batchInterceptor` | `null` | Set a `BatchInterceptor` to call before invoking the batch listener; does not apply to record listeners. Also see `interceptBeforeTx`. |
+| `beanName` | bean name | The bean name of the container; suffixed with `-n` for child containers. |
+| `containerProperties` | `ContainerProperties` | The container properties instance. |
+| `errorHandler` | See desc. | Deprecated - see `commonErrorHandler`. |
+| `genericErrorHandler` | See desc. | Deprecated - see `commonErrorHandler`. |
+| `groupId` | See desc. | The `containerProperties.groupId`, if present, otherwise the `group.id` property from the consumer factory. |
+| `interceptBeforeTx` | `true` | Determines whether the `recordInterceptor` is called before or after a transaction starts. |
+| `listenerId` | See desc. | The bean name for user-configured containers or the `id` attribute of `@KafkaListener` s. |
+| `pauseRequested` | n/a | True if a consumer pause has been requested. |
+| `recordInterceptor` | `null` | Set a `RecordInterceptor` to call before invoking the record listener; does not apply to batch listeners. Also see `interceptBeforeTx`. |
+| `topicCheckTimeout` | 30s | When the `missingTopicsFatal` container property is `true`, how long to wait, in seconds, for the `describeTopics` operation to complete. |
+
+| Property | Default | Description |
+|----------|---------|-------------|
+| `clientIdSuffix` | `null` | Used by the concurrent container to give each child container’s consumer a unique `client.id`. |
+| `containerPaused` | n/a | True if pause has been requested and the consumer has actually paused. |
+
+| Property | Default | Description |
+|----------|---------|-------------|
+| `alwaysClientIdSuffix` | `true` | Set to `false` to suppress adding a suffix to the `client.id` consumer property, when the `concurrency` is only 1. |
+| `assignedPartitionsByClientId` | n/a | The partitions currently assigned to this container’s child `KafkaMessageListenerContainer` s, keyed by the child container’s consumer’s `client.id` property. |
+| `concurrency` | 1 | The number of child `KafkaMessageListenerContainer` s to manage. |
+| `containersPaused` | n/a | True if pause has been requested and all child containers’ consumers have actually paused. |
+| `containers` | n/a | A reference to all child `KafkaMessageListenerContainer` s. |
+
+#### 4.1.6. Application Events
+
+The following Spring application events are published by listener containers and their consumers:
+
+* `ConsumerStartingEvent` - published when a consumer thread is first started, before it starts polling.
+
+* `ConsumerStartedEvent` - published when a consumer is about to start polling.
+
+* `ConsumerFailedToStartEvent` - published if no `ConsumerStartingEvent` is published within the `consumerStartTimeout` container property.
+  This event might signal that the configured task executor has insufficient threads to support the containers it is used in and their concurrency.
+  An error message is also logged when this condition occurs.
+
+* `ListenerContainerIdleEvent`: published when no messages have been received in `idleInterval` (if configured).
+
+* `ListenerContainerNoLongerIdleEvent`: published when a record is consumed after previously publishing a `ListenerContainerIdleEvent`.
+
+* `ListenerContainerPartitionIdleEvent`: published when no messages have been received from that partition in `idlePartitionEventInterval` (if configured).
+
+* `ListenerContainerPartitionNoLongerIdleEvent`: published when a record is consumed from a partition that has previously published a `ListenerContainerPartitionIdleEvent`.
+
+* `NonResponsiveConsumerEvent`: published when the consumer appears to be blocked in the `poll` method.
+
+* `ConsumerPartitionPausedEvent`: published by each consumer when a partition is paused.
+
+* `ConsumerPartitionResumedEvent`: published by each consumer when a partition is resumed.
+
+* `ConsumerPausedEvent`: published by each consumer when the container is paused.
+
+* `ConsumerResumedEvent`: published by each consumer when the container is resumed.
+
+* `ConsumerStoppingEvent`: published by each consumer just before stopping.
+
+* `ConsumerStoppedEvent`: published after the consumer is closed.
+  See [Thread Safety](#thread-safety).
+
+* `ContainerStoppedEvent`: published when all consumers have stopped.
+
+| |By default, the application context’s event multicaster invokes event listeners on the calling thread.
If you change the multicaster to use an async executor, you must not invoke any `Consumer` methods when the event contains a reference to the consumer.|
+|---|---|
+
+The `ListenerContainerIdleEvent` has the following properties:
+
+* `source`: The listener container instance that published the event.
+
+* `container`: The listener container or the parent listener container, if the source container is a child.
+
+* `id`: The listener ID (or container bean name).
+
+* `idleTime`: The time the container had been idle when the event was published.
+
+* `topicPartitions`: The topics and partitions that the container was assigned at the time the event was generated.
+
+* `consumer`: A reference to the Kafka `Consumer` object.
+  For example, if the consumer’s `pause()` method was previously called, it can `resume()` when the event is received.
+
+* `paused`: Whether the container is currently paused.
+  See [Pausing and Resuming Listener Containers](#pause-resume) for more information.
+
+The `ListenerContainerNoLongerIdleEvent` has the same properties, except `idleTime` and `paused`.
+
+The `ListenerContainerPartitionIdleEvent` has the following properties:
+
+* `source`: The listener container instance that published the event.
+
+* `container`: The listener container or the parent listener container, if the source container is a child.
+
+* `id`: The listener ID (or container bean name).
+
+* `idleTime`: The time partition consumption had been idle when the event was published.
+
+* `topicPartition`: The topic and partition that triggered the event.
+
+* `consumer`: A reference to the Kafka `Consumer` object.
+  For example, if the consumer’s `pause()` method was previously called, it can `resume()` when the event is received.
+
+* `paused`: Whether that partition consumption is currently paused for that consumer.
+  See [Pausing and Resuming Listener Containers](#pause-resume) for more information.
+
+The `ListenerContainerPartitionNoLongerIdleEvent` has the same properties, except `idleTime` and `paused`.
+
+The `NonResponsiveConsumerEvent` has the following properties:
+
+* `source`: The listener container instance that published the event.
+
+* `container`: The listener container or the parent listener container, if the source container is a child.
+
+* `id`: The listener ID (or container bean name).
+
+* `timeSinceLastPoll`: The time just before the container last called `poll()`.
+
+* `topicPartitions`: The topics and partitions that the container was assigned at the time the event was generated.
+
+* `consumer`: A reference to the Kafka `Consumer` object.
+  For example, if the consumer’s `pause()` method was previously called, it can `resume()` when the event is received.
+
+* `paused`: Whether the container is currently paused.
+  See [Pausing and Resuming Listener Containers](#pause-resume) for more information.
+
+The `ConsumerPausedEvent`, `ConsumerResumedEvent`, and `ConsumerStoppingEvent` events have the following properties:
+
+* `source`: The listener container instance that published the event.
+
+* `container`: The listener container or the parent listener container, if the source container is a child.
+
+* `partitions`: The `TopicPartition` instances involved.
+
+The `ConsumerPartitionPausedEvent` and `ConsumerPartitionResumedEvent` events have the following properties:
+
+* `source`: The listener container instance that published the event.
+
+* `container`: The listener container or the parent listener container, if the source container is a child.
+
+* `partition`: The `TopicPartition` instance involved.
+
+The `ConsumerStartingEvent`, `ConsumerStartedEvent`, `ConsumerFailedToStartEvent`, `ConsumerStoppedEvent` and `ContainerStoppedEvent` events have the following properties:
+
+* `source`: The listener container instance that published the event.
+
+* `container`: The listener container or the parent listener container, if the source container is a child.
+
+All containers (whether a child or a parent) publish `ContainerStoppedEvent`.
+For a parent container, the source and container properties are identical.
+
+In addition, the `ConsumerStoppedEvent` has the following property:
+
+* `reason`
+
+  * `NORMAL` - the consumer stopped normally (container was stopped).
+
+  * `ERROR` - a `java.lang.Error` was thrown.
+
+  * `FENCED` - the transactional producer was fenced and the `stopContainerWhenFenced` container property is `true`.
+
+  * `AUTH` - an `AuthenticationException` or `AuthorizationException` was thrown and the `authExceptionRetryInterval` is not configured.
+
+  * `NO_OFFSET` - there is no offset for a partition and the `auto.offset.reset` policy is `none`.
+
+You can use this event to restart the container after such a condition:
+
+```
+if (event.getReason().equals(Reason.FENCED)) {
+    event.getSource(MessageListenerContainer.class).start();
+}
+```
+
+##### Detecting Idle and Non-Responsive Consumers
+
+While efficient, one problem with asynchronous consumers is detecting when they are idle.
+You might want to take some action if no messages arrive for some period of time.
+
+You can configure the listener container to publish a `ListenerContainerIdleEvent` when some time passes with no message delivery.
+While the container is idle, an event is published every `idleEventInterval` milliseconds.
+
+To configure this feature, set the `idleEventInterval` on the container.
+The following example shows how to do so:
+
+```
+@Bean
+public KafkaMessageListenerContainer messageListenerContainer(ConsumerFactory consumerFactory) {
+    ContainerProperties containerProps = new ContainerProperties("topic1", "topic2");
+    ...
+    containerProps.setIdleEventInterval(60000L);
+    ...
+    KafkaMessageListenerContainer container = new KafkaMessageListenerContainer<>(...);
+    return container;
+}
+```
+
+The following example shows how to set the `idleEventInterval` for a `@KafkaListener`:
+
+```
+@Bean
+public ConcurrentKafkaListenerContainerFactory kafkaListenerContainerFactory() {
+    ConcurrentKafkaListenerContainerFactory factory =
+            new ConcurrentKafkaListenerContainerFactory<>();
+    ...
+    factory.getContainerProperties().setIdleEventInterval(60000L);
+    ...
+    return factory;
+}
+```
+
+In each of these cases, an event is published once per minute while the container is idle.
+
+If, for some reason, the consumer `poll()` method does not exit, no messages are received and idle events cannot be generated (this was a problem with early versions of the `kafka-clients` when the broker wasn’t reachable).
+In this case, the container publishes a `NonResponsiveConsumerEvent` if a poll does not return within `3x` the `pollTimeout` property.
+By default, this check is performed once every 30 seconds in each container.
+You can modify this behavior by setting the `monitorInterval` (default 30 seconds) and `noPollThreshold` (default 3.0) properties in the `ContainerProperties` when configuring the listener container. +The `noPollThreshold` should be greater than `1.0` to avoid getting spurious events due to a race condition. +Receiving such an event lets you stop the containers, thus waking the consumer so that it can stop. + +Starting with version 2.6.2, if a container has published a `ListenerContainerIdleEvent`, it will publish a `ListenerContainerNoLongerIdleEvent` when a record is subsequently received. + +##### Event Consumption + +You can capture these events by implementing `ApplicationListener` — either a general listener or one narrowed to only receive this specific event. +You can also use `@EventListener`, introduced in Spring Framework 4.2. + +The next example combines `@KafkaListener` and `@EventListener` into a single class. +You should understand that the application listener gets events for all containers, so you may need to check the listener ID if you want to take specific action based on which container is idle. +You can also use the `@EventListener` `condition` for this purpose. + +See [Application Events](#events) for information about event properties. + +The event is normally published on the consumer thread, so it is safe to interact with the `Consumer` object. + +The following example uses both `@KafkaListener` and `@EventListener`: + +``` +public class Listener { + + @KafkaListener(id = "qux", topics = "annotated") + public void listen4(@Payload String foo, Acknowledgment ack) { + ... + } + + @EventListener(condition = "event.listenerId.startsWith('qux-')") + public void eventHandler(ListenerContainerIdleEvent event) { + ... + } + +} +``` + +| |Event listeners see events for all containers.
Consequently, in the preceding example, we narrow the events received based on the listener ID.
Since containers created for the `@KafkaListener` support concurrency, the actual containers are named `id-n` where the `n` is a unique value for each instance to support the concurrency.
That is why we use `startsWith` in the condition.|
+|---|---|
+
+| |If you wish to use the idle event to stop the listener container, you should not call `container.stop()` on the thread that calls the listener.
Doing so causes delays and unnecessary log messages.
Instead, you should hand off the event to a different thread that can then stop the container.
Also, you should not `stop()` the container instance if it is a child container.
You should stop the concurrent container instead.|
+|---|---|
+
+###### Current Positions when Idle
+
+Note that you can obtain the current positions when idle is detected by implementing `ConsumerSeekAware` in your listener.
+See `onIdleContainer()` in [Seeking to a Specific Offset](#seek).
+
+#### 4.1.7. Topic/Partition Initial Offset
+
+There are several ways to set the initial offset for a partition.
+
+When manually assigning partitions, you can set the initial offset (if desired) in the configured `TopicPartitionOffset` arguments (see [Message Listener Containers](#message-listener-container)).
+You can also seek to a specific offset at any time.
+
+When you use group management where the broker assigns partitions:
+
+* For a new `group.id`, the initial offset is determined by the `auto.offset.reset` consumer property (`earliest` or `latest`).
+
+* For an existing group ID, the initial offset is the current offset for that group ID.
+  You can, however, seek to a specific offset during initialization (or at any time thereafter).
+
+#### 4.1.8. Seeking to a Specific Offset
+
+In order to seek, your listener must implement `ConsumerSeekAware`, which has the following methods:
+
+```
+void registerSeekCallback(ConsumerSeekCallback callback);
+
+void onPartitionsAssigned(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback);
+
+void onPartitionsRevoked(Collection<TopicPartition> partitions);
+
+void onIdleContainer(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback);
+```
+
+The `registerSeekCallback` is called when the container is started and whenever partitions are assigned.
+You should use this callback when seeking at some arbitrary time after initialization.
+You should save a reference to the callback.
+If you use the same listener in multiple containers (or in a `ConcurrentMessageListenerContainer`), you should store the callback in a `ThreadLocal` or some other structure keyed by the listener `Thread`.
+
+When using group management, `onPartitionsAssigned` is called when partitions are assigned.
+You can use this method, for example, for setting initial offsets for the partitions, by calling the callback.
+You can also use this method to associate this thread’s callback with the assigned partitions (see the example below).
+You must use the callback argument, not the one passed into `registerSeekCallback`.
+Starting with version 2.5.5, this method is called, even when using [manual partition assignment](#manual-assignment).
+
+`onPartitionsRevoked` is called when the container is stopped or Kafka revokes assignments.
+You should discard this thread’s callback and remove any associations to the revoked partitions.
+
+The callback has the following methods:
+
+```
+void seek(String topic, int partition, long offset);
+
+void seekToBeginning(String topic, int partition);
+
+void seekToBeginning(Collection<TopicPartition> partitions);
+
+void seekToEnd(String topic, int partition);
+
+void seekToEnd(Collection<TopicPartition> partitions);
+
+void seekRelative(String topic, int partition, long offset, boolean toCurrent);
+
+void seekToTimestamp(String topic, int partition, long timestamp);
+
+void seekToTimestamp(Collection<TopicPartition> topicPartitions, long timestamp);
+```
+
+`seekRelative` was added in version 2.3, to perform relative seeks.
+
+* `offset` negative and `toCurrent` `false` - seek relative to the end of the partition.
+
+* `offset` positive and `toCurrent` `false` - seek relative to the beginning of the partition.
+
+* `offset` negative and `toCurrent` `true` - seek relative to the current position (rewind).
+
+* `offset` positive and `toCurrent` `true` - seek relative to the current position (fast forward).
+
+The `seekToTimestamp` methods were also added in version 2.3.
+
+| |When seeking to the same timestamp for multiple partitions in the `onIdleContainer` or `onPartitionsAssigned` methods, the second method is preferred because it is more efficient to find the offsets for the timestamps in a single call to the consumer’s `offsetsForTimes` method.
When called from other locations, the container will gather all timestamp seek requests and make one call to `offsetsForTimes`.|
+|---|---|
+
+You can also perform seek operations from `onIdleContainer()` when an idle container is detected.
+See [Detecting Idle and Non-Responsive Consumers](#idle-containers) for how to enable idle container detection.
+
+| |The `seekToBeginning` method that accepts a collection is useful, for example, when processing a compacted topic and you wish to seek to the beginning every time the application is started:|
+|---|---|
+
+```
+public class MyListener implements ConsumerSeekAware {
+
+    ...
+
+    @Override
+    public void onPartitionsAssigned(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
+        callback.seekToBeginning(assignments.keySet());
+    }
+
+}
+```
+
+To arbitrarily seek at runtime, use the callback reference from the `registerSeekCallback` for the appropriate thread.
+
+Here is a trivial Spring Boot application that demonstrates how to use the callback; it sends 10 records to the topic; hitting `<Enter>` in the console causes all partitions to seek to the beginning.
+
+```
+@SpringBootApplication
+public class SeekExampleApplication {
+
+    public static void main(String[] args) {
+        SpringApplication.run(SeekExampleApplication.class, args);
+    }
+
+    @Bean
+    public ApplicationRunner runner(Listener listener, KafkaTemplate<String, String> template) {
+        return args -> {
+            IntStream.range(0, 10).forEach(i -> template.send(
+                new ProducerRecord<>("seekExample", i % 3, "foo", "bar")));
+            while (true) {
+                System.in.read();
+                listener.seekToStart();
+            }
+        };
+    }
+
+    @Bean
+    public NewTopic topic() {
+        return new NewTopic("seekExample", 3, (short) 1);
+    }
+
+}
+
+@Component
+class Listener implements ConsumerSeekAware {
+
+    private static final Logger logger = LoggerFactory.getLogger(Listener.class);
+
+    private final ThreadLocal<ConsumerSeekCallback> callbackForThread = new ThreadLocal<>();
+
+    private final Map<TopicPartition, ConsumerSeekCallback> callbacks = new ConcurrentHashMap<>();
+
+    @Override
+    public void registerSeekCallback(ConsumerSeekCallback callback) {
+        this.callbackForThread.set(callback);
+    }
+
+    @Override
+    public void onPartitionsAssigned(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
+        assignments.keySet().forEach(tp -> this.callbacks.put(tp, this.callbackForThread.get()));
+    }
+
+    @Override
+    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
+        partitions.forEach(tp -> this.callbacks.remove(tp));
+        this.callbackForThread.remove();
+    }
+
+    @Override
+    public void onIdleContainer(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
+    }
+
+    @KafkaListener(id = "seekExample", topics = "seekExample", concurrency = "3")
+    public void listen(ConsumerRecord<String, String> in) {
+        logger.info(in.toString());
+    }
+
+    public void seekToStart() {
+        this.callbacks.forEach((tp, callback) -> callback.seekToBeginning(tp.topic(), tp.partition()));
+    }
+
+}
+```
+
+To make things simpler, version 2.3 added the `AbstractConsumerSeekAware` class, which keeps track of which callback is to be used for a topic/partition.
+The following example shows how to seek to the last record processed, in each partition, each time the container goes idle.
+It also has methods that allow arbitrary external calls to rewind partitions by one record.
+
+```
+public class SeekToLastOnIdleListener extends AbstractConsumerSeekAware {
+
+    @KafkaListener(id = "seekOnIdle", topics = "seekOnIdle")
+    public void listen(String in) {
+        ...
+    }
+
+    @Override
+    public void onIdleContainer(Map<TopicPartition, Long> assignments,
+            ConsumerSeekCallback callback) {
+
+        assignments.keySet().forEach(tp -> callback.seekRelative(tp.topic(), tp.partition(), -1, true));
+    }
+
+    /**
+     * Rewind all partitions one record.
+     */
+    public void rewindAllOneRecord() {
+        getSeekCallbacks()
+            .forEach((tp, callback) ->
+                callback.seekRelative(tp.topic(), tp.partition(), -1, true));
+    }
+
+    /**
+     * Rewind one partition one record.
+     */
+    public void rewindOnePartitionOneRecord(String topic, int partition) {
+        getSeekCallbackFor(new org.apache.kafka.common.TopicPartition(topic, partition))
+            .seekRelative(topic, partition, -1, true);
+    }
+
+}
+```
+
+Version 2.6 added convenience methods to the abstract class:
+
+* `seekToBeginning()` - seeks all assigned partitions to the beginning
+
+* `seekToEnd()` - seeks all assigned partitions to the end
+
+* `seekToTimestamp(long time)` - seeks all assigned partitions to the offset represented by that timestamp.
+
+Example:
+
+```
+public class MyListener extends AbstractConsumerSeekAware {
+
+    @KafkaListener(...)
+    void listen(...) {
+        ...
+    }
+}
+
+public class SomeOtherBean {
+
+    MyListener listener;
+
+    ...
+
+    void someMethod() {
+        this.listener.seekToTimestamp(System.currentTimeMillis() - 60_000);
+    }
+
+}
+```
+
+#### 4.1.9. Container factory
+
+As discussed in [`@KafkaListener` Annotation](#kafka-listener-annotation), a `ConcurrentKafkaListenerContainerFactory` is used to create containers for annotated methods.
+
+Starting with version 2.2, you can use the same factory to create any `ConcurrentMessageListenerContainer`.
+This might be useful if you want to create several containers with similar properties or you wish to use some externally configured factory, such as the one provided by Spring Boot auto-configuration.
+Once the container is created, you can further modify its properties, many of which are set by using `container.getContainerProperties()`.
+The following example configures a `ConcurrentMessageListenerContainer`:
+
+```
+@Bean
+public ConcurrentMessageListenerContainer exampleContainer(
+        ConcurrentKafkaListenerContainerFactory factory) {
+
+    ConcurrentMessageListenerContainer container =
+        factory.createContainer("topic1", "topic2");
+    container.setMessageListener(m -> { ... } );
+    return container;
+}
+```
+
+| |Containers created this way are not added to the endpoint registry.
They should be created as `@Bean` definitions so that they are registered with the application context.|
+|---|---|
+
+Starting with version 2.3.4, you can add a `ContainerCustomizer` to the factory to further configure each container after it has been created and configured.
+
+```
+@Bean
+public KafkaListenerContainerFactory kafkaListenerContainerFactory() {
+    ConcurrentKafkaListenerContainerFactory factory =
+            new ConcurrentKafkaListenerContainerFactory<>();
+    ...
+    factory.setContainerCustomizer(container -> { /* customize the container */ });
+    return factory;
+}
+```
+
+#### 4.1.10. Thread Safety
+
+When using a concurrent message listener container, a single listener instance is invoked on all consumer threads.
+Listeners, therefore, need to be thread-safe, and it is preferable to use stateless listeners.
+If it is not possible to make your listener thread-safe or adding synchronization would significantly reduce the benefit of adding concurrency, you can use one of a few techniques:
+
+* Use `n` containers with `concurrency=1` with a prototype scoped `MessageListener` bean so that each container gets its own instance (this is not possible when using `@KafkaListener`).
+
+* Keep the state in `ThreadLocal` instances.
+
+* Have the singleton listener delegate to a bean that is declared in `SimpleThreadScope` (or a similar scope).
+
+To facilitate cleaning up thread state (for the second and third items in the preceding list), starting with version 2.2, the listener container publishes a `ConsumerStoppedEvent` when each thread exits.
+You can consume these events with an `ApplicationListener` or `@EventListener` method to remove `ThreadLocal` instances or `remove()` thread-scoped beans from the scope, as the sketch below shows.
+Note that `SimpleThreadScope` does not destroy beans that have a destruction interface (such as `DisposableBean`), so you should `destroy()` the instance yourself.
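+
+For example, the following is a minimal sketch of the `ThreadLocal` approach with event-driven cleanup; the `MyState` class and the processing logic are hypothetical:
+
+```
+@Component
+public class StatefulListener {
+
+    // per-consumer-thread state (MyState is a hypothetical holder)
+    private final ThreadLocal<MyState> state = ThreadLocal.withInitial(MyState::new);
+
+    @KafkaListener(id = "stateful", topics = "someTopic", concurrency = "2")
+    public void listen(String in) {
+        this.state.get().update(in);
+    }
+
+    // the event is published on the consumer thread, so remove() clears that thread's state
+    @EventListener
+    public void consumerStopped(ConsumerStoppedEvent event) {
+        this.state.remove();
+    }
+
+}
+```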
+| |By default, the application context’s event multicaster invokes event listeners on the calling thread.
If you change the multicaster to use an async executor, thread cleanup is not effective.|
+|---|---|
+
+#### 4.1.11. Monitoring
+
+##### Monitoring Listener Performance
+
+Starting with version 2.3, the listener container will automatically create and update Micrometer `Timer` s for the listener, if `Micrometer` is detected on the class path, and a single `MeterRegistry` is present in the application context.
+The timers can be disabled by setting the `ContainerProperty` `micrometerEnabled` to `false`.
+
+Two timers are maintained - one for successful calls to the listener and one for failures.
+
+The timers are named `spring.kafka.listener` and have the following tags:
+
+* `name` : (container bean name)
+
+* `result` : `success` or `failure`
+
+* `exception` : `none` or `ListenerExecutionFailedException`
+
+You can add additional tags using the `ContainerProperties` `micrometerTags` property.
+
+| |With the concurrent container, timers are created for each thread and the `name` tag is suffixed with `-n` where n is `0` to `concurrency-1`.|
+|---|---|
+
+##### Monitoring KafkaTemplate Performance
+
+Starting with version 2.5, the template will automatically create and update Micrometer `Timer` s for send operations, if `Micrometer` is detected on the class path, and a single `MeterRegistry` is present in the application context.
+The timers can be disabled by setting the template’s `micrometerEnabled` property to `false`.
+
+Two timers are maintained - one for successful send operations and one for failures.
+
+The timers are named `spring.kafka.template` and have the following tags:
+
+* `name` : (template bean name)
+
+* `result` : `success` or `failure`
+
+* `exception` : `none` or the exception class name for failures
+
+You can add additional tags using the template’s `micrometerTags` property.
+
+##### Micrometer Native Metrics
+
+Starting with version 2.5, the framework provides [Factory Listeners](#factory-listeners) to manage a Micrometer `KafkaClientMetrics` instance whenever producers and consumers are created and closed.
+
+To enable this feature, simply add the listeners to your producer and consumer factories:
+
+```
+@Bean
+public ConsumerFactory myConsumerFactory() {
+    Map configs = consumerConfigs();
+    ...
+    DefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory<>(configs);
+    ...
+    cf.addListener(new MicrometerConsumerListener(meterRegistry(),
+            Collections.singletonList(new ImmutableTag("customTag", "customTagValue"))));
+    ...
+    return cf;
+}
+
+@Bean
+public ProducerFactory myProducerFactory() {
+    Map configs = producerConfigs();
+    configs.put(ProducerConfig.CLIENT_ID_CONFIG, "myClientId");
+    ...
+    DefaultKafkaProducerFactory pf = new DefaultKafkaProducerFactory<>(configs);
+    ...
+    pf.addListener(new MicrometerProducerListener(meterRegistry(),
+            Collections.singletonList(new ImmutableTag("customTag", "customTagValue"))));
+    ...
+    return pf;
+}
+```
+
+The consumer/producer `id` passed to the listener is added to the meter’s tags with tag name `spring.id`.
+
+An example of obtaining one of the Kafka metrics:
+
+```
+double count = this.meterRegistry.get("kafka.producer.node.incoming.byte.total")
+        .tag("customTag", "customTagValue")
+        .tag("spring.id", "myProducerFactory.myClientId-1")
+        .functionCounter()
+        .count();
+```
+
+A similar listener is provided for the `StreamsBuilderFactoryBean` - see [KafkaStreams Micrometer Support](#streams-micrometer).
+
+#### 4.1.12. Transactions
+
+This section describes how Spring for Apache Kafka supports transactions.
+
+##### Overview
+
+The 0.11.0.0 client library added support for transactions.
+Spring for Apache Kafka adds support in the following ways:
+
+* `KafkaTransactionManager`: Used with normal Spring transaction support (`@Transactional`, `TransactionTemplate`, etc.)
+
+* Transactional `KafkaMessageListenerContainer`
+
+* Local transactions with `KafkaTemplate`
+
+* Transaction synchronization with other transaction managers
+
+Transactions are enabled by providing the `DefaultKafkaProducerFactory` with a `transactionIdPrefix`.
+In that case, instead of managing a single shared `Producer`, the factory maintains a cache of transactional producers.
+When the user calls `close()` on a producer, it is returned to the cache for reuse instead of actually being closed.
+The `transactional.id` property of each producer is `transactionIdPrefix` + `n`, where `n` starts with `0` and is incremented for each new producer, unless the transaction is started by a listener container with a record-based listener.
+In that case, the `transactional.id` is `...`.
+This is to properly support fencing zombies, [as described here](https://www.confluent.io/blog/transactions-apache-kafka/).
+This new behavior was added in versions 1.3.7, 2.0.6, 2.1.10, and 2.2.0.
+If you wish to revert to the previous behavior, you can set the `producerPerConsumerPartition` property on the `DefaultKafkaProducerFactory` to `false`.
+
+| |While transactions are supported with batch listeners, by default, zombie fencing is not supported because a batch may contain records from multiple topics or partitions.
However, starting with version 2.3.2, zombie fencing is supported if you set the container property `subBatchPerPartition` to true.
In that case, the batch listener is invoked once per partition received from the last poll, as if each poll only returned records for a single partition.
This is `true` by default since version 2.5 when transactions are enabled with `EOSMode.ALPHA`; set it to `false` if you are using transactions but are not concerned about zombie fencing.
Also see [Exactly Once Semantics](#exactly-once).|
+|---|---|
+
+Also see [`transactionIdPrefix`](#transaction-id-prefix).
+
+With Spring Boot, it is only necessary to set the `spring.kafka.producer.transaction-id-prefix` property - Boot will automatically configure a `KafkaTransactionManager` bean and wire it into the listener container.
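+
+Without Boot, a minimal sketch of enabling transactions on the producer factory follows; the prefix value and the `producerConfigs()` helper are illustrative:
+
+```
+@Bean
+public ProducerFactory producerFactory() {
+    DefaultKafkaProducerFactory pf = new DefaultKafkaProducerFactory<>(producerConfigs());
+    pf.setTransactionIdPrefix("tx-"); // enables transactional producers
+    return pf;
+}
+
+@Bean
+public KafkaTransactionManager transactionManager(ProducerFactory pf) {
+    return new KafkaTransactionManager<>(pf);
+}
+```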
This is useful when using transactional producers that might lie idle for longer than the broker’s `transactional.id.expiration.ms`.
With current `kafka-clients`, this can cause a `ProducerFencedException` without a rebalance.
By setting the `maxAge` to less than `transactional.id.expiration.ms`, the factory will refresh the producer if it is past its max age.|
+|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+##### Using `KafkaTransactionManager`
+
+The `KafkaTransactionManager` is an implementation of Spring Framework’s `PlatformTransactionManager`.
+It is provided with a reference to the producer factory in its constructor.
+If you provide a custom producer factory, it must support transactions.
+See `ProducerFactory.transactionCapable()`.
+
+You can use the `KafkaTransactionManager` with normal Spring transaction support (`@Transactional`, `TransactionTemplate`, and others).
+If a transaction is active, any `KafkaTemplate` operations performed within the scope of the transaction use the transaction’s `Producer`.
+The manager commits or rolls back the transaction, depending on success or failure.
+You must configure the `KafkaTemplate` to use the same `ProducerFactory` as the transaction manager.
+
+##### Transaction Synchronization
+
+This section refers to producer-only transactions (transactions not started by a listener container); see [Using Consumer-Initiated Transactions](#container-transaction-manager) for information about chaining transactions when the container starts the transaction.
+
+If you want to send records to Kafka and perform some database updates, you can use normal Spring transaction management with, say, a `DataSourceTransactionManager`.
+
```
+@Transactional
+public void process(List<Thing> things) {
+    things.forEach(thing -> this.kafkaTemplate.send("topic", thing));
+    updateDb(things);
+}
```
+
+The interceptor for the `@Transactional` annotation starts the transaction and the `KafkaTemplate` will synchronize a transaction with that transaction manager; each send will participate in that transaction.
+When the method exits, the database transaction will commit, followed by the Kafka transaction.
+If you wish the commits to be performed in the reverse order (Kafka first), use nested `@Transactional` methods, with the outer method configured to use the `DataSourceTransactionManager`, and the inner method configured to use the `KafkaTransactionManager`.
+
+See [[ex-jdbc-sync]](#ex-jdbc-sync) for examples of an application that synchronizes JDBC and Kafka transactions in Kafka-first or DB-first configurations.
+
+| |Starting with versions 2.5.17, 2.6.12, 2.7.9 and 2.8.0, if the commit fails on the synchronized transaction (after the primary transaction has committed), the exception will be thrown to the caller.
Previously, this was silently ignored (logged at debug).
Applications should take remedial action, if necessary, to compensate for the committed primary transaction.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------|
+
+##### Using Consumer-Initiated Transactions
+
+The `ChainedKafkaTransactionManager` is deprecated since version 2.7; see the Javadocs for its superclass `ChainedTransactionManager` for more information.
+Instead, use a `KafkaTransactionManager` in the container to start the Kafka transaction and annotate the listener method with `@Transactional` to start the other transaction.
+
+See [[ex-jdbc-sync]](#ex-jdbc-sync) for an example application that chains JDBC and Kafka transactions.
+
+##### `KafkaTemplate` Local Transactions
+
+You can use the `KafkaTemplate` to execute a series of operations within a local transaction.
+The following example shows how to do so:
+
```
+boolean result = template.executeInTransaction(t -> {
+    t.sendDefault("thing1", "thing2");
+    t.sendDefault("cat", "hat");
+    return true;
+});
```
+
+The argument in the callback is the template itself (`this`).
+If the callback exits normally, the transaction is committed.
+If an exception is thrown, the transaction is rolled back.
+
+| |If there is a `KafkaTransactionManager` (or synchronized) transaction in process, it is not used.
Instead, a new "nested" transaction is used.|
+|---|--------------------------------------------------------------------------------------------------|
+
+##### `transactionIdPrefix`
+
+As mentioned in [the overview](#transactions), the producer factory is configured with this property to build the producer `transactional.id` property.
+There is a dichotomy when specifying this property in that, when running multiple instances of the application with `EOSMode.ALPHA`, it must be the same on all instances to satisfy fencing zombies (also mentioned in the overview) when producing records on a listener container thread.
+However, when producing records using transactions that are **not** started by a listener container, the prefix has to be different on each instance.
+Version 2.3 makes this simpler to configure, especially in a Spring Boot application.
+In previous versions, you had to create two producer factories and `KafkaTemplate` s - one for producing records on a listener container thread and one for stand-alone transactions started by `kafkaTemplate.executeInTransaction()` or by a transaction interceptor on a `@Transactional` method.
+
+Now, you can override the factory’s `transactionalIdPrefix` on the `KafkaTemplate` and the `KafkaTransactionManager`.
+
+When using a transaction manager and template for a listener container, you would normally leave this to default to the producer factory’s property.
+This value should be the same for all application instances when using `EOSMode.ALPHA`.
+With `EOSMode.BETA` it is no longer necessary to use the same `transactional.id`, even for consumer-initiated transactions; in fact, it must be unique on each instance, the same as producer-initiated transactions.
+For transactions started by the template (or the transaction manager for `@Transactional`), you should set the property on the template and transaction manager, respectively.
+This property must have a different value on each application instance.
+
+This problem (different rules for `transactional.id`) has been eliminated when `EOSMode.BETA` is being used (with broker versions \>= 2.5); see [Exactly Once Semantics](#exactly-once).
+
+##### `KafkaTemplate` Transactional and non-Transactional Publishing
+
+Normally, when a `KafkaTemplate` is transactional (configured with a transaction-capable producer factory), transactions are required.
+The transaction can be started by a `TransactionTemplate`, a `@Transactional` method, calling `executeInTransaction`, or by a listener container, when configured with a `KafkaTransactionManager`.
+Any attempt to use the template outside the scope of a transaction results in the template throwing an `IllegalStateException`.
+Starting with version 2.4.3, you can set the template’s `allowNonTransactional` property to `true`.
+In that case, the template will allow the operation to run without a transaction, by calling the `ProducerFactory` 's `createNonTransactionalProducer()` method; the producer will be cached, or thread-bound, as normal for reuse.
+See [Using `DefaultKafkaProducerFactory`](#producer-factory).
+
+##### Transactions with Batch Listeners
+
+When a listener fails while transactions are being used, the `AfterRollbackProcessor` is invoked to take some action after the rollback occurs.
+When using the default `AfterRollbackProcessor` with a record listener, seeks are performed so that the failed record will be redelivered.
+With a batch listener, however, the whole batch will be redelivered because the framework doesn’t know which record in the batch failed.
+See [After-rollback Processor](#after-rollback) for more information.
+
+When using a batch listener, version 2.4.2 introduced an alternative mechanism to deal with failures while processing a batch: the `BatchToRecordAdapter`.
+When a container factory with `batchListener` set to `true` is configured with a `BatchToRecordAdapter`, the listener is invoked with one record at a time.
+This enables error handling within the batch, while still making it possible to stop processing the entire batch, depending on the exception type.
+A default `BatchToRecordAdapter` is provided that can be configured with a standard `ConsumerRecordRecoverer`, such as the `DeadLetterPublishingRecoverer`.
+The following test case configuration snippet illustrates how to use this feature:
+
```
+public static class TestListener {
+
+    final List<String> values = new ArrayList<>();
+
+    @KafkaListener(id = "batchRecordAdapter", topics = "test")
+    public void listen(String data) {
+        values.add(data);
+        if ("bar".equals(data)) {
+            throw new RuntimeException("reject partial");
+        }
+    }
+
+}
+
+@Configuration
+@EnableKafka
+public static class Config {
+
+    ConsumerRecord<?, ?> failed;
+
+    @Bean
+    public TestListener test() {
+        return new TestListener();
+    }
+
+    @Bean
+    public ConsumerFactory<String, String> consumerFactory() {
+        return mock(ConsumerFactory.class);
+    }
+
+    @Bean
+    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
+        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
+        factory.setConsumerFactory(consumerFactory());
+        factory.setBatchListener(true);
+        factory.setBatchToRecordAdapter(new DefaultBatchToRecordAdapter<>((record, ex) -> {
+            this.failed = record;
+        }));
+        return factory;
+    }
+
+}
```
+
+#### 4.1.13. Exactly Once Semantics
+
+You can provide a listener container with a `KafkaAwareTransactionManager` instance.
+When so configured, the container starts a transaction before invoking the listener.
+Any `KafkaTemplate` operations performed by the listener participate in the transaction.
+If the listener successfully processes the record (or multiple records, when using a `BatchMessageListener`), the container sends the offset(s) to the transaction by using `producer.sendOffsetsToTransaction()`, before the transaction manager commits the transaction.
+If the listener throws an exception, the transaction is rolled back and the consumer is repositioned so that the rolled-back record(s) can be retrieved on the next poll.
+See [After-rollback Processor](#after-rollback) for more information and for handling records that repeatedly fail.
+
+Using transactions enables Exactly Once Semantics (EOS).
+
+This means that, for a `read→process-write` sequence, it is guaranteed that the **sequence** is completed exactly once.
+(The read and process have at least once semantics.)
+
+Spring for Apache Kafka version 2.5 and later supports two EOS modes:
+
+* `ALPHA` - alias for `V1` (deprecated)
+
+* `BETA` - alias for `V2` (deprecated)
+
+* `V1` - aka `transactional.id` fencing (since version 0.11.0.0)
+
+* `V2` - aka fetch-offset-request fencing (since version 2.5)
+
+With mode `V1`, the producer is "fenced" if another instance with the same `transactional.id` is started.
+Spring manages this by using a `Producer` for each `group.id/topic/partition`; when a rebalance occurs, a new instance will use the same `transactional.id` and the old producer is fenced.
+
+With mode `V2`, it is not necessary to have a producer for each `group.id/topic/partition` because consumer metadata is sent along with the offsets to the transaction and the broker can determine if the producer is fenced using that information instead.
+
+Starting with version 2.6, the default `EOSMode` is `V2`.
+
+To revert to the previous behavior, configure the container to use mode `ALPHA` by setting the container property `EOSMode` to `ALPHA`.
+
+| |With `V2` (default), your brokers must be version 2.5 or later; with `kafka-clients` version 3.0, the producer will no longer fall back to `V1`; if the broker does not support `V2`, an exception is thrown.
If your brokers are earlier than 2.5, you must set the `EOSMode` to `V1`, leave the `DefaultKafkaProducerFactory` `producerPerConsumerPartition` set to `true` and, if you are using a batch listener, you should set `subBatchPerPartition` to `true`.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+When your brokers are upgraded to 2.5 or later, you should switch the mode to `V2`, but the number of producers will remain as before.
+You can then do a rolling upgrade of your application with `producerPerConsumerPartition` set to `false` to reduce the number of producers; you should also no longer set the `subBatchPerPartition` container property.
+
+If your brokers are already 2.5 or newer, you should set the `DefaultKafkaProducerFactory` `producerPerConsumerPartition` property to `false`, to reduce the number of producers needed.
+
+| |When using `EOSMode.V2` with `producerPerConsumerPartition=false`, the `transactional.id` must be unique across all application instances.|
+|---|-------------------------------------------------------------------------------------------------------------------------------------------|
+
+When using `V2` mode, it is no longer necessary to set `subBatchPerPartition` to `true`; it defaults to `false` when the `EOSMode` is `V2`.
+
+Refer to [KIP-447](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics) for more information.
+
+`V1` and `V2` were previously `ALPHA` and `BETA`; they have been changed to align the framework with [KIP-732](https://cwiki.apache.org/confluence/display/KAFKA/KIP-732%3A+Deprecate+eos-alpha+and+replace+eos-beta+with+eos-v2).
+
+#### 4.1.14. Wiring Spring Beans into Producer/Consumer Interceptors
+
+Apache Kafka provides a mechanism to add interceptors to producers and consumers.
+These objects are managed by Kafka, not Spring, and so normal Spring dependency injection won’t work for wiring in dependent Spring Beans.
+However, you can manually wire in those dependencies using the interceptor `configure()` method.
+The following Spring Boot application shows how to do this by overriding Boot’s default factories to add some dependent bean into the configuration properties.
+
```
+@SpringBootApplication
+public class Application {
+
+    public static void main(String[] args) {
+        SpringApplication.run(Application.class, args);
+    }
+
+    @Bean
+    public ConsumerFactory<String, String> kafkaConsumerFactory(SomeBean someBean) {
+        Map<String, Object> consumerProperties = new HashMap<>();
+        // consumerProperties.put(..., ...)
+        // ...
+        consumerProperties.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MyConsumerInterceptor.class.getName());
+        consumerProperties.put("some.bean", someBean);
+        return new DefaultKafkaConsumerFactory<>(consumerProperties);
+    }
+
+    @Bean
+    public ProducerFactory<String, String> kafkaProducerFactory(SomeBean someBean) {
+        Map<String, Object> producerProperties = new HashMap<>();
+        // producerProperties.put(..., ...)
+        // ...
+        producerProperties.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, MyProducerInterceptor.class.getName());
+        producerProperties.put("some.bean", someBean);
+        DefaultKafkaProducerFactory<String, String> factory = new DefaultKafkaProducerFactory<>(producerProperties);
+        return factory;
+    }
+
+    @Bean
+    public SomeBean someBean() {
+        return new SomeBean();
+    }
+
+    @KafkaListener(id = "kgk897", topics = "kgh897")
+    public void listen(String in) {
+        System.out.println("Received " + in);
+    }
+
+    @Bean
+    public ApplicationRunner runner(KafkaTemplate<String, String> template) {
+        return args -> template.send("kgh897", "test");
+    }
+
+    @Bean
+    public NewTopic kRequests() {
+        return TopicBuilder.name("kgh897")
+            .partitions(1)
+            .replicas(1)
+            .build();
+    }
+
+}
```
+
```
+public class SomeBean {
+
+    public void someMethod(String what) {
+        System.out.println(what + " in my foo bean");
+    }
+
+}
```
+
```
+public class MyProducerInterceptor implements ProducerInterceptor<String, String> {
+
+    private SomeBean bean;
+
+    @Override
+    public void configure(Map<String, ?> configs) {
+        this.bean = (SomeBean) configs.get("some.bean");
+    }
+
+    @Override
+    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
+        this.bean.someMethod("producer interceptor");
+        return record;
+    }
+
+    @Override
+    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
+    }
+
+    @Override
+    public void close() {
+    }
+
+}
```
+
```
+public class MyConsumerInterceptor implements ConsumerInterceptor<String, String> {
+
+    private SomeBean bean;
+
+    @Override
+    public void configure(Map<String, ?> configs) {
+        this.bean = (SomeBean) configs.get("some.bean");
+    }
+
+    @Override
+    public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {
+        this.bean.someMethod("consumer interceptor");
+        return records;
+    }
+
+    @Override
+    public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
+    }
+
+    @Override
+    public void close() {
+    }
+
+}
```
+
+Result:
+
```
+producer interceptor in my foo bean
+consumer interceptor in my foo bean
+Received test
```
+
+#### 4.1.15. Pausing and Resuming Listener Containers
+
+Version 2.1.3 added `pause()` and `resume()` methods to listener containers.
+Previously, you could pause a consumer within a `ConsumerAwareMessageListener` and resume it by listening for a `ListenerContainerIdleEvent`, which provides access to the `Consumer` object.
+While you could pause a consumer in an idle container by using an event listener, in some cases, this was not thread-safe, since there is no guarantee that the event listener is invoked on the consumer thread.
+To safely pause and resume consumers, you should use the `pause` and `resume` methods on the listener containers.
+A `pause()` takes effect just before the next `poll()`; a `resume()` takes effect just after the current `poll()` returns.
+When a container is paused, it continues to `poll()` the consumer, avoiding a rebalance if group management is being used, but it does not retrieve any records.
+See the Kafka documentation for more information.
+
+Starting with version 2.1.5, you can call `isPauseRequested()` to see if `pause()` has been called.
+However, the consumers might not have actually paused yet. `isConsumerPaused()` returns `true` if all `Consumer` instances have actually paused.
+
+In addition (also since 2.1.5), `ConsumerPausedEvent` and `ConsumerResumedEvent` instances are published with the container as the `source` property and the `TopicPartition` instances involved in the `partitions` property.
+
+The following simple Spring Boot application demonstrates using the container registry to get a reference to a `@KafkaListener` method’s container, pausing or resuming its consumers, and receiving the corresponding events:
+
```
+@SpringBootApplication
+public class Application implements ApplicationListener<KafkaEvent> {
+
+    public static void main(String[] args) {
+        SpringApplication.run(Application.class, args).close();
+    }
+
+    @Override
+    public void onApplicationEvent(KafkaEvent event) {
+        System.out.println(event);
+    }
+
+    @Bean
+    public ApplicationRunner runner(KafkaListenerEndpointRegistry registry,
+            KafkaTemplate<String, String> template) {
+        return args -> {
+            template.send("pause.resume.topic", "thing1");
+            Thread.sleep(10_000);
+            System.out.println("pausing");
+            registry.getListenerContainer("pause.resume").pause();
+            Thread.sleep(10_000);
+            template.send("pause.resume.topic", "thing2");
+            Thread.sleep(10_000);
+            System.out.println("resuming");
+            registry.getListenerContainer("pause.resume").resume();
+            Thread.sleep(10_000);
+        };
+    }
+
+    @KafkaListener(id = "pause.resume", topics = "pause.resume.topic")
+    public void listen(String in) {
+        System.out.println(in);
+    }
+
+    @Bean
+    public NewTopic topic() {
+        return TopicBuilder.name("pause.resume.topic")
+            .partitions(2)
+            .replicas(1)
+            .build();
+    }
+
+}
```
+
+The following listing shows the results of the preceding example:
+
```
+partitions assigned: [pause.resume.topic-1, pause.resume.topic-0]
+thing1
+pausing
+ConsumerPausedEvent [partitions=[pause.resume.topic-1, pause.resume.topic-0]]
+resuming
+ConsumerResumedEvent [partitions=[pause.resume.topic-1, pause.resume.topic-0]]
+thing2
```
+
+#### 4.1.16. Pausing and Resuming Partitions on Listener Containers
+
+Since version 2.7, you can pause and resume the consumption of specific partitions assigned to that consumer by using the `pausePartition(TopicPartition topicPartition)` and `resumePartition(TopicPartition topicPartition)` methods in the listener containers.
+The pausing and resuming take place respectively before and after the `poll()`, similar to the `pause()` and `resume()` methods.
+The `isPartitionPauseRequested()` method returns `true` if pause for that partition has been requested.
+The `isPartitionPaused()` method returns `true` if that partition has effectively been paused.
+
+Also since version 2.7, `ConsumerPartitionPausedEvent` and `ConsumerPartitionResumedEvent` instances are published with the container as the `source` property and the `TopicPartition` instance.
+
+#### 4.1.17. Serialization, Deserialization, and Message Conversion
+
+##### Overview
+
+Apache Kafka provides a high-level API for serializing and deserializing record values as well as their keys.
+It is present with the `org.apache.kafka.common.serialization.Serializer` and `org.apache.kafka.common.serialization.Deserializer` abstractions with some built-in implementations.
+Meanwhile, we can specify serializer and deserializer classes by using `Producer` or `Consumer` configuration properties.
+The following example shows how to do so:
+
```
+props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
+props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+...
+props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
+props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
```
+
+For more complex or particular cases, the `KafkaConsumer` (and, therefore, `KafkaProducer`) provides overloaded
+constructors to accept `Serializer` and `Deserializer` instances for `keys` and `values`, respectively.
+
+When you use this API, the `DefaultKafkaProducerFactory` and `DefaultKafkaConsumerFactory` also provide properties (through constructors or setter methods) to inject custom `Serializer` and `Deserializer` instances into the target `Producer` or `Consumer`.
+Also, you can pass in `Supplier<Serializer>` or `Supplier<Deserializer>` instances through constructors - these `Supplier` s are called on creation of each `Producer` or `Consumer`.
+
+##### String serialization
+
+Since version 2.5, Spring for Apache Kafka provides `ToStringSerializer` and `ParseStringDeserializer` classes that use the String representation of entities.
+They rely on the `toString` method and some `Function` or `BiFunction` to parse the String and populate properties of an instance.
+Usually, this would invoke some static method on the class, such as `parse`:
+
```
+ToStringSerializer<Thing> thingSerializer = new ToStringSerializer<>();
+//...
+ParseStringDeserializer<Thing> deserializer = new ParseStringDeserializer<>(Thing::parse);
```
+
+By default, the `ToStringSerializer` is configured to convey type information about the serialized entity in the record `Headers`.
+You can disable this by setting the `addTypeInfo` property to false.
+This information can be used by `ParseStringDeserializer` on the receiving side.
+
+* `ToStringSerializer.ADD_TYPE_INFO_HEADERS` (default `true`): You can set it to `false` to disable this feature on the `ToStringSerializer` (sets the `addTypeInfo` property).
+
```
+ParseStringDeserializer<Object> deserializer = new ParseStringDeserializer<>((str, headers) -> {
+    byte[] header = headers.lastHeader(ToStringSerializer.VALUE_TYPE).value();
+    String entityType = new String(header);
+
+    if (entityType.contains("Thing")) {
+        return Thing.parse(str);
+    }
+    else {
+        // ...parsing logic
+    }
+});
```
+
+You can configure the `Charset` used to convert `String` to/from `byte[]`, with the default being `UTF-8`.
+
+You can configure the deserializer with the name of the parser method using `ConsumerConfig` properties:
+
+* `ParseStringDeserializer.KEY_PARSER`
+
+* `ParseStringDeserializer.VALUE_PARSER`
+
+The properties must contain the fully qualified name of the class followed by the method name, separated by a period `.`.
+The method must be static and have a signature of either `(String, Headers)` or `(String)`.
+
+A `ToFromStringSerde` is also provided, for use with Kafka Streams.
+
+##### JSON
+
+Spring for Apache Kafka also provides `JsonSerializer` and `JsonDeserializer` implementations that are based on the
+Jackson JSON object mapper.
+The `JsonSerializer` allows writing any Java object as a JSON `byte[]`.
+The `JsonDeserializer` requires an additional `Class<?> targetType` argument to allow the deserialization of a consumed `byte[]` to the proper target object.
+The following example shows how to create a `JsonDeserializer`:
+
```
+JsonDeserializer<Thing> thingDeserializer = new JsonDeserializer<>(Thing.class);
```
+
+You can customize both `JsonSerializer` and `JsonDeserializer` with an `ObjectMapper`.
+You can also extend them to implement some particular configuration logic in the `configure(Map<String, ?> configs, boolean isKey)` method.
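+
+For example, the following is a minimal sketch of such a subclass; the `Thing` entity and the `my.custom.property` key are illustrative assumptions, not part of the framework:
+
```
+public class ThingDeserializer extends JsonDeserializer<Thing> {
+
+    public ThingDeserializer() {
+        super(Thing.class);
+    }
+
+    @Override
+    public void configure(Map<String, ?> configs, boolean isKey) {
+        // apply any particular logic (here, reacting to a hypothetical property)
+        // before the standard JsonDeserializer configuration is applied
+        if (configs.containsKey("my.custom.property")) {
+            // ...
+        }
+        super.configure(configs, isKey);
+    }
+
+}
```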
+
+Starting with version 2.3, all the JSON-aware components are configured by default with a `JacksonUtils.enhancedObjectMapper()` instance, which comes with the `MapperFeature.DEFAULT_VIEW_INCLUSION` and `DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES` features disabled.
+Also, such an instance is supplied with well-known modules for custom data types, such as Java time and Kotlin support.
+See the `JacksonUtils.enhancedObjectMapper()` JavaDocs for more information.
+This method also registers a `org.springframework.kafka.support.JacksonMimeTypeModule` for serialization of `org.springframework.util.MimeType` objects into plain strings for inter-platform compatibility over the network.
+A `JacksonMimeTypeModule` can be registered as a bean in the application context and it will be auto-configured into the [Spring Boot `ObjectMapper` instance](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-spring-mvc.html#howto-customize-the-jackson-objectmapper).
+
+Also starting with version 2.3, the `JsonDeserializer` provides `TypeReference`-based constructors for better handling of target generic container types.
+
+Starting with version 2.1, you can convey type information in record `Headers`, allowing the handling of multiple types.
+In addition, you can configure the serializer and deserializer by using the following Kafka properties.
+They have no effect if you have provided `Serializer` and `Deserializer` instances for `KafkaConsumer` and `KafkaProducer`, respectively.
+
+###### Configuration Properties
+
+* `JsonSerializer.ADD_TYPE_INFO_HEADERS` (default `true`): You can set it to `false` to disable this feature on the `JsonSerializer` (sets the `addTypeInfo` property).
+
+* `JsonSerializer.TYPE_MAPPINGS` (default `empty`): See [Mapping Types](#serdes-mapping-types).
+
+* `JsonDeserializer.USE_TYPE_INFO_HEADERS` (default `true`): You can set it to `false` to ignore headers set by the serializer.
+
+* `JsonDeserializer.REMOVE_TYPE_INFO_HEADERS` (default `true`): You can set it to `false` to retain headers set by the serializer.
+
+* `JsonDeserializer.KEY_DEFAULT_TYPE`: Fallback type for deserialization of keys if no header information is present.
+
+* `JsonDeserializer.VALUE_DEFAULT_TYPE`: Fallback type for deserialization of values if no header information is present.
+
+* `JsonDeserializer.TRUSTED_PACKAGES` (default `java.util`, `java.lang`): Comma-delimited list of package patterns allowed for deserialization. `*` means deserialize all.
+
+* `JsonDeserializer.TYPE_MAPPINGS` (default `empty`): See [Mapping Types](#serdes-mapping-types).
+
+* `JsonDeserializer.KEY_TYPE_METHOD` (default `empty`): See [Using Methods to Determine Types](#serdes-type-methods).
+
+* `JsonDeserializer.VALUE_TYPE_METHOD` (default `empty`): See [Using Methods to Determine Types](#serdes-type-methods).
+
+Starting with version 2.2, the type information headers (if added by the serializer) are removed by the deserializer.
+You can revert to the previous behavior by setting the `removeTypeHeaders` property to `false`, either directly on the deserializer or with the configuration property described earlier.
+
+See also [[tip-json]](#tip-json).
+
+| |Starting with version 2.8, if you construct the serializer or deserializer programmatically as shown in [Programmatic Construction](#prog-json), the above properties will be applied by the factories, as long as you have not set any properties explicitly (using `set*()` methods or using the fluent API).
Previously, when creating programmatically, the configuration properties were never applied; this is still the case if you explicitly set properties on the object directly.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+###### Mapping Types
+
+Starting with version 2.2, when using JSON, you can now provide type mappings by using the properties in the preceding list.
+Previously, you had to customize the type mapper within the serializer and deserializer.
+Mappings consist of a comma-delimited list of `token:className` pairs.
+On outbound, the payload’s class name is mapped to the corresponding token.
+On inbound, the token in the type header is mapped to the corresponding class name.
+
+The following example creates a set of mappings:
+
```
+senderProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
+senderProps.put(JsonSerializer.TYPE_MAPPINGS, "cat:com.mycat.Cat, hat:com.myhat.Hat");
+...
+consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class);
+consumerProps.put(JsonDeserializer.TYPE_MAPPINGS, "cat:com.yourcat.Cat, hat:com.yourhat.Hat");
```
+
+| |The corresponding objects must be compatible.|
+|---|---------------------------------------------|
+
+If you use [Spring Boot](https://docs.spring.io/spring-boot/docs/current/reference/html/boot-features-messaging.html#boot-features-kafka), you can provide these properties in the `application.properties` (or yaml) file.
+The following example shows how to do so:
+
```
+spring.kafka.producer.value-serializer=org.springframework.kafka.support.serializer.JsonSerializer
+spring.kafka.producer.properties.spring.json.type.mapping=cat:com.mycat.Cat,hat:com.myhat.Hat
```
+
+| |You can perform only simple configuration with properties.
For more advanced configuration (such as using a custom `ObjectMapper` in the serializer and deserializer), you should use the producer and consumer factory constructors that accept a pre-built serializer and deserializer.
The following Spring Boot example overrides the default factories:

```
@Bean
public ConsumerFactory kafkaConsumerFactory(JsonDeserializer customValueDeserializer) {
Map properties = new HashMap<>();
// properties.put(..., ...)
// ...
return new DefaultKafkaConsumerFactory<>(properties,
new StringDeserializer(), customValueDeserializer);
}

@Bean
public ProducerFactory kafkaProducerFactory(JsonSerializer customValueSerializer) {
Map properties = new HashMap<>();
// properties.put(..., ...)
// ...
return new DefaultKafkaProducerFactory<>(properties,
new StringSerializer(), customValueSerializer);
}
```

Setters are also provided, as an alternative to using these constructors.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+Starting with version 2.2, you can explicitly configure the deserializer to use the supplied target type and ignore type information in headers by using one of the overloaded constructors that have a boolean `useHeadersIfPresent` (which is `true` by default).
+The following example shows how to do so:
+
```
+DefaultKafkaConsumerFactory<Integer, Cat1> cf = new DefaultKafkaConsumerFactory<>(props,
+    new IntegerDeserializer(), new JsonDeserializer<>(Cat1.class, false));
```
+
+###### Using Methods to Determine Types
+
+Starting with version 2.5, you can now configure the deserializer, via properties, to invoke a method to determine the target type.
+If present, this will override any of the other techniques discussed above.
+This can be useful if the data is published by an application that does not use the Spring serializer and you need to deserialize to different types depending on the data, or other headers.
+Set these properties to the method name - a fully qualified class name followed by the method name, separated by a period `.`.
+The method must be declared as `public static`, have one of three signatures `(String topic, byte[] data, Headers headers)`, `(byte[] data, Headers headers)` or `(byte[] data)` and return a Jackson `JavaType`.
+
+* `JsonDeserializer.KEY_TYPE_METHOD` : `spring.json.key.type.method`
+
+* `JsonDeserializer.VALUE_TYPE_METHOD` : `spring.json.value.type.method`
+
+You can use arbitrary headers or inspect the data to determine the type.
+
+Example
+
```
+JavaType thing1Type = TypeFactory.defaultInstance().constructType(Thing1.class);
+
+JavaType thing2Type = TypeFactory.defaultInstance().constructType(Thing2.class);
+
+public static JavaType thingOneOrThingTwo(byte[] data, Headers headers) {
+    // {"thisIsAFieldInThing1":"value", ...
+    if (data[21] == '1') {
+        return thing1Type;
+    }
+    else {
+        return thing2Type;
+    }
+}
```
+
+For more sophisticated data inspection, consider using `JsonPath` or similar, but the simpler the test to determine the type, the more efficient the process will be.
+
+The following is an example of creating the deserializer programmatically (when providing the consumer factory with the deserializer in the constructor):
+
```
+JsonDeserializer<Object> deser = new JsonDeserializer<>()
+        .trustedPackages("*")
+        .typeResolver(SomeClass::thing1Thing2JavaTypeForTopic);
+
+...
+ +public static JavaType thing1Thing2JavaTypeForTopic(String topic, byte[] data, Headers headers) { + ... +} +``` + +###### Programmatic Construction + +When constructing the serializer/deserializer programmatically for use in the producer/consumer factory, since version 2.3, you can use the fluent API, which simplifies configuration. + +``` +@Bean +public ProducerFactory pf() { + Map props = new HashMap<>(); + // props.put(..., ...) + // ... + DefaultKafkaProducerFactory pf = new DefaultKafkaProducerFactory<>(props, + new JsonSerializer() + .forKeys() + .noTypeInfo(), + new JsonSerializer() + .noTypeInfo()); + return pf; +} + +@Bean +public ConsumerFactory cf() { + Map props = new HashMap<>(); + // props.put(..., ...) + // ... + DefaultKafkaConsumerFactory cf = new DefaultKafkaConsumerFactory<>(props, + new JsonDeserializer<>(MyKeyType.class) + .forKeys() + .ignoreTypeHeaders(), + new JsonDeserializer<>(MyValueType.class) + .ignoreTypeHeaders()); + return cf; +} +``` + +To provide type mapping programmatically, similar to [Using Methods to Determine Types](#serdes-type-methods), use the `typeFunction` property. + +Example + +``` +JsonDeserializer deser = new JsonDeserializer<>() + .trustedPackages("*") + .typeFunction(MyUtils::thingOneOrThingTwo); +``` + +Alternatively, as long as you don’t use the fluent API to configure properties, or set them using `set*()` methods, the factories will configure the serializer/deserializer using the configuration properties; see [Configuration Properties](#serdes-json-config). + +##### Delegating Serializer and Deserializer + +###### Using Headers + +Version 2.3 introduced the `DelegatingSerializer` and `DelegatingDeserializer`, which allow producing and consuming records with different key and/or value types. +Producers must set a header `DelegatingSerializer.VALUE_SERIALIZATION_SELECTOR` to a selector value that is used to select which serializer to use for the value and `DelegatingSerializer.KEY_SERIALIZATION_SELECTOR` for the key; if a match is not found, an `IllegalStateException` is thrown. + +For incoming records, the deserializer uses the same headers to select the deserializer to use; if a match is not found or the header is not present, the raw `byte[]` is returned. + +You can configure the map of selector to `Serializer` / `Deserializer` via a constructor, or you can configure it via Kafka producer/consumer properties with the keys `DelegatingSerializer.VALUE_SERIALIZATION_SELECTOR_CONFIG` and `DelegatingSerializer.KEY_SERIALIZATION_SELECTOR_CONFIG`. +For the serializer, the producer property can be a `Map` where the key is the selector and the value is a `Serializer` instance, a serializer `Class` or the class name. +The property can also be a String of comma-delimited map entries, as shown below. + +For the deserializer, the consumer property can be a `Map` where the key is the selector and the value is a `Deserializer` instance, a deserializer `Class` or the class name. +The property can also be a String of comma-delimited map entries, as shown below. 
+
+To configure using properties, use the following syntax:
+
```
+producerProps.put(DelegatingSerializer.VALUE_SERIALIZATION_SELECTOR_CONFIG,
+    "thing1:com.example.MyThing1Serializer, thing2:com.example.MyThing2Serializer");
+
+consumerProps.put(DelegatingDeserializer.VALUE_SERIALIZATION_SELECTOR_CONFIG,
+    "thing1:com.example.MyThing1Deserializer, thing2:com.example.MyThing2Deserializer");
```
+
+Producers would then set the `DelegatingSerializer.VALUE_SERIALIZATION_SELECTOR` header to `thing1` or `thing2`.
+
+This technique supports sending different types to the same topic (or different topics).
+
+| |Starting with version 2.5.1, it is not necessary to set the selector header, if the type (key or value) is one of the standard types supported by `Serdes` (`Long`, `Integer`, etc).
Instead, the serializer will set the header to the class name of the type.
It is not necessary to configure serializers or deserializers for these types; they will be created (once) dynamically.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+For another technique to send different types to different topics, see [Using `RoutingKafkaTemplate`](#routing-template).
+
+###### By Type
+
+Version 2.8 introduced the `DelegatingByTypeSerializer`.
+
```
+@Bean
+public ProducerFactory<Object, Object> producerFactory(Map<String, Object> config) {
+    return new DefaultKafkaProducerFactory<>(config,
+        null, new DelegatingByTypeSerializer(Map.of(
+            byte[].class, new ByteArraySerializer(),
+            Bytes.class, new BytesSerializer(),
+            String.class, new StringSerializer())));
+}
```
+
+Starting with version 2.8.3, you can configure the serializer to check if the map key is assignable from the target object, useful when a delegate serializer can serialize subclasses.
+In this case, if there are ambiguous matches, an ordered `Map`, such as a `LinkedHashMap`, should be provided.
+
+###### By Topic
+
+Starting with version 2.8, the `DelegatingByTopicSerializer` and `DelegatingByTopicDeserializer` allow selection of a serializer/deserializer based on the topic name.
+Regex `Pattern` s are used to look up the instance to use.
+The map can be configured using a constructor, or via properties (a comma-delimited list of `pattern:serializer`).
+
```
+producerConfigs.put(DelegatingByTopicSerializer.VALUE_SERIALIZATION_TOPIC_CONFIG,
+    "topic[0-4]:" + ByteArraySerializer.class.getName()
+        + ", topic[5-9]:" + StringSerializer.class.getName());
+...
+consumerConfigs.put(DelegatingByTopicDeserializer.VALUE_SERIALIZATION_TOPIC_CONFIG,
+    "topic[0-4]:" + ByteArrayDeserializer.class.getName()
+        + ", topic[5-9]:" + StringDeserializer.class.getName());
```
+
+Use `KEY_SERIALIZATION_TOPIC_CONFIG` when using this for keys.
+
```
+@Bean
+public ProducerFactory<Integer, Object> producerFactory(Map<String, Object> config) {
+    return new DefaultKafkaProducerFactory<>(config,
+        null,
+        new DelegatingByTopicSerializer(Map.of(
+                Pattern.compile("topic[0-4]"), new ByteArraySerializer(),
+                Pattern.compile("topic[5-9]"), new StringSerializer()),
+                new JsonSerializer<Object>())); // default
+}
```
+
+You can specify a default serializer/deserializer to use when there is no pattern match using `DelegatingByTopicSerialization.KEY_SERIALIZATION_TOPIC_DEFAULT` and `DelegatingByTopicSerialization.VALUE_SERIALIZATION_TOPIC_DEFAULT`.
+
+An additional property `DelegatingByTopicSerialization.CASE_SENSITIVE` (default `true`), when set to `false`, makes the topic lookup case insensitive.
+
+##### Retrying Deserializer
+
+The `RetryingDeserializer` uses a delegate `Deserializer` and `RetryTemplate` to retry deserialization when the delegate might have transient errors, such as network issues, during deserialization.
+
```
+ConsumerFactory cf = new DefaultKafkaConsumerFactory(myConsumerConfigs,
+    new RetryingDeserializer(myUnreliableKeyDeserializer, retryTemplate),
+    new RetryingDeserializer(myUnreliableValueDeserializer, retryTemplate));
```
+
+Refer to the [spring-retry](https://github.com/spring-projects/spring-retry) project for configuration of the `RetryTemplate` with a retry policy, back off policy, etc.
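+
+As a minimal sketch, such a `RetryTemplate` might be configured as follows; the three attempts and one-second back off are illustrative values only:
+
```
+RetryTemplate retryTemplate = new RetryTemplate();
+retryTemplate.setRetryPolicy(new SimpleRetryPolicy(3)); // up to 3 attempts in total
+FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
+backOffPolicy.setBackOffPeriod(1000); // wait one second between attempts
+retryTemplate.setBackOffPolicy(backOffPolicy);
```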
+ +##### Spring Messaging Message Conversion + +Although the `Serializer` and `Deserializer` API is quite simple and flexible from the low-level Kafka `Consumer` and `Producer` perspective, you might need more flexibility at the Spring Messaging level, when using either `@KafkaListener` or [Spring Integration’s Apache Kafka Support](https://docs.spring.io/spring-integration/docs/current/reference/html/kafka.html#kafka). +To let you easily convert to and from `org.springframework.messaging.Message`, Spring for Apache Kafka provides a `MessageConverter` abstraction with the `MessagingMessageConverter` implementation and its `JsonMessageConverter` (and subclasses) customization. +You can inject the `MessageConverter` into a `KafkaTemplate` instance directly and by using `AbstractKafkaListenerContainerFactory` bean definition for the `@KafkaListener.containerFactory()` property. +The following example shows how to do so: + +``` +@Bean +public KafkaListenerContainerFactory kafkaJsonListenerContainerFactory() { + ConcurrentKafkaListenerContainerFactory factory = + new ConcurrentKafkaListenerContainerFactory<>(); + factory.setConsumerFactory(consumerFactory()); + factory.setMessageConverter(new JsonMessageConverter()); + return factory; +} +... +@KafkaListener(topics = "jsonData", + containerFactory = "kafkaJsonListenerContainerFactory") +public void jsonListener(Cat cat) { +... +} +``` + +When using Spring Boot, simply define the converter as a `@Bean` and Spring Boot auto configuration will wire it into the auto-configured template and container factory. + +When you use a `@KafkaListener`, the parameter type is provided to the message converter to assist with the conversion. + +| |This type inference can be achieved only when the `@KafkaListener` annotation is declared at the method level.
With a class-level `@KafkaListener`, the payload type is used to select which `@KafkaHandler` method to invoke, so it must already have been converted before the method can be chosen.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |On the consumer side, you can configure a `JsonMessageConverter`; it can handle `ConsumerRecord` values of type `byte[]`, `Bytes` and `String` so should be used in conjunction with a `ByteArrayDeserializer`, `BytesDeserializer` or `StringDeserializer`.
(`byte[]` and `Bytes` are more efficient because they avoid an unnecessary `byte[]` to `String` conversion).
You can also configure the specific subclass of `JsonMessageConverter` corresponding to the deserializer, if you so wish.

On the producer side, when you use Spring Integration or the `KafkaTemplate.send(Message<?> message)` method (see [Using `KafkaTemplate`](#kafka-template)), you must configure a message converter that is compatible with the configured Kafka `Serializer`.

* `StringJsonMessageConverter` with `StringSerializer`

* `BytesJsonMessageConverter` with `BytesSerializer`

* `ByteArrayJsonMessageConverter` with `ByteArraySerializer`

Again, using `byte[]` or `Bytes` is more efficient because they avoid a `String` to `byte[]` conversion.

For convenience, starting with version 2.3, the framework also provides a `StringOrBytesSerializer` which can serialize all three value types so it can be used with any of the message converters.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Starting with version 2.7.1, message payload conversion can be delegated to a `spring-messaging` `SmartMessageConverter`; this enables conversion, for example, to be based on the `MessageHeaders.CONTENT_TYPE` header. + +| |The `KafkaMessageConverter.fromMessage()` method is called for outbound conversion to a `ProducerRecord` with the message payload in the `ProducerRecord.value()` property.
The `KafkaMessageConverter.toMessage()` method is called for inbound conversion from `ConsumerRecord` with the payload being the `ConsumerRecord.value()` property.
The `SmartMessageConverter.toMessage()` method is called to create a new outbound `Message` from the `Message` passed to `fromMessage()` (usually by `KafkaTemplate.send(Message<?> msg)`).
Similarly, in the `KafkaMessageConverter.toMessage()` method, after the converter has created a new `Message` from the `ConsumerRecord`, the `SmartMessageConverter.fromMessage()` method is called and then the final inbound message is created with the newly converted payload.
In either case, if the `SmartMessageConverter` returns `null`, the original message is used.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+When the default converter is used in the `KafkaTemplate` and listener container factory, you configure the `SmartMessageConverter` by calling `setMessagingConverter()` on the template and via the `contentTypeConverter` property on `@KafkaListener` methods.
+
+Examples:
+
```
+template.setMessagingConverter(mySmartConverter);
```
+
```
+@KafkaListener(id = "withSmartConverter", topics = "someTopic",
+    contentTypeConverter = "mySmartConverter")
+public void smart(Thing thing) {
+    ...
+}
```
+
+###### Using Spring Data Projection Interfaces
+
+Starting with version 2.1.1, you can convert JSON to a Spring Data Projection interface instead of a concrete type.
+This allows very selective, and low-coupled bindings to data, including the lookup of values from multiple places inside the JSON document.
+For example, the following interface can be defined as the message payload type:
+
```
+interface SomeSample {
+
+    @JsonPath({ "$.username", "$.user.name" })
+    String getUsername();
+
+}
```
+
```
+@KafkaListener(id = "projection.listener", topics = "projection")
+public void projection(SomeSample in) {
+    String username = in.getUsername();
+    ...
+}
```
+
+By default, accessor methods are used to look up the property name as a field in the received JSON document.
+The `@JsonPath` expression allows customization of the value lookup, and even allows multiple JSON Path expressions to be defined, so that values can be looked up from multiple places until an expression returns an actual value.
+
+To enable this feature, use a `ProjectingMessageConverter` configured with an appropriate delegate converter (used for outbound conversion and converting non-projection interfaces).
+You must also add `spring-data:spring-data-commons` and `com.jayway.jsonpath:json-path` to the class path.
+
+When used as the parameter to a `@KafkaListener` method, the interface type is automatically passed to the converter as normal.
+
+##### Using `ErrorHandlingDeserializer`
+
+When a deserializer fails to deserialize a message, Spring has no way to handle the problem, because it occurs before the `poll()` returns.
+To solve this problem, the `ErrorHandlingDeserializer` has been introduced.
+This deserializer delegates to a real deserializer (key or value).
+If the delegate fails to deserialize the record content, the `ErrorHandlingDeserializer` returns a `null` value and a `DeserializationException` in a header that contains the cause and the raw bytes.
+When you use a record-level `MessageListener`, if the `ConsumerRecord` contains a `DeserializationException` header for either the key or value, the container’s `ErrorHandler` is called with the failed `ConsumerRecord`.
+The record is not passed to the listener.
+
+Alternatively, you can configure the `ErrorHandlingDeserializer` to create a custom value by providing a `failedDeserializationFunction`, which is a `Function<FailedDeserializationInfo, T>`.
+This function is invoked to create an instance of `T`, which is passed to the listener in the usual fashion.
+An object of type `FailedDeserializationInfo`, which contains all the contextual information, is provided to the function.
+You can find the `DeserializationException` (as a serialized Java object) in headers.
+See the [Javadoc](https://docs.spring.io/spring-kafka/api/org/springframework/kafka/support/serializer/ErrorHandlingDeserializer.html) for the `ErrorHandlingDeserializer` for more information.
+
+You can use the `DefaultKafkaConsumerFactory` constructor that takes key and value `Deserializer` objects and wire in appropriate `ErrorHandlingDeserializer` instances that you have configured with the proper delegates.
+Alternatively, you can use consumer configuration properties (which are used by the `ErrorHandlingDeserializer`) to instantiate the delegates.
+The property names are `ErrorHandlingDeserializer.KEY_DESERIALIZER_CLASS` and `ErrorHandlingDeserializer.VALUE_DESERIALIZER_CLASS`.
+The property value can be a class or class name.
+The following example shows how to set these properties:
+
```
+... // other props
+props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ErrorHandlingDeserializer.class);
+props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ErrorHandlingDeserializer.class);
+props.put(ErrorHandlingDeserializer.KEY_DESERIALIZER_CLASS, JsonDeserializer.class);
+props.put(JsonDeserializer.KEY_DEFAULT_TYPE, "com.example.MyKey");
+props.put(ErrorHandlingDeserializer.VALUE_DESERIALIZER_CLASS, JsonDeserializer.class.getName());
+props.put(JsonDeserializer.VALUE_DEFAULT_TYPE, "com.example.MyValue");
+props.put(JsonDeserializer.TRUSTED_PACKAGES, "com.example");
+return new DefaultKafkaConsumerFactory<>(props);
```
+
+The following example uses a `failedDeserializationFunction`.
+
```
+public class BadFoo extends Foo {
+
+    private final FailedDeserializationInfo failedDeserializationInfo;
+
+    public BadFoo(FailedDeserializationInfo failedDeserializationInfo) {
+        this.failedDeserializationInfo = failedDeserializationInfo;
+    }
+
+    public FailedDeserializationInfo getFailedDeserializationInfo() {
+        return this.failedDeserializationInfo;
+    }
+
+}
+
+public class FailedFooProvider implements Function<FailedDeserializationInfo, Foo> {
+
+    @Override
+    public Foo apply(FailedDeserializationInfo info) {
+        return new BadFoo(info);
+    }
+
+}
```
+
+The preceding example uses the following configuration:
+
```
+...
+consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ErrorHandlingDeserializer.class);
+consumerProps.put(ErrorHandlingDeserializer.VALUE_DESERIALIZER_CLASS, JsonDeserializer.class);
+consumerProps.put(ErrorHandlingDeserializer.VALUE_FUNCTION, FailedFooProvider.class);
+...
```
+
+| |If the consumer is configured with an `ErrorHandlingDeserializer`, it is important to configure the `KafkaTemplate` and its producer with a serializer that can handle normal objects as well as raw `byte[]` values, which result from deserialization exceptions.
The generic value type of the template should be `Object`.
One technique is to use the `DelegatingByTypeSerializer`; an example follows:|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
```
+@Bean
+public ProducerFactory<String, Object> producerFactory() {
+    return new DefaultKafkaProducerFactory<>(producerConfiguration(), new StringSerializer(),
+        new DelegatingByTypeSerializer(Map.of(byte[].class, new ByteArraySerializer(),
+            MyNormalObject.class, new JsonSerializer<Object>())));
+}
+
+@Bean
+public KafkaTemplate<String, Object> kafkaTemplate() {
+    return new KafkaTemplate<>(producerFactory());
+}
```
+
+When using an `ErrorHandlingDeserializer` with a batch listener, you must check for the deserialization exceptions in message headers.
+When used with a `DefaultBatchErrorHandler`, you can use that header to determine which record the exception failed on and communicate to the error handler via a `BatchListenerFailedException`.
+
```
+@KafkaListener(id = "test", topics = "test")
+void listen(List<Thing> in, @Header(KafkaHeaders.BATCH_CONVERTED_HEADERS) List<Map<String, Object>> headers) {
+    for (int i = 0; i < in.size(); i++) {
+        Thing thing = in.get(i);
+        if (thing == null
+                && headers.get(i).get(SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER) != null) {
+            DeserializationException deserEx = ListenerUtils.byteArrayToDeserializationException(this.logger,
+                (byte[]) headers.get(i).get(SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER));
+            if (deserEx != null) {
+                logger.error(deserEx, "Record at index " + i + " could not be deserialized");
+            }
+            throw new BatchListenerFailedException("Deserialization", deserEx, i);
+        }
+        process(thing);
+    }
+}
```
+
+`ListenerUtils.byteArrayToDeserializationException()` can be used to convert the header to a `DeserializationException`.
+
+When consuming `List<ConsumerRecord<?, ?>>`, `ListenerUtils.getExceptionFromHeader()` is used instead:
+
```
+@KafkaListener(id = "kgh2036", topics = "kgh2036")
+void listen(List<ConsumerRecord<String, Thing>> in) {
+    for (int i = 0; i < in.size(); i++) {
+        ConsumerRecord<String, Thing> rec = in.get(i);
+        if (rec.value() == null) {
+            DeserializationException deserEx = ListenerUtils.getExceptionFromHeader(rec,
+                SerializationUtils.VALUE_DESERIALIZER_EXCEPTION_HEADER, this.logger);
+            if (deserEx != null) {
+                logger.error(deserEx, "Record at offset " + rec.offset() + " could not be deserialized");
+                throw new BatchListenerFailedException("Deserialization", deserEx, i);
+            }
+        }
+        process(rec.value());
+    }
+}
```
+
+##### Payload Conversion with Batch Listeners
+
+You can also use a `JsonMessageConverter` within a `BatchMessagingMessageConverter` to convert batch messages when you use a batch listener container factory.
+See [Serialization, Deserialization, and Message Conversion](#serdes) and [Spring Messaging Message Conversion](#messaging-message-conversion) for more information.
+
+By default, the type for the conversion is inferred from the listener argument.
+If you configure the `JsonMessageConverter` with a `DefaultJackson2TypeMapper` that has its `TypePrecedence` set to `TYPE_ID` (instead of the default `INFERRED`), the converter uses the type information in headers (if present) instead.
+This allows, for example, listener methods to be declared with interfaces instead of concrete classes.
Also, the type converter supports mapping, so the deserialization can be to a different type than the source (as long as the data is compatible).
This is also useful when you use [class-level `@KafkaListener` instances](#class-level-kafkalistener), where the payload must have already been converted to determine which method to invoke.
The following example creates beans that use this technique:

```
@Bean
public KafkaListenerContainerFactory<?> kafkaListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<Integer, Foo> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory());
    factory.setBatchListener(true);
    factory.setMessageConverter(new BatchMessagingMessageConverter(converter()));
    return factory;
}

@Bean
public JsonMessageConverter converter() {
    return new JsonMessageConverter();
}
```

Note that, for this to work, the method signature for the conversion target must be a container object with a single generic parameter type, such as the following:

```
@KafkaListener(topics = "blc1")
public void listen(List<Foo> foos, @Header(KafkaHeaders.OFFSET) List<Long> offsets) {
    ...
}
```

Note that you can still access the batch headers.

If the batch converter has a record converter that supports it, you can also receive a list of messages where the payloads are converted according to the generic type.
The following example shows how to do so:

```
@KafkaListener(topics = "blc3", groupId = "blc3")
public void listen1(List<Message<Foo>> fooMessages) {
    ...
}
```

##### `ConversionService` Customization

Starting with version 2.1.1, the `org.springframework.core.convert.ConversionService` used by the default `o.s.messaging.handler.annotation.support.MessageHandlerMethodFactory` to resolve parameters for the invocation of a listener method is supplied with all beans that implement any of the following interfaces:

* `org.springframework.core.convert.converter.Converter`

* `org.springframework.core.convert.converter.GenericConverter`

* `org.springframework.format.Formatter`

This lets you further customize listener deserialization without changing the default configuration for `ConsumerFactory` and `KafkaListenerContainerFactory`.

| |Setting a custom `MessageHandlerMethodFactory` on the `KafkaListenerEndpointRegistrar` through a `KafkaListenerConfigurer` bean disables this feature.|
|---|---|

##### Adding custom `HandlerMethodArgumentResolver` to `@KafkaListener`

Starting with version 2.4.2, you are able to add your own `HandlerMethodArgumentResolver` and resolve custom method parameters.
All you need is to implement `KafkaListenerConfigurer` and use the method `setCustomMethodArgumentResolvers()` from the class `KafkaListenerEndpointRegistrar`.
```
@Configuration
class CustomKafkaConfig implements KafkaListenerConfigurer {

    @Override
    public void configureKafkaListeners(KafkaListenerEndpointRegistrar registrar) {
        registrar.setCustomMethodArgumentResolvers(
            new HandlerMethodArgumentResolver() {

                @Override
                public boolean supportsParameter(MethodParameter parameter) {
                    return CustomMethodArgument.class.isAssignableFrom(parameter.getParameterType());
                }

                @Override
                public Object resolveArgument(MethodParameter parameter, Message<?> message) {
                    return new CustomMethodArgument(
                            message.getHeaders().get(KafkaHeaders.RECEIVED_TOPIC, String.class)
                    );
                }

            }
        );
    }

}
```

You can also completely replace the framework’s argument resolution by adding a custom `MessageHandlerMethodFactory` to the `KafkaListenerEndpointRegistrar` bean.
If you do this, and your application needs to handle tombstone records with a `null` `value()` (for example, from a compacted topic), you should add a `KafkaNullAwarePayloadArgumentResolver` to the factory; it must be the last resolver because it supports all types and can match arguments without a `@Payload` annotation.
If you are using a `DefaultMessageHandlerMethodFactory`, set this resolver as the last custom resolver; the factory will ensure that this resolver is used before the standard `PayloadMethodArgumentResolver`, which has no knowledge of `KafkaNull` payloads.

See also [Null Payloads and Log Compaction of 'Tombstone' Records](#tombstones).

#### 4.1.18. Message Headers

The 0.11.0.0 client introduced support for headers in messages.
As of version 2.0, Spring for Apache Kafka supports mapping these headers to and from `spring-messaging` `MessageHeaders`.
| |Previous versions mapped `ConsumerRecord` and `ProducerRecord` to spring-messaging `Message<?>`, where the value property is mapped to and from the `payload` and other properties (`topic`, `partition`, and so on) were mapped to headers. This is still the case, but additional (arbitrary) headers can now be mapped.|
|---|---|

Apache Kafka headers have a simple API, shown in the following interface definition:

```
public interface Header {

    String key();

    byte[] value();

}
```

The `KafkaHeaderMapper` strategy is provided to map header entries between Kafka `Headers` and `MessageHeaders`.
Its interface definition is as follows:

```
public interface KafkaHeaderMapper {

    void fromHeaders(MessageHeaders headers, Headers target);

    void toHeaders(Headers source, Map<String, Object> target);

}
```

The `DefaultKafkaHeaderMapper` maps the key to the `MessageHeaders` header name and, in order to support rich header types for outbound messages, JSON conversion is performed.
A “special” header (with a key of `spring_json_header_types`) contains a JSON map of `<name>:<type>`.
This header is used on the inbound side to provide appropriate conversion of each header value to the original type.

On the inbound side, all Kafka `Header` instances are mapped to `MessageHeaders`.
On the outbound side, by default, all `MessageHeaders` are mapped, except `id`, `timestamp`, and the headers that map to `ConsumerRecord` properties.

You can specify which headers are to be mapped for outbound messages by providing patterns to the mapper.
The following listing shows a number of example mappings:

```
public DefaultKafkaHeaderMapper() { (1)
    ...
}

public DefaultKafkaHeaderMapper(ObjectMapper objectMapper) { (2)
    ...
}

public DefaultKafkaHeaderMapper(String... patterns) { (3)
    ...
}

public DefaultKafkaHeaderMapper(ObjectMapper objectMapper, String... patterns) { (4)
    ...
}
```

|**1**|Uses a default Jackson `ObjectMapper` and maps most headers, as discussed before the example.|
|-----|---|
|**2**|Uses the provided Jackson `ObjectMapper` and maps most headers, as discussed before the example.|
|**3**|Uses a default Jackson `ObjectMapper` and maps headers according to the provided patterns.|
|**4**|Uses the provided Jackson `ObjectMapper` and maps headers according to the provided patterns.|

Patterns are rather simple and can contain a leading wildcard (`*`), a trailing wildcard, or both (for example, `*.cat.*`).
You can negate patterns with a leading `!`.
The first pattern that matches a header name (whether positive or negative) wins.

When you provide your own patterns, we recommend including `!id` and `!timestamp`, since these headers are read-only on the inbound side.
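For example, a mapper that keeps the defaults but excludes a hypothetical internal header could be built with the patterns constructor shown above (a minimal sketch; the header name is illustrative):

```
DefaultKafkaHeaderMapper mapper =
        new DefaultKafkaHeaderMapper("!id", "!timestamp", "!myInternalHeader", "*");
```

Because the first matching pattern wins, the negations must precede the final `*` catch-all.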
| |By default, the mapper deserializes only classes in `java.lang` and `java.util`. You can trust other (or all) packages by adding trusted packages with the `addTrustedPackages` method. If you receive messages from untrusted sources, you may wish to add only those packages you trust. To trust all packages, you can use `mapper.addTrustedPackages("*")`.|
|---|---|

| |Mapping `String` header values in a raw form is useful when communicating with systems that are not aware of the mapper’s JSON format.|
|---|---|

Starting with version 2.2.5, you can specify that certain string-valued headers should not be mapped using JSON, but to/from a raw `byte[]`.
The `AbstractKafkaHeaderMapper` has new properties: when `mapAllStringsOut` is set to `true`, all string-valued headers are converted to `byte[]` using the `charset` property (default `UTF-8`).
In addition, there is a `rawMappedHeaders` property, which is a map of `header name : boolean`; if the map contains a header name and the header contains a `String` value, it is mapped as a raw `byte[]` using the charset.
This map is also used to map raw incoming `byte[]` headers to `String` using the charset if, and only if, the boolean in the map value is `true`.
If the boolean is `false`, or the header name is not in the map with a `true` value, the incoming header is simply mapped as the raw unmapped header.

The following test case illustrates this mechanism:

```
@Test
public void testSpecificStringConvert() {
    DefaultKafkaHeaderMapper mapper = new DefaultKafkaHeaderMapper();
    Map<String, Boolean> rawMappedHeaders = new HashMap<>();
    rawMappedHeaders.put("thisOnesAString", true);
    rawMappedHeaders.put("thisOnesBytes", false);
    mapper.setRawMappedHeaders(rawMappedHeaders);
    Map<String, Object> headersMap = new HashMap<>();
    headersMap.put("thisOnesAString", "thing1");
    headersMap.put("thisOnesBytes", "thing2");
    headersMap.put("alwaysRaw", "thing3".getBytes());
    MessageHeaders headers = new MessageHeaders(headersMap);
    Headers target = new RecordHeaders();
    mapper.fromHeaders(headers, target);
    assertThat(target).containsExactlyInAnyOrder(
            new RecordHeader("thisOnesAString", "thing1".getBytes()),
            new RecordHeader("thisOnesBytes", "thing2".getBytes()),
            new RecordHeader("alwaysRaw", "thing3".getBytes()));
    headersMap.clear();
    mapper.toHeaders(target, headersMap);
    assertThat(headersMap).contains(
            entry("thisOnesAString", "thing1"),
            entry("thisOnesBytes", "thing2".getBytes()),
            entry("alwaysRaw", "thing3".getBytes()));
}
```

By default, the `DefaultKafkaHeaderMapper` is used in the `MessagingMessageConverter` and `BatchMessagingMessageConverter`, as long as Jackson is on the class path.

With the batch converter, the converted headers are available in the `KafkaHeaders.BATCH_CONVERTED_HEADERS` header as a `List<Map<String, Object>>`, where the map in a position of the list corresponds to the data position in the payload.

If there is no converter (either because Jackson is not present or it is explicitly set to `null`), the headers from the consumer record are provided unconverted in the `KafkaHeaders.NATIVE_HEADERS` header.
This header is a `Headers` object (or a `List<Headers>` in the case of the batch converter), where the position in the list corresponds to the data position in the payload.
| |Certain types are not suitable for JSON serialization, and a simple `toString()` serialization might be preferred for these types. The `DefaultKafkaHeaderMapper` has a method called `addToStringClasses()` that lets you supply the names of classes that should be treated this way for outbound mapping. During inbound mapping, they are mapped as `String`. By default, only `org.springframework.util.MimeType` and `org.springframework.http.MediaType` are mapped this way.|
|---|---|

| |Starting with version 2.3, handling of String-valued headers is simplified. Such headers are no longer JSON encoded, by default (i.e. they do not have enclosing `"..."` added). The type is still added to the `JSON_TYPES` header so the receiving system can convert back to a String (from `byte[]`). The mapper can handle (decode) headers produced by older versions (it checks for a leading `"`); in this way an application using 2.3 can consume records from older versions.|
|---|---|

| |To be compatible with earlier versions, set `encodeStrings` to `true` if records produced by a version using 2.3 might be consumed by applications using earlier versions. When all applications are using 2.3 or higher, you can leave the property at its default value of `false`.|
|---|---|
```
@Bean
MessagingMessageConverter converter() {
    MessagingMessageConverter converter = new MessagingMessageConverter();
    DefaultKafkaHeaderMapper mapper = new DefaultKafkaHeaderMapper();
    mapper.setEncodeStrings(true);
    converter.setHeaderMapper(mapper);
    return converter;
}
```

If you use Spring Boot, it auto-configures this converter bean into the auto-configured `KafkaTemplate`; otherwise, you should add this converter to the template.

#### 4.1.19. Null Payloads and Log Compaction of 'Tombstone' Records

When you use [Log Compaction](https://kafka.apache.org/documentation/#compaction), you can send and receive messages with `null` payloads to identify the deletion of a key.

You can also receive `null` values for other reasons, such as a `Deserializer` that might return `null` when it cannot deserialize a value.

To send a `null` payload by using the `KafkaTemplate`, you can pass null into the value argument of the `send()` methods.
One exception to this is the `send(Message<?> message)` variant.
Since `spring-messaging` `Message<?>` cannot have a `null` payload, you can use a special payload type called `KafkaNull`, and the framework sends `null`.
For convenience, the static `KafkaNull.INSTANCE` is provided.

When you use a message listener container, the received `ConsumerRecord` has a `null` `value()`.

To configure the `@KafkaListener` to handle `null` payloads, you must use the `@Payload` annotation with `required = false`.
If it is a tombstone message for a compacted log, you usually also need the key so that your application can determine which key was “deleted”.
The following example shows such a configuration:

```
@KafkaListener(id = "deletableListener", topics = "myTopic")
public void listen(@Payload(required = false) String value, @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) String key) {
    // value == null represents key deletion
}
```

When you use a class-level `@KafkaListener` with multiple `@KafkaHandler` methods, some additional configuration is needed.
Specifically, you need a `@KafkaHandler` method with a `KafkaNull` payload.
The following example shows how to configure one:

```
@KafkaListener(id = "multi", topics = "myTopic")
static class MultiListenerBean {

    @KafkaHandler
    public void listen(String cat) {
        ...
    }

    @KafkaHandler
    public void listen(Integer hat) {
        ...
    }

    @KafkaHandler
    public void delete(@Payload(required = false) KafkaNull nul, @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) int key) {
        ...
    }

}
```

Note that the argument is `null`, not `KafkaNull`.
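Returning to the sending side described earlier, publishing a tombstone through the `Message<?>`-based API might look like the following sketch (assuming a `KafkaTemplate` named `template` with `String` serializers; the topic and key are illustrative):

```
// KafkaNull.INSTANCE becomes a null value in the outgoing ProducerRecord
template.send(MessageBuilder.withPayload(KafkaNull.INSTANCE)
        .setHeader(KafkaHeaders.TOPIC, "myTopic")
        .setHeader(KafkaHeaders.MESSAGE_KEY, "deletedKey")
        .build());
```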
| |See [[tip-assign-all-parts]](#tip-assign-all-parts).|
|---|---|

| |This feature requires the use of a `KafkaNullAwarePayloadArgumentResolver`, which the framework configures when using the default `MessageHandlerMethodFactory`. When using a custom `MessageHandlerMethodFactory`, see [Adding custom `HandlerMethodArgumentResolver` to `@KafkaListener`](#custom-arg-resolve).|
|---|---|

#### 4.1.20. Handling Exceptions

This section describes how to handle various exceptions that may arise when you use Spring for Apache Kafka.

##### Listener Error Handlers

Starting with version 2.0, the `@KafkaListener` annotation has a new attribute: `errorHandler`.

You can use the `errorHandler` to provide the bean name of a `KafkaListenerErrorHandler` implementation.
This functional interface has one method, as the following listing shows:

```
@FunctionalInterface
public interface KafkaListenerErrorHandler {

    Object handleError(Message<?> message, ListenerExecutionFailedException exception) throws Exception;

}
```

You have access to the spring-messaging `Message<?>` object produced by the message converter and the exception that was thrown by the listener, which is wrapped in a `ListenerExecutionFailedException`.
The error handler can throw the original or a new exception, which is thrown to the container.
Anything returned by the error handler is ignored.

Starting with version 2.7, you can set the `rawRecordHeader` property on the `MessagingMessageConverter` and `BatchMessagingMessageConverter`, which causes the raw `ConsumerRecord` to be added to the converted `Message<?>` in the `KafkaHeaders.RAW_DATA` header.
This is useful, for example, if you wish to use a `DeadLetterPublishingRecoverer` in a listener error handler.
It might be used in a request/reply scenario where you wish to send a failure result to the sender, after some number of retries, after capturing the failed record in a dead letter topic.

```
@Bean
KafkaListenerErrorHandler eh(DeadLetterPublishingRecoverer recoverer) {
    return (msg, ex) -> {
        if (msg.getHeaders().get(KafkaHeaders.DELIVERY_ATTEMPT, Integer.class) > 9) {
            recoverer.accept(msg.getHeaders().get(KafkaHeaders.RAW_DATA, ConsumerRecord.class), ex);
            return "FAILED";
        }
        throw ex;
    };
}
```

It has a sub-interface (`ConsumerAwareListenerErrorHandler`) that has access to the consumer object, through the following method:

```
Object handleError(Message<?> message, ListenerExecutionFailedException exception, Consumer<?, ?> consumer);
```

If your error handler implements this interface, you can, for example, adjust the offsets accordingly.
For example, to reset the offset to replay the failed message, you could do something like the following:

```
@Bean
public ConsumerAwareListenerErrorHandler listen3ErrorHandler() {
    return (m, e, c) -> {
        this.listen3Exception = e;
        MessageHeaders headers = m.getHeaders();
        c.seek(new org.apache.kafka.common.TopicPartition(
                headers.get(KafkaHeaders.RECEIVED_TOPIC, String.class),
                headers.get(KafkaHeaders.RECEIVED_PARTITION_ID, Integer.class)),
                headers.get(KafkaHeaders.OFFSET, Long.class));
        return null;
    };
}
```

Similarly, you could do something like the following for a batch listener:

```
@Bean
public ConsumerAwareListenerErrorHandler listen10ErrorHandler() {
    return (m, e, c) -> {
        this.listen10Exception = e;
        MessageHeaders headers = m.getHeaders();
        List<String> topics = headers.get(KafkaHeaders.RECEIVED_TOPIC, List.class);
        List<Integer> partitions = headers.get(KafkaHeaders.RECEIVED_PARTITION_ID, List.class);
        List<Long> offsets = headers.get(KafkaHeaders.OFFSET, List.class);
        Map<TopicPartition, Long> offsetsToReset = new HashMap<>();
        for (int i = 0; i < topics.size(); i++) {
            int index = i;
            offsetsToReset.compute(new TopicPartition(topics.get(i), partitions.get(i)),
                    (k, v) -> v == null ? offsets.get(index) : Math.min(v, offsets.get(index)));
        }
        offsetsToReset.forEach((k, v) -> c.seek(k, v));
        return null;
    };
}
```

This resets each topic/partition in the batch to the lowest offset in the batch.

| |The preceding two examples are simplistic implementations, and you would probably want more checking in the error handler.|
|---|---|

##### Container Error Handlers

Starting with version 2.8, the legacy `ErrorHandler` and `BatchErrorHandler` interfaces have been superseded by a new `CommonErrorHandler`.
These error handlers can handle errors for both record and batch listeners, allowing a single listener container factory to create containers for both types of listener.
`CommonErrorHandler` implementations to replace most legacy framework error handler implementations are provided, and the legacy error handlers are deprecated.
The legacy interfaces are still supported by listener containers and listener container factories; they will be deprecated in a future release.

When transactions are being used, no error handlers are configured, by default, so that the exception will roll back the transaction.
Error handling for transactional containers is handled by the [`AfterRollbackProcessor`](#after-rollback).
If you provide a custom error handler when using transactions, it must throw an exception if you want the transaction rolled back.

This interface has a default method `isAckAfterHandle()`, which is called by the container to determine whether the offset(s) should be committed if the error handler returns without throwing an exception; it returns `true` by default.

Typically, the error handlers provided by the framework will throw an exception when the error is not "handled" (e.g. after performing a seek operation).
By default, such exceptions are logged by the container at `ERROR` level.
All of the framework error handlers extend `KafkaExceptionLogLevelAware`, which allows you to control the level at which these exceptions are logged.

```
/**
 * Set the level at which the exception thrown by this handler is logged.
 * @param logLevel the level (default ERROR).
 */
public void setLogLevel(KafkaException.Level logLevel) {
    ...
}
```
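A minimal sketch of lowering that level on a handler (the handler construction is illustrative; the handler would then be wired into the container factory as shown below):

```
DefaultErrorHandler errorHandler = new DefaultErrorHandler();
// log the rethrown exception at WARN instead of the default ERROR
errorHandler.setLogLevel(KafkaException.Level.WARN);
```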
You can specify a global error handler to be used for all listeners in the container factory.
The following example shows how to do so:

```
@Bean
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Integer, String>>
        kafkaListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<Integer, String> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    ...
    factory.setCommonErrorHandler(myErrorHandler);
    ...
    return factory;
}
```

By default, if an annotated listener method throws an exception, it is thrown to the container, and the message is handled according to the container configuration.

The container commits any pending offset commits before calling the error handler.

If you are using Spring Boot, you simply need to add the error handler as a `@Bean`, and Boot will add it to the auto-configured factory.

##### DefaultErrorHandler

This new error handler replaces the `SeekToCurrentErrorHandler` and `RecoveringBatchErrorHandler`, which have been the default error handlers for several releases now.
One difference is that the fallback behavior for batch listeners (when an exception other than a `BatchListenerFailedException` is thrown) is the equivalent of [Retrying Complete Batches](#retrying-batch-eh).

The error handler can recover (skip) a record that keeps failing.
By default, after ten failures, the failed record is logged (at the `ERROR` level).
You can configure the handler with a custom recoverer (`BiConsumer`) and a `BackOff` that controls the delivery attempts and delays between each.
Using a `FixedBackOff` with `FixedBackOff.UNLIMITED_ATTEMPTS` causes (effectively) infinite retries.
The following example configures recovery after three tries:

```
DefaultErrorHandler errorHandler =
    new DefaultErrorHandler((record, exception) -> {
        // recover after 3 failures, with no back off - e.g. send to a dead-letter topic
    }, new FixedBackOff(0L, 2L));
```

To configure the listener container with a customized instance of this handler, add it to the container factory.

For example, with the `@KafkaListener` container factory, you can add `DefaultErrorHandler` as follows:

```
@Bean
public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<String, String> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory());
    factory.getContainerProperties().setAckOnError(false);
    factory.getContainerProperties().setAckMode(AckMode.RECORD);
    factory.setCommonErrorHandler(new DefaultErrorHandler(new FixedBackOff(1000L, 2L)));
    return factory;
}
```

For a record listener, this will retry a delivery up to 2 times (3 delivery attempts) with a back off of 1 second, instead of the default configuration (`FixedBackOff(0L, 9)`).
Failures are simply logged after retries are exhausted.

For example, if the `poll` returns six records (two from each of partitions 0, 1, and 2) and the listener throws an exception on the fourth record, the container acknowledges the first three messages by committing their offsets.
The `DefaultErrorHandler` seeks to offset 1 for partition 1 and offset 0 for partition 2.
The next `poll()` returns the three unprocessed records.

If the `AckMode` was `BATCH`, the container commits the offsets for the first two partitions before calling the error handler.

For a batch listener, the listener must throw a `BatchListenerFailedException` indicating which records in the batch failed.
The sequence of events is:

* Commit the offsets of the records before the index.

* If retries are not exhausted, perform seeks so that all the remaining records (including the failed record) will be redelivered.

* If retries are exhausted, attempt recovery of the failed record (default log only) and perform seeks so that the remaining records (excluding the failed record) will be redelivered.
  The recovered record’s offset is committed.

* If retries are exhausted and recovery fails, seeks are performed as if retries are not exhausted.

The default recoverer logs the failed record after retries are exhausted.
You can use a custom recoverer, or one provided by the framework, such as the [`DeadLetterPublishingRecoverer`](#dead-letters).

When using a POJO batch listener (e.g. `List<Thing>`), and you don’t have the full consumer record to add to the exception, you can just add the index of the record that failed:

```
@KafkaListener(id = "recovering", topics = "someTopic")
public void listen(List<Thing> things) {
    for (int i = 0; i < things.size(); i++) {
        try {
            process(things.get(i));
        }
        catch (Exception e) {
            throw new BatchListenerFailedException("Failed to process", i);
        }
    }
}
```

When the container is configured with `AckMode.MANUAL_IMMEDIATE`, the error handler can be configured to commit the offset of recovered records; set the `commitRecovered` property to `true`.

See also [Publishing Dead-letter Records](#dead-letters).

When using transactions, similar functionality is provided by the `DefaultAfterRollbackProcessor`.
See [After-rollback Processor](#after-rollback).

The `DefaultErrorHandler` considers certain exceptions to be fatal, and retries are skipped for such exceptions; the recoverer is invoked on the first failure.
The exceptions that are considered fatal, by default, are:

* `DeserializationException`

* `MessageConversionException`

* `ConversionException`

* `MethodArgumentResolutionException`

* `NoSuchMethodException`

* `ClassCastException`

since these exceptions are unlikely to be resolved on a retried delivery.

You can add more exception types to the not-retryable category, or completely replace the map of classified exceptions.
See the Javadocs for `DefaultErrorHandler.addNotRetryableExceptions()` and `DefaultErrorHandler.setClassifications()` for more information, as well as those for the `spring-retry` `BinaryExceptionClassifier`.

Here is an example that adds `IllegalArgumentException` to the not-retryable exceptions:

```
@Bean
public DefaultErrorHandler errorHandler(ConsumerRecordRecoverer recoverer) {
    DefaultErrorHandler handler = new DefaultErrorHandler(recoverer);
    handler.addNotRetryableExceptions(IllegalArgumentException.class);
    return handler;
}
```

The error handler can be configured with one or more `RetryListener` instances, receiving notifications of retry and recovery progress.

```
@FunctionalInterface
public interface RetryListener {

    void failedDelivery(ConsumerRecord<?, ?> record, Exception ex, int deliveryAttempt);

    default void recovered(ConsumerRecord<?, ?> record, Exception ex) {
    }

    default void recoveryFailed(ConsumerRecord<?, ?> record, Exception original, Exception failure) {
    }

}
```

See the javadocs for more information.
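As a sketch of registering one (the recoverer, back off, and SLF4J logger are illustrative; since `failedDelivery()` is the only abstract method, a lambda suffices):

```
DefaultErrorHandler errorHandler = new DefaultErrorHandler(recoverer, new FixedBackOff(1000L, 2L));
// called once per failed delivery attempt, before the next retry or recovery
errorHandler.setRetryListeners((record, ex, deliveryAttempt) ->
        logger.warn("Delivery attempt {} failed for {}-{}@{}",
                deliveryAttempt, record.topic(), record.partition(), record.offset(), ex));
```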
| |If the recoverer fails (throws an exception), the failed record will be included in the seeks. If the recoverer fails, the `BackOff` will be reset by default and redeliveries will again go through the back offs before recovery is attempted again. To skip retries after a recovery failure, set the error handler’s `resetStateOnRecoveryFailure` to `false`.|
|---|---|
You can provide the error handler with a `BiFunction<ConsumerRecord<?, ?>, Exception, BackOff>` to determine the `BackOff` to use, based on the failed record and/or the exception:

```
handler.setBackOffFunction((record, ex) -> { ... });
```

If the function returns `null`, the handler’s default `BackOff` will be used.

Set `resetStateOnExceptionChange` to `true` and the retry sequence will be restarted (including the selection of a new `BackOff`, if so configured) if the exception type changes between failures.
By default, the exception type is not considered.

Also see [Delivery Attempts Header](#delivery-header).

#### 4.1.21. Conversion Errors with Batch Error Handlers

Starting with version 2.8, batch listeners can now properly handle conversion errors when using a `MessageConverter` with a `ByteArrayDeserializer`, a `BytesDeserializer`, or a `StringDeserializer`, as well as a `DefaultErrorHandler`.
When a conversion error occurs, the payload is set to `null` and a deserialization exception is added to the record headers, similar to the `ErrorHandlingDeserializer`.
A list of `ConversionException` instances is available in the listener, so the listener can throw a `BatchListenerFailedException` indicating the first index at which a conversion exception occurred.

Example:

```
@KafkaListener(id = "test", topics = "topic")
void listen(List<Foo> in, @Header(KafkaHeaders.CONVERSION_FAILURES) List<ConversionException> exceptions) {
    for (int i = 0; i < in.size(); i++) {
        Foo foo = in.get(i);
        if (foo == null && exceptions.get(i) != null) {
            throw new BatchListenerFailedException("Conversion error", exceptions.get(i), i);
        }
        process(foo);
    }
}
```

##### Retrying Complete Batches

This is now the fallback behavior of the `DefaultErrorHandler` for a batch listener where the listener throws an exception other than a `BatchListenerFailedException`.

There is no guarantee that, when a batch is redelivered, the batch has the same number of records and/or the redelivered records are in the same order.
It is impossible, therefore, to easily maintain retry state for a batch.
The `FallbackBatchErrorHandler` takes the following approach.
If a batch listener throws an exception that is not a `BatchListenerFailedException`, the retries are performed from the in-memory batch of records.
In order to avoid a rebalance during an extended retry sequence, the error handler pauses the consumer, polls it before sleeping for the back off, for each retry, and calls the listener again.
If/when retries are exhausted, the `ConsumerRecordRecoverer` is called for each record in the batch.
If the recoverer throws an exception, or the thread is interrupted during its sleep, the batch of records will be redelivered on the next poll.
Before exiting, regardless of the outcome, the consumer is resumed.
| |This mechanism cannot be used with transactions.|
|---|---|

While waiting for a `BackOff` interval, the error handler will loop with a short sleep until the desired delay is reached, while checking to see if the container has been stopped, allowing the sleep to exit soon after the `stop()` rather than causing a delay.

##### Container Stopping Error Handlers

The `CommonContainerStoppingErrorHandler` stops the container if the listener throws an exception.
For record listeners, when the `AckMode` is `RECORD`, offsets for already processed records are committed.
For record listeners, when the `AckMode` is any manual value, offsets for already acknowledged records are committed.
For record listeners, when the `AckMode` is `BATCH`, or for batch listeners, the entire batch is replayed when the container is restarted.

After the container stops, an exception that wraps the `ListenerExecutionFailedException` is thrown.
This is to cause the transaction to roll back (if transactions are enabled).

##### Delegating Error Handler

The `CommonDelegatingErrorHandler` can delegate to different error handlers, depending on the exception type.
For example, you may wish to invoke a `DefaultErrorHandler` for most exceptions, or a `CommonContainerStoppingErrorHandler` for others.
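A minimal sketch of wiring such a combination (the exception type `MyFatalException` is hypothetical; `addDelegate()` registers a handler for a specific exception type):

```
DefaultErrorHandler defaultHandler = new DefaultErrorHandler(new FixedBackOff(1000L, 2L));
CommonDelegatingErrorHandler delegating = new CommonDelegatingErrorHandler(defaultHandler);
// stop the container for this (assumed fatal) exception; everything else gets default handling
delegating.addDelegate(MyFatalException.class, new CommonContainerStoppingErrorHandler());
```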
##### Logging Error Handler

The `CommonLoggingErrorHandler` simply logs the exception; with a record listener, the remaining records from the previous poll are passed to the listener.
For a batch listener, all the records in the batch are logged.

##### Using Different Common Error Handlers for Record and Batch Listeners

If you wish to use a different error handling strategy for record and batch listeners, the `CommonMixedErrorHandler` is provided, allowing the configuration of a specific error handler for each listener type.

##### Common Error Handler Summary

* `DefaultErrorHandler`

* `CommonContainerStoppingErrorHandler`

* `CommonDelegatingErrorHandler`

* `CommonLoggingErrorHandler`

* `CommonMixedErrorHandler`

##### Legacy Error Handlers and Their Replacements

| Legacy Error Handler | Replacement |
|---|---|
| `LoggingErrorHandler` | `CommonLoggingErrorHandler` |
| `BatchLoggingErrorHandler` | `CommonLoggingErrorHandler` |
| `ConditionalDelegatingErrorHandler` | `CommonDelegatingErrorHandler` |
| `ConditionalDelegatingBatchErrorHandler` | `CommonDelegatingErrorHandler` |
| `ContainerStoppingErrorHandler` | `CommonContainerStoppingErrorHandler` |
| `ContainerStoppingBatchErrorHandler` | `CommonContainerStoppingErrorHandler` |
| `SeekToCurrentErrorHandler` | `DefaultErrorHandler` |
| `SeekToCurrentBatchErrorHandler` | No replacement; use `DefaultErrorHandler` with an infinite `BackOff`. |
| `RecoveringBatchErrorHandler` | `DefaultErrorHandler` |
| `RetryingBatchErrorHandler` | No replacement; use `DefaultErrorHandler` and throw an exception other than `BatchListenerFailedException`. |

##### After-rollback Processor

When using transactions, if the listener throws an exception (and an error handler, if present, throws an exception), the transaction is rolled back.
By default, any unprocessed records (including the failed record) are re-fetched on the next poll.
This is achieved by performing `seek` operations in the `DefaultAfterRollbackProcessor`.
With a batch listener, the entire batch of records is reprocessed (the container has no knowledge of which record in the batch failed).
To modify this behavior, you can configure the listener container with a custom `AfterRollbackProcessor`.
For example, with a record-based listener, you might want to keep track of the failed record and give up after some number of attempts, perhaps by publishing it to a dead-letter topic.

Starting with version 2.2, the `DefaultAfterRollbackProcessor` can now recover (skip) a record that keeps failing.
By default, after ten failures, the failed record is logged (at the `ERROR` level).
You can configure the processor with a custom recoverer (`BiConsumer`) and a `BackOff` that controls the delivery attempts and the delays between them.
The following example configures recovery after three tries:

```
AfterRollbackProcessor<String, String> processor =
    new DefaultAfterRollbackProcessor<>((record, exception) -> {
        // recover after 3 failures, with no back off - e.g. send to a dead-letter topic
    }, new FixedBackOff(0L, 2L));
```

When you do not use transactions, you can achieve similar functionality by configuring a `DefaultErrorHandler`.
See [Container Error Handlers](#error-handlers).
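A sketch of attaching the processor from the example above to a container factory (the consumer factory bean is assumed from earlier sections; verify the setter name against your version of the listener container factory API):

```
ConcurrentKafkaListenerContainerFactory<String, String> factory =
        new ConcurrentKafkaListenerContainerFactory<>();
factory.setConsumerFactory(consumerFactory());
factory.setAfterRollbackProcessor(processor); // the processor from the example above
```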
| |Recovery is not possible with a batch listener, since the framework has no knowledge about which record in the batch keeps failing. In such cases, the application listener must handle a record that keeps failing.|
|---|---|

See also [Publishing Dead-letter Records](#dead-letters).

Starting with version 2.2.5, the `DefaultAfterRollbackProcessor` can be invoked in a new transaction (started after the failed transaction rolls back).
Then, if you are using the `DeadLetterPublishingRecoverer` to publish a failed record, the processor will send the recovered record’s offset in the original topic/partition to the transaction.
To enable this feature, set the `commitRecovered` and `kafkaTemplate` properties on the `DefaultAfterRollbackProcessor`.
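For illustration, a sketch of enabling this (assuming a `KafkaTemplate` named `template`; the four-argument constructor shown, taking the recoverer, `BackOff`, `KafkaOperations`, and `commitRecovered` flag, should be verified against your version):

```
DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template);
AfterRollbackProcessor<String, String> processor = new DefaultAfterRollbackProcessor<>(
        recoverer, new FixedBackOff(0L, 2L), template, true); // commitRecovered = true
```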
| |If the recoverer fails (throws an exception), the failed record will be included in the seeks. Starting with version 2.5.5, if the recoverer fails, the `BackOff` will be reset by default and redeliveries will again go through the back offs before recovery is attempted again. With earlier versions, the `BackOff` was not reset and recovery was re-attempted on the next failure. To revert to the previous behavior, set the processor’s `resetStateOnRecoveryFailure` property to `false`.|
|---|---|

Starting with version 2.6, you can now provide the processor with a `BiFunction<ConsumerRecord<?, ?>, Exception, BackOff>` to determine the `BackOff` to use, based on the failed record and/or the exception:

```
processor.setBackOffFunction((record, ex) -> { ... });
```

If the function returns `null`, the processor’s default `BackOff` will be used.

Starting with version 2.6.3, set `resetStateOnExceptionChange` to `true` and the retry sequence will be restarted (including the selection of a new `BackOff`, if so configured) if the exception type changes between failures.
By default, the exception type is not considered.

Starting with version 2.3.1, similar to the `DefaultErrorHandler`, the `DefaultAfterRollbackProcessor` considers certain exceptions to be fatal, and retries are skipped for such exceptions; the recoverer is invoked on the first failure.
The exceptions that are considered fatal, by default, are:

* `DeserializationException`

* `MessageConversionException`

* `ConversionException`

* `MethodArgumentResolutionException`

* `NoSuchMethodException`

* `ClassCastException`

since these exceptions are unlikely to be resolved on a retried delivery.

You can add more exception types to the not-retryable category, or completely replace the map of classified exceptions.
See the Javadocs for `DefaultAfterRollbackProcessor.setClassifications()` for more information, as well as those for the `spring-retry` `BinaryExceptionClassifier`.

Here is an example that adds `IllegalArgumentException` to the not-retryable exceptions:

```
@Bean
public DefaultAfterRollbackProcessor errorHandler(BiConsumer<ConsumerRecord<?, ?>, Exception> recoverer) {
    DefaultAfterRollbackProcessor processor = new DefaultAfterRollbackProcessor(recoverer);
    processor.addNotRetryableExceptions(IllegalArgumentException.class);
    return processor;
}
```

Also see [Delivery Attempts Header](#delivery-header).
| |With current `kafka-clients`, the container cannot detect whether a `ProducerFencedException` is caused by a rebalance or whether the producer’s `transactional.id` has been revoked due to a timeout or expiry. Because, in most cases, it is caused by a rebalance, the container does not call the `AfterRollbackProcessor` (because it is not appropriate to seek the partitions, as we are no longer assigned them). If you ensure the timeout is large enough to process each transaction and periodically perform an "empty" transaction (e.g. via a `ListenerContainerIdleEvent`), you can avoid fencing due to timeout and expiry. Or, you can set the `stopContainerWhenFenced` container property to `true` and the container will stop, avoiding the loss of records. You can consume a `ConsumerStoppedEvent` and check the `Reason` property for `FENCED` to detect this condition. Since the event also has a reference to the container, you can restart the container using this event.|
|---|---|

Starting with version 2.7, while waiting for a `BackOff` interval, the error handler will loop with a short sleep until the desired delay is reached, while checking to see if the container has been stopped, allowing the sleep to exit soon after the `stop()` rather than causing a delay.

Starting with version 2.7, the processor can be configured with one or more `RetryListener` instances, receiving notifications of retry and recovery progress.

```
@FunctionalInterface
public interface RetryListener {

    void failedDelivery(ConsumerRecord<?, ?> record, Exception ex, int deliveryAttempt);

    default void recovered(ConsumerRecord<?, ?> record, Exception ex) {
    }

    default void recoveryFailed(ConsumerRecord<?, ?> record, Exception original, Exception failure) {
    }

}
```

See the javadocs for more information.

##### Delivery Attempts Header

The following applies to record listeners only, not batch listeners.

Starting with version 2.5, when using an `ErrorHandler` or `AfterRollbackProcessor` that implements `DeliveryAttemptAware`, it is possible to enable the addition of the `KafkaHeaders.DELIVERY_ATTEMPT` header (`kafka_deliveryAttempt`) to the record.
The value of this header is an incrementing integer starting at 1.
When receiving a raw `ConsumerRecord<?, ?>`, the integer is in a `byte[4]`.

```
int delivery = ByteBuffer.wrap(record.headers()
    .lastHeader(KafkaHeaders.DELIVERY_ATTEMPT).value())
    .getInt();
```

When using `@KafkaListener` with the `DefaultKafkaHeaderMapper` or `SimpleKafkaHeaderMapper`, it can be obtained by adding `@Header(KafkaHeaders.DELIVERY_ATTEMPT) int delivery` as a parameter to the listener method.

To enable population of this header, set the container property `deliveryAttemptHeader` to `true`.
It is disabled by default to avoid the (small) overhead of looking up the state for each record and adding the header.

The `DefaultErrorHandler` and `DefaultAfterRollbackProcessor` support this feature.
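For example (a sketch; the listener id and topic are illustrative, and the `deliveryAttemptHeader` container property must be `true` as described above):

```
@KafkaListener(id = "deliveryAttempts", topics = "someTopic")
void listen(String in, @Header(KafkaHeaders.DELIVERY_ATTEMPT) int delivery) {
    // delivery is 1 on the first attempt and increments with each redelivery
}
```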
##### Publishing Dead-letter Records

You can configure the `DefaultErrorHandler` and `DefaultAfterRollbackProcessor` with a record recoverer when the maximum number of failures is reached for a record.
The framework provides the `DeadLetterPublishingRecoverer`, which publishes the failed message to another topic.
The recoverer requires a `KafkaTemplate<Object, Object>`, which is used to send the record.
You can also, optionally, configure it with a `BiFunction<ConsumerRecord<?, ?>, Exception, TopicPartition>`, which is called to resolve the destination topic and partition.
| |By default, the dead-letter record is sent to a topic named `<originalTopic>.DLT` (the original topic name suffixed with `.DLT`) and to the same partition as the original record. Therefore, when you use the default resolver, the dead-letter topic **must have at least as many partitions as the original topic**.|
|---|---|

If the returned `TopicPartition` has a negative partition, the partition is not set in the `ProducerRecord`, so the partition is selected by Kafka.
Starting with version 2.2.4, any `ListenerExecutionFailedException` (thrown, for example, when an exception is detected in a `@KafkaListener` method) is enhanced with the `groupId` property.
This allows the destination resolver to use this, in addition to the information in the `ConsumerRecord`, to select the dead letter topic.

The following example shows how to wire a custom destination resolver:

```
DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template,
        (r, e) -> {
            if (e instanceof FooException) {
                return new TopicPartition(r.topic() + ".Foo.failures", r.partition());
            }
            else {
                return new TopicPartition(r.topic() + ".other.failures", r.partition());
            }
        });
CommonErrorHandler errorHandler = new DefaultErrorHandler(recoverer, new FixedBackOff(0L, 2L));
```

The record sent to the dead-letter topic is enhanced with the following headers:

* `KafkaHeaders.DLT_EXCEPTION_FQCN`: The Exception class name (generally a `ListenerExecutionFailedException`, but can be others).

* `KafkaHeaders.DLT_EXCEPTION_CAUSE_FQCN`: The Exception cause class name, if present (since version 2.8).

* `KafkaHeaders.DLT_EXCEPTION_STACKTRACE`: The Exception stack trace.

* `KafkaHeaders.DLT_EXCEPTION_MESSAGE`: The Exception message.

* `KafkaHeaders.DLT_KEY_EXCEPTION_FQCN`: The Exception class name (key deserialization errors only).

* `KafkaHeaders.DLT_KEY_EXCEPTION_STACKTRACE`: The Exception stack trace (key deserialization errors only).

* `KafkaHeaders.DLT_KEY_EXCEPTION_MESSAGE`: The Exception message (key deserialization errors only).

* `KafkaHeaders.DLT_ORIGINAL_TOPIC`: The original topic.

* `KafkaHeaders.DLT_ORIGINAL_PARTITION`: The original partition.

* `KafkaHeaders.DLT_ORIGINAL_OFFSET`: The original offset.

* `KafkaHeaders.DLT_ORIGINAL_TIMESTAMP`: The original timestamp.

* `KafkaHeaders.DLT_ORIGINAL_TIMESTAMP_TYPE`: The original timestamp type.

* `KafkaHeaders.DLT_ORIGINAL_CONSUMER_GROUP`: The original consumer group that failed to process the record (since version 2.8).

Key exceptions are only caused by `DeserializationException` instances, so there is no `DLT_KEY_EXCEPTION_CAUSE_FQCN`.

There are two mechanisms to add more headers.

1. Subclass the recoverer and override `createProducerRecord()`; call `super.createProducerRecord()` and add more headers.

2. Provide a `BiFunction` to receive the consumer record and exception, returning a `Headers` object; headers from there will be copied to the final producer record.
   Use `setHeadersFunction()` to set the `BiFunction`.

The second is simpler to implement, but the first has more information available, including the already assembled standard headers.
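A sketch of the second mechanism (the header name and value are illustrative):

```
recoverer.setHeadersFunction((record, ex) -> {
    Headers headers = new RecordHeaders();
    // this extra header is copied to the outgoing dead-letter record
    headers.add("x-failed-listener", "myListener".getBytes(StandardCharsets.UTF_8)); // hypothetical header
    return headers;
});
```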
Starting with version 2.3, when used in conjunction with an `ErrorHandlingDeserializer`, the publisher will restore the record `value()`, in the dead-letter producer record, to the original value that failed to be deserialized.
Previously, the `value()` was null and user code had to decode the `DeserializationException` from the message headers.
In addition, you can provide multiple `KafkaTemplate` instances to the publisher; this might be needed, for example, if you want to publish the `byte[]` from a `DeserializationException`, as well as values using a different serializer from records that were deserialized successfully.
Here is an example of configuring the publisher with `KafkaTemplate` instances that use a `String` and `byte[]` serializer:

```
@Bean
public DeadLetterPublishingRecoverer publisher(KafkaTemplate<?, ?> stringTemplate,
        KafkaTemplate<?, ?> bytesTemplate) {

    Map<Class<?>, KafkaTemplate<?, ?>> templates = new LinkedHashMap<>();
    templates.put(String.class, stringTemplate);
    templates.put(byte[].class, bytesTemplate);
    return new DeadLetterPublishingRecoverer(templates);
}
```

The publisher uses the map keys to locate a template that is suitable for the `value()` about to be published.
A `LinkedHashMap` is recommended so that the keys are examined in order.

When publishing `null` values and there are multiple templates, the recoverer will look for a template for the `Void` class; if none is present, the first template from the `values().iterator()` will be used.

Since 2.7 you can use the `setFailIfSendResultIsError` method so that an exception is thrown when message publishing fails.
You can also set a timeout for the verification of the sender success with `setWaitForSendResultTimeout`.
| |If the recoverer fails (throws an exception), the failed record will be included in the seeks. Starting with version 2.5.5, if the recoverer fails, the `BackOff` will be reset by default and redeliveries will again go through the back offs before recovery is attempted again. With earlier versions, the `BackOff` was not reset and recovery was re-attempted on the next failure. To revert to the previous behavior, set the error handler’s `resetStateOnRecoveryFailure` property to `false`.|
|---|---|

Starting with version 2.6.3, set `resetStateOnExceptionChange` to `true` and the retry sequence will be restarted (including the selection of a new `BackOff`, if so configured) if the exception type changes between failures.
By default, the exception type is not considered.

Starting with version 2.3, the recoverer can also be used with Kafka Streams - see [Recovery from Deserialization Exceptions](#streams-deser-recovery) for more information.

The `ErrorHandlingDeserializer` adds the deserialization exception(s) in the headers `ErrorHandlingDeserializer.VALUE_DESERIALIZER_EXCEPTION_HEADER` and `ErrorHandlingDeserializer.KEY_DESERIALIZER_EXCEPTION_HEADER` (using Java serialization).
By default, these headers are not retained in the message published to the dead letter topic.
Starting with version 2.7, if both the key and value fail deserialization, the original values of both are populated in the record sent to the DLT.

If incoming records are dependent on each other, but may arrive out of order, it may be useful to republish a failed record to the tail of the original topic (for some number of times), instead of sending it directly to the dead letter topic.
See [this Stack Overflow Question](https://stackoverflow.com/questions/64646996) for an example.

The following error handler configuration will do exactly that:

```
@Bean
public DefaultErrorHandler eh(KafkaOperations<String, String> template) {
    return new DefaultErrorHandler(new DeadLetterPublishingRecoverer(template,
            (rec, ex) -> {
                org.apache.kafka.common.header.Header retries = rec.headers().lastHeader("retries");
                if (retries == null) {
                    retries = new RecordHeader("retries", new byte[] { 1 });
                    rec.headers().add(retries);
                }
                else {
                    retries.value()[0]++;
                }
                return retries.value()[0] > 5
                        ? new TopicPartition("topic.DLT", rec.partition())
                        : new TopicPartition("topic", rec.partition());
            }), new FixedBackOff(0L, 0L));
}
```

Starting with version 2.7, the recoverer checks that the partition selected by the destination resolver actually exists.
If the partition is not present, the partition in the `ProducerRecord` is set to `null`, allowing the `KafkaProducer` to select the partition.
You can disable this check by setting the `verifyPartition` property to `false`.

##### Managing Dead Letter Record Headers

Referring to [Publishing Dead-letter Records](#dead-letters) above, the `DeadLetterPublishingRecoverer` has two properties used to manage headers when those headers already exist (such as when reprocessing a dead letter record that failed, including when using [Non-Blocking Retries](#retry-topic)).
* `appendOriginalHeaders` (default `true`)

* `stripPreviousExceptionHeaders` (default `true` since version 2.8)

Apache Kafka supports multiple headers with the same name; to obtain the "latest" value, you can use `headers.lastHeader(headerName)`; to get an iterator over multiple headers, use `headers.headers(headerName).iterator()`.

When repeatedly republishing a failed record, these headers can grow (and eventually cause publication to fail due to a `RecordTooLargeException`); this is especially true for the exception headers and particularly for the stack trace headers.

The reason for the two properties is that, while you might want to retain only the last exception information, you might want to retain the history of which topic(s) the record passed through for each failure.

`appendOriginalHeaders` is applied to all headers named `*ORIGINAL*`, while `stripPreviousExceptionHeaders` is applied to all headers named `*EXCEPTION*`.

Also see [Failure Header Management](#retry-headers) with [Non-Blocking Retries](#retry-topic).

##### `ExponentialBackOffWithMaxRetries` Implementation

Spring Framework provides a number of `BackOff` implementations.
By default, the `ExponentialBackOff` will retry indefinitely; to give up after some number of retry attempts requires calculating the `maxElapsedTime`.
Since version 2.7.3, Spring for Apache Kafka provides the `ExponentialBackOffWithMaxRetries`, which is a subclass that receives the `maxRetries` property and automatically calculates the `maxElapsedTime`, which is a little more convenient.

```
@Bean
DefaultErrorHandler handler() {
    ExponentialBackOffWithMaxRetries bo = new ExponentialBackOffWithMaxRetries(6);
    bo.setInitialInterval(1_000L);
    bo.setMultiplier(2.0);
    bo.setMaxInterval(10_000L);
    return new DefaultErrorHandler(myRecoverer, bo);
}
```

This will retry after `1, 2, 4, 8, 10, 10` seconds, before calling the recoverer.

#### 4.1.22. JAAS and Kerberos

Starting with version 2.0, a `KafkaJaasLoginModuleInitializer` class has been added to assist with Kerberos configuration.
You can add this bean, with the desired configuration, to your application context.
The following example configures such a bean:

```
@Bean
public KafkaJaasLoginModuleInitializer jaasConfig() throws IOException {
    KafkaJaasLoginModuleInitializer jaasConfig = new KafkaJaasLoginModuleInitializer();
    jaasConfig.setControlFlag("REQUIRED");
    Map<String, String> options = new HashMap<>();
    options.put("useKeyTab", "true");
    options.put("storeKey", "true");
    options.put("keyTab", "/etc/security/keytabs/kafka_client.keytab");
    options.put("principal", "kafka-client-1@EXAMPLE.COM");
    jaasConfig.setOptions(options);
    return jaasConfig;
}
```

### 4.2. Apache Kafka Streams Support

Starting with version 1.1.4, Spring for Apache Kafka provides first-class support for [Kafka Streams](https://kafka.apache.org/documentation/streams).
To use it from a Spring application, the `kafka-streams` jar must be present on the classpath.
It is an optional dependency of the Spring for Apache Kafka project and is not downloaded transitively.

#### 4.2.1. Basics

The reference Apache Kafka Streams documentation suggests the following way of using the API:
+ +StreamsBuilder builder = ...; // when using the Kafka Streams DSL + +// Use the configuration to tell your application where the Kafka cluster is, +// which serializers/deserializers to use by default, to specify security settings, +// and so on. +StreamsConfig config = ...; + +KafkaStreams streams = new KafkaStreams(builder, config); + +// Start the Kafka Streams instance +streams.start(); + +// Stop the Kafka Streams instance +streams.close(); +``` + +So, we have two main components: + +* `StreamsBuilder`: With an API to build `KStream` (or `KTable`) instances. + +* `KafkaStreams`: To manage the lifecycle of those instances. + +| |All `KStream` instances exposed to a `KafkaStreams` instance by a single `StreamsBuilder` are started and stopped at the same time, even if they have different logic.
In other words, all streams defined by a `StreamsBuilder` are tied to a single lifecycle control.
Once a `KafkaStreams` instance has been closed by `streams.close()`, it cannot be restarted.
Instead, a new `KafkaStreams` instance must be created to restart stream processing.|
+|---|---|
+
+#### 4.2.2. Spring Management
+
+To simplify using Kafka Streams from the Spring application context perspective and to manage the lifecycle through a container, Spring for Apache Kafka introduces `StreamsBuilderFactoryBean`.
+This is an `AbstractFactoryBean` implementation that exposes a `StreamsBuilder` singleton instance as a bean.
+The following example creates such a bean:
+
+```
+@Bean
+public FactoryBean<StreamsBuilder> myKStreamBuilder(KafkaStreamsConfiguration streamsConfig) {
+    return new StreamsBuilderFactoryBean(streamsConfig);
+}
+```
+
+| |Starting with version 2.2, the stream configuration is now provided as a `KafkaStreamsConfiguration` object rather than a `StreamsConfig`.|
+|---|---|
+
+The `StreamsBuilderFactoryBean` also implements `SmartLifecycle` to manage the lifecycle of an internal `KafkaStreams` instance.
+Similar to the Kafka Streams API, you must define the `KStream` instances before you start the `KafkaStreams`.
+That also applies to the Spring API for Kafka Streams.
+Therefore, when you use the default `autoStartup = true` on the `StreamsBuilderFactoryBean`, you must declare `KStream` instances on the `StreamsBuilder` before the application context is refreshed.
+For example, `KStream` can be a regular bean definition, while the Kafka Streams API is used without any impact.
+The following example shows how to do so:
+
+```
+@Bean
+public KStream<Integer, String> kStream(StreamsBuilder kStreamBuilder) {
+    KStream<Integer, String> stream = kStreamBuilder.stream(STREAMING_TOPIC1);
+    // Fluent KStream API
+    return stream;
+}
+```
+
+If you would like to control the lifecycle manually (for example, stopping and starting by some condition), you can reference the `StreamsBuilderFactoryBean` bean directly by using the factory bean (`&`) [prefix](https://docs.spring.io/spring/docs/current/spring-framework-reference/html/beans.html#beans-factory-extension-factorybean).
+Since `StreamsBuilderFactoryBean` uses its internal `KafkaStreams` instance, it is safe to stop and restart it again.
+A new `KafkaStreams` is created on each `start()`.
+You might also consider using different `StreamsBuilderFactoryBean` instances if you would like to control the lifecycles of `KStream` instances separately.
+
+You can also specify `KafkaStreams.StateListener`, `Thread.UncaughtExceptionHandler`, and `StateRestoreListener` options on the `StreamsBuilderFactoryBean`, which are delegated to the internal `KafkaStreams` instance.
+Also, apart from setting those options indirectly on `StreamsBuilderFactoryBean`, starting with *version 2.1.5*, you can use a `KafkaStreamsCustomizer` callback interface to configure an inner `KafkaStreams` instance.
+Note that `KafkaStreamsCustomizer` overrides the options provided by `StreamsBuilderFactoryBean`.
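+
+As a brief illustration, a customizer could register a state listener on the internal `KafkaStreams` instance before it is started; the following is a minimal sketch (the bean name and the listener body are illustrative assumptions, not part of the API):
+
+```
+@Bean
+public StreamsBuilderFactoryBean customizedKStreamBuilder(KafkaStreamsConfiguration streamsConfig) {
+    StreamsBuilderFactoryBean factoryBean = new StreamsBuilderFactoryBean(streamsConfig);
+    // the customizer is called with the internal KafkaStreams instance after it is
+    // created and before it is started, so listeners can still be registered here
+    factoryBean.setKafkaStreamsCustomizer(kafkaStreams ->
+            kafkaStreams.setStateListener((newState, oldState) ->
+                    System.out.println("State transition from " + oldState + " to " + newState)));
+    return factoryBean;
+}
+```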
+If you need to perform some `KafkaStreams` operations directly, you can access the internal `KafkaStreams` instance by using `StreamsBuilderFactoryBean.getKafkaStreams()`.
+You can autowire the `StreamsBuilderFactoryBean` bean by type, but you should be sure to use the full type in the bean definition, as the following example shows:
+
+```
+@Bean
+public StreamsBuilderFactoryBean myKStreamBuilder(KafkaStreamsConfiguration streamsConfig) {
+    return new StreamsBuilderFactoryBean(streamsConfig);
+}
+...
+@Autowired
+private StreamsBuilderFactoryBean myKStreamBuilderFactoryBean;
+```
+
+Alternatively, you can add `@Qualifier` for injection by name if you use an interface bean definition.
+The following example shows how to do so:
+
+```
+@Bean
+public FactoryBean<StreamsBuilder> myKStreamBuilder(KafkaStreamsConfiguration streamsConfig) {
+    return new StreamsBuilderFactoryBean(streamsConfig);
+}
+...
+@Autowired
+@Qualifier("&myKStreamBuilder")
+private StreamsBuilderFactoryBean myKStreamBuilderFactoryBean;
+```
+
+Starting with version 2.4.1, the factory bean has a new property `infrastructureCustomizer` with type `KafkaStreamsInfrastructureCustomizer`; this allows customization of the `StreamsBuilder` (e.g. to add a state store) and/or the `Topology` before the stream is created.
+
+```
+public interface KafkaStreamsInfrastructureCustomizer {
+
+    void configureBuilder(StreamsBuilder builder);
+
+    void configureTopology(Topology topology);
+
+}
+```
+
+Default no-op implementations are provided to avoid having to implement both methods if one is not required.
+
+A `CompositeKafkaStreamsInfrastructureCustomizer` is provided, for when you need to apply multiple customizers.
+
+#### 4.2.3. KafkaStreams Micrometer Support
+
+Starting with version 2.5.3, you can configure a `KafkaStreamsMicrometerListener` to automatically register Micrometer meters for the `KafkaStreams` object managed by the factory bean:
+
+```
+streamsBuilderFactoryBean.addListener(new KafkaStreamsMicrometerListener(meterRegistry,
+        Collections.singletonList(new ImmutableTag("customTag", "customTagValue"))));
+```
+
+#### 4.2.4. Streams JSON Serialization and Deserialization
+
+For serializing and deserializing data when reading or writing to topics or state stores in JSON format, Spring for Apache Kafka provides a `JsonSerde` implementation that uses JSON, delegating to the `JsonSerializer` and `JsonDeserializer` described in [Serialization, Deserialization, and Message Conversion](#serdes).
+The `JsonSerde` implementation provides the same configuration options through its constructor (target type or `ObjectMapper`).
+In the following example, we use the `JsonSerde` to serialize and deserialize the `Cat` payload of a Kafka stream (the `JsonSerde` can be used in a similar fashion wherever an instance is required):
+
+```
+stream.through(Serdes.Integer(), new JsonSerde<>(Cat.class), "cats");
+```
+
+When constructing the serializer/deserializer programmatically for use in the producer/consumer factory, since version 2.3, you can use the fluent API, which simplifies configuration.
+
+```
+stream.through(new JsonSerde<>(MyKeyType.class)
+        .forKeys()
+        .noTypeInfo(),
+    new JsonSerde<>(MyValueType.class)
+        .noTypeInfo(),
+    "myTypes");
+```
+
+#### 4.2.5. Using `KafkaStreamBrancher`
+
+The `KafkaStreamBrancher` class introduces a more convenient way to build conditional branches on top of `KStream`.
+
+Consider the following example that does not use `KafkaStreamBrancher`:
+
+```
+KStream<String, String>[] branches = builder.<String, String>stream("source").branch(
+        (key, value) -> value.contains("A"),
+        (key, value) -> value.contains("B"),
+        (key, value) -> true
+        );
+branches[0].to("A");
+branches[1].to("B");
+branches[2].to("C");
+```
+
+The following example uses `KafkaStreamBrancher`:
+
+```
+new KafkaStreamBrancher<String, String>()
+        .branch((key, value) -> value.contains("A"), ks -> ks.to("A"))
+        .branch((key, value) -> value.contains("B"), ks -> ks.to("B"))
+        //default branch does not have to be defined at the end of the chain!
+        .defaultBranch(ks -> ks.to("C"))
+        .onTopOf(builder.stream("source"));
+        //onTopOf method returns the provided stream so we can continue with method chaining
+```
+
+#### 4.2.6. Configuration
+
+To configure the Kafka Streams environment, the `StreamsBuilderFactoryBean` requires a `KafkaStreamsConfiguration` instance.
+See the Apache Kafka [documentation](https://kafka.apache.org/0102/documentation/#streamsconfigs) for all possible options.
+
+| |Starting with version 2.2, the stream configuration is now provided as a `KafkaStreamsConfiguration` object, rather than as a `StreamsConfig`.|
+|---|---|
+
+To avoid boilerplate code for most cases, especially when you develop microservices, Spring for Apache Kafka provides the `@EnableKafkaStreams` annotation, which you should place on a `@Configuration` class.
+All you need to do is declare a `KafkaStreamsConfiguration` bean named `defaultKafkaStreamsConfig`.
+A `StreamsBuilderFactoryBean` bean, named `defaultKafkaStreamsBuilder`, is automatically declared in the application context.
+You can declare and use any additional `StreamsBuilderFactoryBean` beans as well.
+You can perform additional customization of that bean by providing a bean that implements `StreamsBuilderFactoryBeanConfigurer`.
+If there are multiple such beans, they will be applied according to their `Ordered.order` property.
+
+By default, when the factory bean is stopped, the `KafkaStreams.cleanUp()` method is called.
+Starting with version 2.1.2, the factory bean has additional constructors, taking a `CleanupConfig` object that has properties to let you control whether the `cleanUp()` method is called during `start()` or `stop()` or neither.
+Starting with version 2.7, the default is to never clean up local state.
+
+#### 4.2.7. Header Enricher
+
+Version 2.3 added the `HeaderEnricher` implementation of `Transformer`.
+This can be used to add headers within the stream processing; the header values are SpEL expressions; the root object of the expression evaluation has three properties:
+
+* `context` - the `ProcessorContext`, allowing access to the current record metadata
+
+* `key` - the key of the current record
+
+* `value` - the value of the current record
+
+The expressions must return a `byte[]` or a `String` (which will be converted to `byte[]` using `UTF-8`).
+
+To use the enricher within a stream:
+
+```
+.transform(() -> enricher)
+```
+
+The transformer does not change the `key` or `value`; it simply adds headers.
+
+| |If your stream is multi-threaded, you need a new instance for each record.|
+|---|---|
+
+```
+.transform(() -> new HeaderEnricher<..., ...>(expressionMap))
+```
+
+Here is a simple example, adding one literal header and one variable:
+
+```
+Map<String, Expression> headers = new HashMap<>();
+headers.put("header1", new LiteralExpression("value1"));
+SpelExpressionParser parser = new SpelExpressionParser();
+headers.put("header2", parser.parseExpression("context.timestamp() + ' @' + context.offset()"));
+HeaderEnricher<String, String> enricher = new HeaderEnricher<>(headers);
+KStream<String, String> stream = builder.stream(INPUT);
+stream
+        .transform(() -> enricher)
+        .to(OUTPUT);
+```
+
+#### 4.2.8. `MessagingTransformer`
+
+Version 2.3 added the `MessagingTransformer`, which allows a Kafka Streams topology to interact with a Spring Messaging component, such as a Spring Integration flow.
+The transformer requires an implementation of `MessagingFunction`.
+
+```
+@FunctionalInterface
+public interface MessagingFunction {
+
+    Message<?> exchange(Message<?> message);
+
+}
+```
+
+Spring Integration automatically provides an implementation using its `GatewayProxyFactoryBean`.
+It also requires a `MessagingMessageConverter` to convert the key, value and metadata (including headers) to/from a Spring Messaging `Message`.
+See [Calling a Spring Integration Flow from a `KStream`](https://docs.spring.io/spring-integration/docs/current/reference/html/kafka.html#streams-integration) for more information.
+
+#### 4.2.9. Recovery from Deserialization Exceptions
+
+Version 2.3 introduced the `RecoveringDeserializationExceptionHandler`, which can take some action when a deserialization exception occurs.
+Refer to the Kafka documentation about `DeserializationExceptionHandler`, of which the `RecoveringDeserializationExceptionHandler` is an implementation.
+The `RecoveringDeserializationExceptionHandler` is configured with a `ConsumerRecordRecoverer` implementation.
+The framework provides the `DeadLetterPublishingRecoverer`, which sends the failed record to a dead-letter topic.
+See [Publishing Dead-letter Records](#dead-letters) for more information about this recoverer.
+
+To configure the recoverer, add the following properties to your streams configuration:
+
+```
+@Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME)
+public KafkaStreamsConfiguration kStreamsConfigs() {
+    Map<String, Object> props = new HashMap<>();
+    ...
+    props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
+            RecoveringDeserializationExceptionHandler.class);
+    props.put(RecoveringDeserializationExceptionHandler.KSTREAM_DESERIALIZATION_RECOVERER, recoverer());
+    ...
+    return new KafkaStreamsConfiguration(props);
+}
+
+@Bean
+public DeadLetterPublishingRecoverer recoverer() {
+    return new DeadLetterPublishingRecoverer(kafkaTemplate(),
+            (record, ex) -> new TopicPartition("recovererDLQ", -1));
+}
+```
+
+Of course, the `recoverer()` bean can be your own implementation of `ConsumerRecordRecoverer`.
+
+#### 4.2.10. Kafka Streams Example
+
+The following example combines all the topics we have covered in this chapter:
+
+```
+@Configuration
+@EnableKafka
+@EnableKafkaStreams
+public static class KafkaStreamsConfig {
+
+    @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME)
+    public KafkaStreamsConfiguration kStreamsConfigs() {
+        Map<String, Object> props = new HashMap<>();
+        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "testStreams");
+        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
+        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass().getName());
+        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
+        props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, WallclockTimestampExtractor.class.getName());
+        return new KafkaStreamsConfiguration(props);
+    }
+
+    @Bean
+    public StreamsBuilderFactoryBeanConfigurer configurer() {
+        return fb -> fb.setStateListener((newState, oldState) -> {
+            System.out.println("State transition from " + oldState + " to " + newState);
+        });
+    }
+
+    @Bean
+    public KStream<Integer, String> kStream(StreamsBuilder kStreamBuilder) {
+        KStream<Integer, String> stream = kStreamBuilder.stream("streamingTopic1");
+        stream
+                .mapValues((ValueMapper<String, String>) String::toUpperCase)
+                .groupByKey()
+                .windowedBy(TimeWindows.of(Duration.ofMillis(1000)))
+                .reduce((String value1, String value2) -> value1 + value2,
+                        Named.as("windowStore"))
+                .toStream()
+                .map((windowedId, value) -> new KeyValue<>(windowedId.key(), value))
+                .filter((i, s) -> s.length() > 40)
+                .to("streamingTopic2");
+
+        stream.print(Printed.toSysOut());
+
+        return stream;
+    }
+
+}
+```
+
+### 4.3. Testing Applications
+
+The `spring-kafka-test` jar contains some useful utilities to assist with testing your applications.
+
+#### 4.3.1. KafkaTestUtils
+
+`o.s.kafka.test.utils.KafkaTestUtils` provides a number of static helper methods to consume records, retrieve various record offsets, and more.
+Refer to its [Javadocs](https://docs.spring.io/spring-kafka/docs/current/api/org/springframework/kafka/test/utils/KafkaTestUtils.html) for complete details.
+
+#### 4.3.2. JUnit
+
+`o.s.kafka.test.utils.KafkaTestUtils` also provides some static methods to set up producer and consumer properties.
+The following listing shows those method signatures:
+
+```
+/**
+ * Set up test properties for an {@code <Integer, String>} consumer.
+ * @param group the group id.
+ * @param autoCommit the auto commit.
+ * @param embeddedKafka a {@link EmbeddedKafkaBroker} instance.
+ * @return the properties.
+ */
+public static Map<String, Object> consumerProps(String group, String autoCommit,
+        EmbeddedKafkaBroker embeddedKafka) { ... }
+
+/**
+ * Set up test properties for an {@code <Integer, String>} producer.
+ * @param embeddedKafka a {@link EmbeddedKafkaBroker} instance.
+ * @return the properties.
+ */
+public static Map<String, Object> producerProps(EmbeddedKafkaBroker embeddedKafka) { ... }
+```
+
+| |Starting with version 2.5, the `consumerProps` method sets the `ConsumerConfig.AUTO_OFFSET_RESET_CONFIG` to `earliest`.
This is because, in most cases, you want the consumer to consume any messages sent in a test case.
The `ConsumerConfig` default is `latest`, which means that the consumer will not receive records already sent by a test before the consumer starts.
To revert to the previous behavior, set the property to `latest` after calling the method.

When using the embedded broker, it is generally best practice to use a different topic for each test, to prevent cross-talk.
If this is not possible for some reason, note that the `consumeFromEmbeddedTopics` method’s default behavior is to seek the assigned partitions to the beginning after assignment.
Since it does not have access to the consumer properties, you must use the overloaded method that takes a `seekToEnd` boolean parameter to seek to the end instead of the beginning.|
+|---|---|
+
+A JUnit 4 `@Rule` wrapper for the `EmbeddedKafkaBroker` is provided to create an embedded Kafka and an embedded Zookeeper server.
+(See [@EmbeddedKafka Annotation](#embedded-kafka-annotation) for information about using `@EmbeddedKafka` with JUnit 5).
+The following listing shows the signatures of those methods:
+
+```
+/**
+ * Create embedded Kafka brokers.
+ * @param count the number of brokers.
+ * @param controlledShutdown passed into TestUtils.createBrokerConfig.
+ * @param topics the topics to create (2 partitions per).
+ */
+public EmbeddedKafkaRule(int count, boolean controlledShutdown, String... topics) { ... }
+
+/**
+ * Create embedded Kafka brokers.
+ * @param count the number of brokers.
+ * @param controlledShutdown passed into TestUtils.createBrokerConfig.
+ * @param partitions partitions per topic.
+ * @param topics the topics to create.
+ */
+public EmbeddedKafkaRule(int count, boolean controlledShutdown, int partitions, String... topics) { ... }
+```
+
+The `EmbeddedKafkaBroker` class has a utility method that lets you consume from all the topics it created.
+The following example shows how to use it:
+
+```
+Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("testT", "false", embeddedKafka);
+DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(
+        consumerProps);
+Consumer<Integer, String> consumer = cf.createConsumer();
+embeddedKafka.consumeFromAllEmbeddedTopics(consumer);
+```
+
+`KafkaTestUtils` has some utility methods to fetch results from the consumer.
+The following listing shows those method signatures:
+
+```
+/**
+ * Poll the consumer, expecting a single record for the specified topic.
+ * @param consumer the consumer.
+ * @param topic the topic.
+ * @return the record.
+ * @throws org.junit.ComparisonFailure if exactly one record is not received.
+ */
+public static <K, V> ConsumerRecord<K, V> getSingleRecord(Consumer<K, V> consumer, String topic) { ... }
+
+/**
+ * Poll the consumer for records.
+ * @param consumer the consumer.
+ * @return the records.
+ */
+public static <K, V> ConsumerRecords<K, V> getRecords(Consumer<K, V> consumer) { ... }
+```
+
+The following example shows how to use `KafkaTestUtils`:
+
+```
+...
+template.sendDefault(0, 2, "bar");
+ConsumerRecord<Integer, String> received = KafkaTestUtils.getSingleRecord(consumer, "topic");
+...
+```
+
+When the embedded Kafka and embedded Zookeeper server are started by the `EmbeddedKafkaBroker`, a system property named `spring.embedded.kafka.brokers` is set to the address of the Kafka brokers and a system property named `spring.embedded.zookeeper.connect` is set to the address of Zookeeper.
+Convenient constants (`EmbeddedKafkaBroker.SPRING_EMBEDDED_KAFKA_BROKERS` and `EmbeddedKafkaBroker.SPRING_EMBEDDED_ZOOKEEPER_CONNECT`) are provided for these properties.
+
+With the `EmbeddedKafkaBroker.brokerProperties(Map)` method, you can provide additional properties for the Kafka servers.
+See [Kafka Config](https://kafka.apache.org/documentation/#brokerconfigs) for more information about possible broker properties.
+
+#### 4.3.3. Configuring Topics
+
+The following example configuration creates topics called `cat` and `hat` with five partitions, a topic called `thing1` with 10 partitions, and a topic called `thing2` with 15 partitions:
+
+```
+public class MyTests {
+
+    @ClassRule
+    public static EmbeddedKafkaRule embeddedKafka = new EmbeddedKafkaRule(1, false, 5, "cat", "hat");
+
+    @Test
+    public void test() {
+        embeddedKafka.getEmbeddedKafka()
+                .addTopics(new NewTopic("thing1", 10, (short) 1), new NewTopic("thing2", 15, (short) 1));
+        ...
+    }
+
+}
+```
+
+By default, `addTopics` will throw an exception when problems arise (such as adding a topic that already exists).
+Version 2.6 added a new version of that method that returns a `Map<String, Exception>`; the key is the topic name and the value is `null` for success, or an `Exception` for a failure.
+
+#### 4.3.4. Using the Same Broker(s) for Multiple Test Classes
+
+There is no built-in support for doing so, but you can use the same broker for multiple test classes with something similar to the following:
+
+```
+public final class EmbeddedKafkaHolder {
+
+    private static EmbeddedKafkaBroker embeddedKafka = new EmbeddedKafkaBroker(1, false)
+            .brokerListProperty("spring.kafka.bootstrap-servers");
+
+    private static boolean started;
+
+    public static EmbeddedKafkaBroker getEmbeddedKafka() {
+        if (!started) {
+            try {
+                embeddedKafka.afterPropertiesSet();
+            }
+            catch (Exception e) {
+                throw new KafkaException("Embedded broker failed to start", e);
+            }
+            started = true;
+        }
+        return embeddedKafka;
+    }
+
+    private EmbeddedKafkaHolder() {
+        super();
+    }
+
+}
+```
+
+This assumes a Spring Boot environment and the embedded broker replaces the bootstrap servers property.
+
+Then, in each test class, you can use something similar to the following:
+
+```
+static {
+    EmbeddedKafkaHolder.getEmbeddedKafka().addTopics("topic1", "topic2");
+}
+
+private static final EmbeddedKafkaBroker broker = EmbeddedKafkaHolder.getEmbeddedKafka();
+```
+
+If you are not using Spring Boot, you can obtain the bootstrap servers using `broker.getBrokersAsString()`.
+
+| |The preceding example provides no mechanism for shutting down the broker(s) when all tests are complete.
This could be a problem if, say, you run your tests in a Gradle daemon.
You should not use this technique in such a situation, or you should use something to call `destroy()` on the `EmbeddedKafkaBroker` when your tests are complete.|
+|---|---|
+
+#### 4.3.5. @EmbeddedKafka Annotation
+
+We generally recommend that you use the rule as a `@ClassRule` to avoid starting and stopping the broker between tests (and use a different topic for each test).
+Starting with version 2.0, if you use Spring’s test application context caching, you can also declare an `EmbeddedKafkaBroker` bean, so a single broker can be used across multiple test classes.
+For convenience, we provide a test class-level annotation called `@EmbeddedKafka` to register the `EmbeddedKafkaBroker` bean.
+The following example shows how to use it:
+
+```
+@RunWith(SpringRunner.class)
+@DirtiesContext
+@EmbeddedKafka(partitions = 1,
+        topics = {
+                KafkaStreamsTests.STREAMING_TOPIC1,
+                KafkaStreamsTests.STREAMING_TOPIC2 })
+public class KafkaStreamsTests {
+
+    @Autowired
+    private EmbeddedKafkaBroker embeddedKafka;
+
+    @Test
+    public void someTest() {
+        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("testGroup", "true", this.embeddedKafka);
+        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+        ConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
+        Consumer<Integer, String> consumer = cf.createConsumer();
+        this.embeddedKafka.consumeFromAnEmbeddedTopic(consumer, KafkaStreamsTests.STREAMING_TOPIC2);
+        ConsumerRecords<Integer, String> replies = KafkaTestUtils.getRecords(consumer);
+        assertThat(replies.count()).isGreaterThanOrEqualTo(1);
+    }
+
+    @Configuration
+    @EnableKafkaStreams
+    public static class KafkaStreamsConfiguration {
+
+        @Value("${" + EmbeddedKafkaBroker.SPRING_EMBEDDED_KAFKA_BROKERS + "}")
+        private String brokerAddresses;
+
+        @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME)
+        public KafkaStreamsConfiguration kStreamsConfigs() {
+            Map<String, Object> props = new HashMap<>();
+            props.put(StreamsConfig.APPLICATION_ID_CONFIG, "testStreams");
+            props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, this.brokerAddresses);
+            return new KafkaStreamsConfiguration(props);
+        }
+
+    }
+
+}
+```
+
+Starting with version 2.2.4, you can also use the `@EmbeddedKafka` annotation to specify the Kafka ports property.
+
+The following example shows how the `topics`, `brokerProperties`, and `brokerPropertiesLocation` attributes of `@EmbeddedKafka` support property placeholder resolution:
+
+```
+@TestPropertySource(locations = "classpath:/test.properties")
+@EmbeddedKafka(topics = { "any-topic", "${kafka.topics.another-topic}" },
+        brokerProperties = { "log.dir=${kafka.broker.logs-dir}",
+                "listeners=PLAINTEXT://localhost:${kafka.broker.port}",
+                "auto.create.topics.enable=${kafka.broker.topics-enable:true}" },
+        brokerPropertiesLocation = "classpath:/broker.properties")
+```
+
+In the preceding example, the property placeholders `${kafka.topics.another-topic}`, `${kafka.broker.logs-dir}`, and `${kafka.broker.port}` are resolved from the Spring `Environment`.
+In addition, the broker properties are loaded from the `broker.properties` classpath resource specified by the `brokerPropertiesLocation`.
+Property placeholders are resolved for the `brokerPropertiesLocation` URL and for any property placeholders found in the resource.
+Properties defined by `brokerProperties` override properties found in `brokerPropertiesLocation`.
+
+You can use the `@EmbeddedKafka` annotation with JUnit 4 or JUnit 5.
+
+#### 4.3.6. @EmbeddedKafka Annotation with JUnit5
+
+Starting with version 2.3, there are two ways to use the `@EmbeddedKafka` annotation with JUnit 5.
+When used with the `@SpringJUnitConfig` annotation, the embedded broker is added to the test application context.
+You can autowire the broker into your test, at the class or method level, to get the broker address list.
+
+When **not** using the Spring test context, the `EmbeddedKafkaCondition` creates a broker; the condition includes a parameter resolver so you can access the broker in your test method:
+
+```
+@EmbeddedKafka
+public class EmbeddedKafkaConditionTests {
+
+    @Test
+    public void test(EmbeddedKafkaBroker broker) {
+        String brokerList = broker.getBrokersAsString();
+        ...
+    }
+
+}
+```
+
+A stand-alone broker (outside the Spring test context) will be created if the class annotated with `@EmbeddedKafka` is not also annotated (or meta-annotated) with `@ExtendWith(SpringExtension.class)`.
+`@SpringJUnitConfig` and `@SpringBootTest` are so meta-annotated, and the context-based broker will be used when either of those annotations is also present.
+
+| |When there is a Spring test application context available, the topics and broker properties can contain property placeholders, which will be resolved as long as the property is defined somewhere.
If there is no Spring context available, these placeholders won’t be resolved.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### 4.3.7. Embedded Broker in `@SpringBootTest` Annotations + +[Spring Initializr](https://start.spring.io/) now automatically adds the `spring-kafka-test` dependency in test scope to the project configuration. + +| |If your application uses the Kafka binder in `spring-cloud-stream` and if you want to use an embedded broker for tests, you must remove the `spring-cloud-stream-test-support` dependency, because it replaces the real binder with a test binder for test cases.
If you wish some tests to use the test binder and some to use the embedded broker, tests that use the real binder need to disable the test binder by excluding the binder auto configuration in the test class.
The following example shows how to do so:

```
@RunWith(SpringRunner.class)
@SpringBootTest(properties = "spring.autoconfigure.exclude="
+ "org.springframework.cloud.stream.test.binder.TestSupportBinderAutoConfiguration")
public class MyApplicationTests {
...
}
```|
+|---|---|
+
+There are several ways to use an embedded broker in a Spring Boot application test.
+
+They include:
+
+* [JUnit4 Class Rule](#kafka-testing-junit4-class-rule)
+
+* [`@EmbeddedKafka` Annotation or `EmbeddedKafkaBroker` Bean](#kafka-testing-embeddedkafka-annotation)
+
+##### JUnit4 Class Rule
+
+The following example shows how to use a JUnit4 class rule to create an embedded broker:
+
+```
+@RunWith(SpringRunner.class)
+@SpringBootTest
+public class MyApplicationTests {
+
+    @ClassRule
+    public static EmbeddedKafkaRule broker = new EmbeddedKafkaRule(1,
+            false, "someTopic")
+                    .brokerListProperty("spring.kafka.bootstrap-servers");
+
+    @Autowired
+    private KafkaTemplate<String, String> template;
+
+    @Test
+    public void test() {
+        ...
+    }
+
+}
+```
+
+Notice that, since this is a Spring Boot application, we override the broker list property to set Boot’s property.
+
+##### `@EmbeddedKafka` Annotation or `EmbeddedKafkaBroker` Bean
+
+The following example shows how to use an `@EmbeddedKafka` annotation to create an embedded broker:
+
+```
+@RunWith(SpringRunner.class)
+@EmbeddedKafka(topics = "someTopic",
+        bootstrapServersProperty = "spring.kafka.bootstrap-servers")
+public class MyApplicationTests {
+
+    @Autowired
+    private KafkaTemplate<String, String> template;
+
+    @Test
+    public void test() {
+        ...
+    }
+
+}
+```
+
+#### 4.3.8. Hamcrest Matchers
+
+The `o.s.kafka.test.hamcrest.KafkaMatchers` provides the following matchers:
+
+```
+/**
+ * @param key the key
+ * @param <K> the type.
+ * @return a Matcher that matches the key in a consumer record.
+ */
+public static <K> Matcher<ConsumerRecord<K, ?>> hasKey(K key) { ... }
+
+/**
+ * @param value the value.
+ * @param <V> the type.
+ * @return a Matcher that matches the value in a consumer record.
+ */
+public static <V> Matcher<ConsumerRecord<?, V>> hasValue(V value) { ... }
+
+/**
+ * @param partition the partition.
+ * @return a Matcher that matches the partition in a consumer record.
+ */
+public static Matcher<ConsumerRecord<?, ?>> hasPartition(int partition) { ... }
+
+/**
+ * Matcher testing the timestamp of a {@link ConsumerRecord} assuming the topic has been set with
+ * {@link org.apache.kafka.common.record.TimestampType#CREATE_TIME CreateTime}.
+ *
+ * @param ts timestamp of the consumer record.
+ * @return a Matcher that matches the timestamp in a consumer record.
+ */
+public static Matcher<ConsumerRecord<?, ?>> hasTimestamp(long ts) {
+    return hasTimestamp(TimestampType.CREATE_TIME, ts);
+}
+
+/**
+ * Matcher testing the timestamp of a {@link ConsumerRecord}
+ * @param type timestamp type of the record
+ * @param ts timestamp of the consumer record.
+ * @return a Matcher that matches the timestamp in a consumer record.
+ */
+public static Matcher<ConsumerRecord<?, ?>> hasTimestamp(TimestampType type, long ts) {
+    return new ConsumerRecordTimestampMatcher(type, ts);
+}
+```
+
+#### 4.3.9. AssertJ Conditions
+
+You can use the following AssertJ conditions:
+
+```
+/**
+ * @param key the key
+ * @param <K> the type.
+ * @return a Condition that matches the key in a consumer record.
+ */
+public static <K> Condition<ConsumerRecord<K, ?>> key(K key) { ... }
+
+/**
+ * @param value the value.
+ * @param <V> the type.
+ * @return a Condition that matches the value in a consumer record.
+ */
+public static <V> Condition<ConsumerRecord<?, V>> value(V value) { ... }
+
+/**
+ * @param key the key.
+ * @param value the value.
+ * @param <K> the key type.
+ * @param <V> the value type.
+ * @return a Condition that matches the key in a consumer record.
+ * @since 2.2.12
+ */
+public static <K, V> Condition<ConsumerRecord<K, V>> keyValue(K key, V value) { ... }
+
+/**
+ * @param partition the partition.
+ * @return a Condition that matches the partition in a consumer record.
+ */
+public static Condition<ConsumerRecord<?, ?>> partition(int partition) { ... }
+
+/**
+ * @param value the timestamp.
+ * @return a Condition that matches the timestamp value in a consumer record.
+ */
+public static Condition<ConsumerRecord<?, ?>> timestamp(long value) {
+    return new ConsumerRecordTimestampCondition(TimestampType.CREATE_TIME, value);
+}
+
+/**
+ * @param type the type of timestamp
+ * @param value the timestamp.
+ * @return a Condition that matches the timestamp value in a consumer record.
+ */
+public static Condition<ConsumerRecord<?, ?>> timestamp(TimestampType type, long value) {
+    return new ConsumerRecordTimestampCondition(type, value);
+}
+```
+
+#### 4.3.10. Example
+
+The following example brings together most of the topics covered in this chapter:
+
+```
+public class KafkaTemplateTests {
+
+    private static final String TEMPLATE_TOPIC = "templateTopic";
+
+    @ClassRule
+    public static EmbeddedKafkaRule embeddedKafka = new EmbeddedKafkaRule(1, true, TEMPLATE_TOPIC);
+
+    @Test
+    public void testTemplate() throws Exception {
+        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("testT", "false",
+                embeddedKafka.getEmbeddedKafka());
+        DefaultKafkaConsumerFactory<Integer, String> cf =
+                new DefaultKafkaConsumerFactory<>(consumerProps);
+        ContainerProperties containerProperties = new ContainerProperties(TEMPLATE_TOPIC);
+        KafkaMessageListenerContainer<Integer, String> container =
+                new KafkaMessageListenerContainer<>(cf, containerProperties);
+        final BlockingQueue<ConsumerRecord<Integer, String>> records = new LinkedBlockingQueue<>();
+        container.setupMessageListener(new MessageListener<Integer, String>() {
+
+            @Override
+            public void onMessage(ConsumerRecord<Integer, String> record) {
+                System.out.println(record);
+                records.add(record);
+            }
+
+        });
+        container.setBeanName("templateTests");
+        container.start();
+        ContainerTestUtils.waitForAssignment(container,
+                embeddedKafka.getEmbeddedKafka().getPartitionsPerTopic());
+        Map<String, Object> producerProps =
+                KafkaTestUtils.producerProps(embeddedKafka.getEmbeddedKafka());
+        ProducerFactory<Integer, String> pf =
+                new DefaultKafkaProducerFactory<>(producerProps);
+        KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
+        template.setDefaultTopic(TEMPLATE_TOPIC);
+        template.sendDefault("foo");
+        assertThat(records.poll(10, TimeUnit.SECONDS), hasValue("foo"));
+        template.sendDefault(0, 2, "bar");
+        ConsumerRecord<Integer, String> received = records.poll(10, TimeUnit.SECONDS);
+        assertThat(received, hasKey(2));
+        assertThat(received, hasPartition(0));
+        assertThat(received, hasValue("bar"));
+        template.send(TEMPLATE_TOPIC, 0, 2, "baz");
+        received = records.poll(10, TimeUnit.SECONDS);
+        assertThat(received, hasKey(2));
+        assertThat(received, hasPartition(0));
+        assertThat(received, hasValue("baz"));
+    }
+
+}
+```
+
+The preceding example uses the Hamcrest matchers.
+With `AssertJ`, the final part looks like the following code:
+
+```
+assertThat(records.poll(10, TimeUnit.SECONDS)).has(value("foo"));
+template.sendDefault(0, 2, "bar");
+ConsumerRecord<Integer, String> received = records.poll(10, TimeUnit.SECONDS);
+// using individual assertions
+assertThat(received).has(key(2));
+assertThat(received).has(value("bar"));
+assertThat(received).has(partition(0));
+template.send(TEMPLATE_TOPIC, 0, 2, "baz");
+received = records.poll(10, TimeUnit.SECONDS);
+// using allOf()
+assertThat(received).has(allOf(keyValue(2, "baz"), partition(0)));
+```
+
+### 4.4. Non-Blocking Retries
+
+| |This is an experimental feature and the usual rule of no breaking API changes does not apply to this feature until the experimental designation is removed.
Users are encouraged to try out the feature and provide feedback via GitHub Issues or GitHub discussions.
This is regarding the API only; the feature is considered to be complete and robust.|
+|---|---|
+
+Achieving non-blocking retry / DLT functionality with Kafka usually requires setting up extra topics and creating and configuring the corresponding listeners.
+Since 2.7, Spring for Apache Kafka offers support for that via the `@RetryableTopic` annotation and the `RetryTopicConfiguration` class to simplify that bootstrapping.
+
+#### 4.4.1. How The Pattern Works
+
+If message processing fails, the message is forwarded to a retry topic with a back off timestamp.
+The retry topic consumer then checks the timestamp and, if it is not due, pauses the consumption for that topic’s partition.
+When it is due, the partition consumption is resumed and the message is consumed again.
+If the message processing fails again, the message is forwarded to the next retry topic, and the pattern is repeated until either processing succeeds or the attempts are exhausted and the message is sent to the Dead Letter Topic (if configured).
+
+To illustrate, if you have a "main-topic" topic and want to set up non-blocking retry with an exponential backoff of 1000ms, a multiplier of 2, and 4 max attempts, the framework will create the main-topic-retry-1000, main-topic-retry-2000, main-topic-retry-4000 and main-topic-dlt topics and configure the respective consumers.
+The framework also takes care of creating the topics and setting up and configuring the listeners.
+
+| |By using this strategy you lose Kafka’s ordering guarantees for that topic.|
+|---|---|
+
+| |You can set the `AckMode` mode you prefer, but `RECORD` is suggested.|
+|---|---|
+
+| |At this time, this functionality doesn’t support class-level `@KafkaListener` annotations.|
+|---|---|
+
+#### 4.4.2. Back Off Delay Precision
+
+##### Overview and Guarantees
+
+All message processing and backing off is handled by the consumer thread, and, as such, delay precision is guaranteed on a best-effort basis.
+If one message’s processing takes longer than the next message’s back off period for that consumer, the next message’s delay will be higher than expected.
+Also, for short delays (about 1s or less), the maintenance work the thread has to do, such as committing offsets, may delay the message processing execution.
+The precision can also be affected if the retry topic’s consumer is handling more than one partition, because we rely on waking up the consumer from polling and having full pollTimeouts to make timing adjustments.
+
+That being said, for consumers handling a single partition, the message’s processing should happen within 100ms of its exact due time in most situations.
+
+| |It is guaranteed that a message will never be processed before its due time.|
+|---|---|
+
+##### Tuning the Delay Precision
+
+The message’s processing delay precision relies on two `ContainerProperties`: `ContainerProperties.pollTimeout` and `ContainerProperties.idlePartitionEventInterval`.
+Both properties will be automatically set in the retry topic and DLT’s `ListenerContainerFactory` to one quarter of the smallest delay value for that topic, with a minimum value of 250ms and a maximum value of 5000ms.
+These values will only be set if the property has its default value; if you change either value yourself, your change will not be overridden.
+This way you can tune the precision and performance for the retry topics if you need to.
+
+| |You can have separate `ListenerContainerFactory` instances for the main and retry topics; this way you can have different settings to better suit your needs, such as having a higher polling timeout setting for the main topics and a lower one for the retry topics.|
+|---|---|
+
+#### 4.4.3. Configuration
+
+##### Using the `@RetryableTopic` annotation
+
+To configure the retry topic and DLT for a `@KafkaListener` annotated method, you just have to add the `@RetryableTopic` annotation to it, and Spring for Apache Kafka will bootstrap all the necessary topics and consumers with the default configurations.
+
+```
+@RetryableTopic(kafkaTemplate = "myRetryableTopicKafkaTemplate")
+@KafkaListener(topics = "my-annotated-topic", groupId = "myGroupId")
+public void processMessage(MyPojo message) {
+    // ... message processing
+}
+```
+
+You can specify a method in the same class to process the DLT messages by annotating it with the `@DltHandler` annotation.
+If no `@DltHandler` method is provided, a default consumer is created, which only logs the consumption.
+
+```
+@DltHandler
+public void processMessage(MyPojo message) {
+    // ... message processing, persistence, etc
+}
+```
+
+| |If you don’t specify a `kafkaTemplate` name, a bean with the name `retryTopicDefaultKafkaTemplate` will be looked up.
If no bean is found, an exception is thrown.|
+|---|---|
+
+##### Using `RetryTopicConfiguration` beans
+
+You can also configure the non-blocking retry support by creating `RetryTopicConfiguration` beans in a `@Configuration` annotated class.
+
+```
+@Bean
+public RetryTopicConfiguration myRetryTopic(KafkaTemplate<String, Object> template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .create(template);
+}
+```
+
+This will create retry topics and a DLT, as well as the corresponding consumers, for all topics in methods annotated with `@KafkaListener` using the default configurations. The `KafkaTemplate` instance is required for message forwarding.
+
+To achieve more fine-grained control over how to handle non-blocking retries for each topic, more than one `RetryTopicConfiguration` bean can be provided.
+
+```
+@Bean
+public RetryTopicConfiguration myRetryTopic(KafkaTemplate<String, Object> template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .fixedBackoff(3000)
+            .maxAttempts(5)
+            .includeTopics("my-topic", "my-other-topic")
+            .create(template);
+}
+
+@Bean
+public RetryTopicConfiguration myOtherRetryTopic(KafkaTemplate<String, Object> template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .exponentialBackoff(1000, 2, 5000)
+            .maxAttempts(4)
+            .excludeTopics("my-topic", "my-other-topic")
+            .retryOn(MyException.class)
+            .create(template);
+}
+```
+
+| |The retry topics’ and DLT’s consumers will be assigned to a consumer group with a group id that combines the one you provide in the `groupId` parameter of the `@KafkaListener` annotation with the topic’s suffix. If you don’t provide one, they’ll all belong to the same group, and a rebalance on a retry topic will cause an unnecessary rebalance on the main topic.|
+|---|---|
+
+| |If the consumer is configured with an [`ErrorHandlingDeserializer`](#error-handling-deserializer), to handle deserialization exceptions, it is important to configure the `KafkaTemplate` and its producer with a serializer that can handle normal objects as well as raw `byte[]` values, which result from deserialization exceptions.
The generic value type of the template should be `Object`.
One technique is to use the `DelegatingByTypeSerializer`; an example follows:|
+|---|---|
+
+```
+@Bean
+public ProducerFactory<String, Object> producerFactory() {
+    return new DefaultKafkaProducerFactory<>(producerConfiguration(), new StringSerializer(),
+            new DelegatingByTypeSerializer(Map.of(byte[].class, new ByteArraySerializer(),
+                    MyNormalObject.class, new JsonSerializer<Object>())));
+}
+
+@Bean
+public KafkaTemplate<String, Object> kafkaTemplate() {
+    return new KafkaTemplate<>(producerFactory());
+}
+```
+
+#### 4.4.4. Features
+
+Most of the features are available both for the `@RetryableTopic` annotation and the `RetryTopicConfiguration` beans.
+
+##### BackOff Configuration
+
+The BackOff configuration relies on the `BackOffPolicy` interface from the `Spring Retry` project.
+
+It includes:
+
+* Fixed Back Off
+
+* Exponential Back Off
+
+* Random Exponential Back Off
+
+* Uniform Random Back Off
+
+* No Back Off
+
+* Custom Back Off
+
+```
+@RetryableTopic(attempts = 5,
+        backoff = @Backoff(delay = 1000, multiplier = 2, maxDelay = 5000))
+@KafkaListener(topics = "my-annotated-topic")
+public void processMessage(MyPojo message) {
+    // ... message processing
+}
+```
+
+```
+@Bean
+public RetryTopicConfiguration myRetryTopic(KafkaTemplate<String, Object> template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .fixedBackoff(3000)
+            .maxAttempts(4)
+            .build();
+}
+```
+
+You can also provide a custom implementation of Spring Retry’s `SleepingBackOffPolicy`:
+
+```
+@Bean
+public RetryTopicConfiguration myRetryTopic(KafkaTemplate<String, Object> template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .customBackOff(new MyCustomBackOffPolicy())
+            .maxAttempts(5)
+            .build();
+}
+```
+
+| |The default backoff policy is `FixedBackOffPolicy` with a maximum of 3 attempts and 1000ms intervals.|
+|---|---|
+
+| |The first attempt counts against `maxAttempts`, so if you provide a `maxAttempts` value of 4 there’ll be the original attempt plus 3 retries.|
+|---|---|
+
+##### Single Topic Fixed Delay Retries
+
+If you’re using fixed delay policies such as `FixedBackOffPolicy` or `NoBackOffPolicy`, you can use a single topic to accomplish the non-blocking retries.
+This topic will be suffixed with the provided or default suffix, and will not have either the index or the delay values appended.
+
+```
+@RetryableTopic(backoff = @Backoff(2000), fixedDelayTopicStrategy = FixedDelayStrategy.SINGLE_TOPIC)
+@KafkaListener(topics = "my-annotated-topic")
+public void processMessage(MyPojo message) {
+    // ... message processing
+}
+```
+
+```
+@Bean
+public RetryTopicConfiguration myRetryTopic(KafkaTemplate<String, Object> template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .fixedBackoff(3000)
+            .maxAttempts(5)
+            .useSingleTopicForFixedDelays()
+            .build();
+}
+```
+
+| |The default behavior is creating separate retry topics for each attempt, appended with their index value: retry-0, retry-1, …|
+|---|---|
+
+##### Global timeout
+
+You can set the global timeout for the retrying process.
+If that time is reached, the next time the consumer throws an exception, the message goes straight to the DLT, or processing simply ends if no DLT is available.
+
+```
+@RetryableTopic(backoff = @Backoff(2000), timeout = 5000)
+@KafkaListener(topics = "my-annotated-topic")
+public void processMessage(MyPojo message) {
+    // ... message processing
+}
+```
+
+```
+@Bean
+public RetryTopicConfiguration myRetryTopic(KafkaTemplate<String, Object> template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .fixedBackoff(2000)
+            .timeoutAfter(5000)
+            .build();
+}
+```
+
+| |The default is having no timeout set, which can also be achieved by providing -1 as the timeout value.|
+|---|---|
+
+##### Exception Classifier
+
+You can specify which exceptions you want to retry on and which not to.
+You can also set it to traverse the causes to look up nested exceptions.
+
+```
+@RetryableTopic(include = {MyRetryException.class, MyOtherRetryException.class}, traversingCauses = true)
+@KafkaListener(topics = "my-annotated-topic")
+public void processMessage(MyPojo message) {
+    throw new RuntimeException(new MyRetryException()); // Will retry
+}
+```
+
+```
+@Bean
+public RetryTopicConfiguration myRetryTopic(KafkaTemplate<String, Object> template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .notRetryOn(MyDontRetryException.class)
+            .create(template);
+}
+```
+
+| |The default behavior is retrying on all exceptions and not traversing causes.|
+|---|---|
+
+Since 2.8.3, there’s a global list of fatal exceptions which will cause the record to be sent to the DLT without any retries.
+See [DefaultErrorHandler](#default-eh) for the default list of fatal exceptions.
+You can add or remove exceptions to and from this list as follows:
+
+```
+@Bean(name = RetryTopicInternalBeanNames.DESTINATION_TOPIC_CONTAINER_NAME)
+public DefaultDestinationTopicResolver topicResolver(ApplicationContext applicationContext,
+        @Qualifier(RetryTopicInternalBeanNames
+                .INTERNAL_BACKOFF_CLOCK_BEAN_NAME) Clock clock) {
+    DefaultDestinationTopicResolver ddtr = new DefaultDestinationTopicResolver(clock, applicationContext);
+    ddtr.addNotRetryableExceptions(MyFatalException.class);
+    ddtr.removeNotRetryableException(ConversionException.class);
+    return ddtr;
+}
+```
+
+| |To disable the classification of fatal exceptions, clear the default list using the `setClassifications` method in `DefaultDestinationTopicResolver`.|
+|---|---|
+
+##### Include and Exclude Topics
+
+You can decide which topics will and will not be handled by a `RetryTopicConfiguration` bean via the `includeTopic(String topic)`, `includeTopics(Collection<String> topics)`, `excludeTopic(String topic)`, and `excludeTopics(Collection<String> topics)` methods.
+
+```
+@Bean
+public RetryTopicConfiguration myRetryTopic(KafkaTemplate<String, Object> template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .includeTopics(List.of("my-included-topic", "my-other-included-topic"))
+            .create(template);
+}
+
+@Bean
+public RetryTopicConfiguration myOtherRetryTopic(KafkaTemplate<String, Object> template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .excludeTopic("my-excluded-topic")
+            .create(template);
+}
+```
+
+| |The default behavior is to include all topics.|
+|---|---|
+
+##### Topics AutoCreation
+
+Unless otherwise specified, the framework will auto-create the required topics using `NewTopic` beans that are consumed by the `KafkaAdmin` bean.
+You can specify the number of partitions and the replication factor with which the topics will be created, and you can turn this feature off.
+
+| |Note that if you’re not using Spring Boot, you’ll have to provide a `KafkaAdmin` bean in order to use this feature.|
+|---|---|
+
+```
+@RetryableTopic(numPartitions = 2, replicationFactor = 3)
+@KafkaListener(topics = "my-annotated-topic")
+public void processMessage(MyPojo message) {
+    // ... message processing
+}
+
+@RetryableTopic(autoCreateTopics = false)
+@KafkaListener(topics = "my-annotated-topic")
+public void processMessage(MyPojo message) {
+    // ... message processing
+}
+```
+
+```
+@Bean
+public RetryTopicConfiguration myRetryTopic(KafkaTemplate<String, Object> template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .autoCreateTopicsWith(2, 3)
+            .create(template);
+}
+
+@Bean
+public RetryTopicConfiguration myOtherRetryTopic(KafkaTemplate<String, Object> template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .doNotAutoCreateRetryTopics()
+            .create(template);
+}
+```
+
+| |By default, the topics are auto-created with one partition and a replication factor of one.|
+|---|---|
+
+##### Failure Header Management
+
+When considering how to manage failure headers (original headers and exception headers), the framework delegates to the `DeadLetterPublishingRecoverer` to decide whether to append or replace the headers.
+
+By default, it explicitly sets `appendOriginalHeaders` to `false` and leaves `stripPreviousExceptionHeaders` to the default used by the `DeadLetterPublishingRecoverer`.
+
+This means that only the first "original" and last exception headers are retained with the default configuration.
+This is to avoid creation of excessively large messages (due to the stack trace header, for example) when many retry steps are involved.
+
+See [Managing Dead Letter Record Headers](#dlpr-headers) for more information.
+
+To reconfigure the framework to use different settings for these properties, replace the standard `DeadLetterPublishingRecovererFactory` bean by adding a `recovererCustomizer`:
+
+```
+@Bean(RetryTopicInternalBeanNames.DEAD_LETTER_PUBLISHING_RECOVERER_FACTORY_BEAN_NAME)
+DeadLetterPublishingRecovererFactory factory(DestinationTopicResolver resolver) {
+    DeadLetterPublishingRecovererFactory factory = new DeadLetterPublishingRecovererFactory(resolver);
+    factory.setDeadLetterPublishingRecovererCustomizer(dlpr -> {
+        dlpr.appendOriginalHeaders(true);
+        dlpr.setStripPreviousExceptionHeaders(false);
+    });
+    return factory;
+}
+```
+
+#### 4.4.5. Topic Naming
+
+Retry topics and the DLT are named by suffixing the main topic with a provided or default value, appended by either the delay or index for that topic.
+
+Examples:
+
+"my-topic" → "my-topic-retry-0", "my-topic-retry-1", …, "my-topic-dlt"
+
+"my-other-topic" → "my-other-topic-myRetrySuffix-1000", "my-other-topic-myRetrySuffix-2000", …, "my-other-topic-myDltSuffix".
+
+##### Retry Topics and Dlt Suffixes
+
+You can specify the suffixes that will be used by the retry and DLT topics.
+
+```
+@RetryableTopic(retryTopicSuffix = "-my-retry-suffix", dltTopicSuffix = "-my-dlt-suffix")
+@KafkaListener(topics = "my-annotated-topic")
+public void processMessage(MyPojo message) {
+    // ... message processing
+}
+```
+
+```
+@Bean
+public RetryTopicConfiguration myRetryTopic(KafkaTemplate<String, Object> template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .retryTopicSuffix("-my-retry-suffix")
+            .dltTopicSuffix("-my-dlt-suffix")
+            .create(template);
+}
+```
+
+| |The default suffixes are "-retry" and "-dlt", for retry topics and DLT respectively.|
+|---|---|
+
+##### Appending the Topic’s Index or Delay
+
+You can either append the topic’s index or delay values after the suffix.
+
+```
+@RetryableTopic(topicSuffixingStrategy = TopicSuffixingStrategy.SUFFIX_WITH_INDEX_VALUE)
+@KafkaListener(topics = "my-annotated-topic")
+public void processMessage(MyPojo message) {
+    // ... message processing
+}
+```
+
+```
+@Bean
+public RetryTopicConfiguration myRetryTopic(KafkaTemplate<String, Object> template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .suffixTopicsWithIndexValues()
+            .create(template);
+}
+```
+
+| |The default behavior is to suffix with the delay values, except for fixed delay configurations with multiple topics, in which case the topics are suffixed with the topic’s index.|
+|---|---|
+
+##### Custom naming strategies
+
+More complex naming strategies can be accomplished by registering a bean that implements `RetryTopicNamesProviderFactory`.
+The default implementation is `SuffixingRetryTopicNamesProviderFactory`; a different implementation can be registered in the following way:
+
+```
+@Bean
+public RetryTopicNamesProviderFactory myRetryNamingProviderFactory() {
+    return new CustomRetryTopicNamesProviderFactory();
+}
+```
+
+As an example, the following implementation, in addition to the standard suffix, adds a prefix to retry/DLT topic names:
+
+```
+public class CustomRetryTopicNamesProviderFactory implements RetryTopicNamesProviderFactory {
+
+    @Override
+    public RetryTopicNamesProvider createRetryTopicNamesProvider(
+            DestinationTopic.Properties properties) {
+
+        if (properties.isMainEndpoint()) {
+            return new SuffixingRetryTopicNamesProvider(properties);
+        }
+        else {
+            return new SuffixingRetryTopicNamesProvider(properties) {
+
+                @Override
+                public String getTopicName(String topic) {
+                    return "my-prefix-" + super.getTopicName(topic);
+                }
+
+            };
+        }
+    }
+
+}
+```
+
+#### 4.4.6. Dlt Strategies
+
+The framework provides a few strategies for working with DLTs. You can provide a method for DLT processing, use the default logging method, or have no DLT at all. You can also choose what happens if DLT processing fails.
+
+##### Dlt Processing Method
+
+You can specify the method used to process the DLT for the topic, as well as the behavior if that processing fails.
+
+To do that, you can use the `@DltHandler` annotation on a method of the class with the `@RetryableTopic` annotation(s).
+Note that the same method will be used for all the `@RetryableTopic` annotated methods within that class.
+
+```
+@RetryableTopic
+@KafkaListener(topics = "my-annotated-topic")
+public void processMessage(MyPojo message) {
+    // ... message processing
+}
+
+@DltHandler
+public void processDltMessage(MyPojo message) {
+    // ... message processing, persistence, etc
+}
+```
+
+The DLT handler method can also be provided through the `RetryTopicConfigurationBuilder.dltHandlerMethod(String, String)` method, passing as arguments the bean name and the method name that should process the DLT’s messages.
+
+```
+@Bean
+public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .dltProcessor("myCustomDltProcessor", "processDltMessage")
+            .create(template);
+}
+
+@Component
+public class MyCustomDltProcessor {
+
+    private final MyDependency myDependency;
+
+    public MyCustomDltProcessor(MyDependency myDependency) {
+        this.myDependency = myDependency;
+    }
+
+    public void processDltMessage(MyPojo message) {
+        // ... message processing, persistence, etc
+    }
+}
+```
+
+| |If no DLT handler is provided, the default `RetryTopicConfigurer.LoggingDltListenerHandlerMethod` is used.|
+|---|---|
+
+Starting with version 2.8, if you don’t want to consume from the DLT in this application at all, including by the default handler (or you wish to defer consumption), you can control whether or not the DLT container starts, independently of the container factory’s `autoStartup` property.
+
+When using the `@RetryableTopic` annotation, set the `autoStartDltHandler` property to `false`; when using the configuration builder, use `.autoStartDltHandler(false)`.
+
+You can later start the DLT handler via the `KafkaListenerEndpointRegistry`, as sketched below.
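+
+The following is a minimal, hypothetical sketch of starting the DLT container later via the registry; the container id (`"myDltContainerId"`) is a placeholder for whatever id your DLT container is actually registered with (you can inspect `registry.getListenerContainerIds()` to find it):
+
+```
+@Autowired
+private KafkaListenerEndpointRegistry registry;
+
+public void startDltContainer() {
+    // "myDltContainerId" is an assumed id; look it up via getListenerContainerIds()
+    this.registry.getListenerContainer("myDltContainerId").start();
+}
+```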
+
+##### DLT Failure Behavior
+
+Should the DLT processing fail, there are two possible behaviors available: `ALWAYS_RETRY_ON_ERROR` and `FAIL_ON_ERROR`.
+
+In the former, the record is forwarded back to the DLT so that it does not block the processing of other DLT records.
+In the latter, the consumer ends the execution without forwarding the message.
+
+```
+@RetryableTopic(dltProcessingFailureStrategy =
+        DltStrategy.FAIL_ON_ERROR)
+@KafkaListener(topics = "my-annotated-topic")
+public void processMessage(MyPojo message) {
+    // ... message processing
+}
+```
+
+```
+@Bean
+public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .dltProcessor(MyCustomDltProcessor.class, "processDltMessage")
+            .doNotRetryOnDltFailure()
+            .create(template);
+}
+```
+
+| |The default behavior is `ALWAYS_RETRY_ON_ERROR`.|
+|---|---|
+
+| |Starting with version 2.8.3, `ALWAYS_RETRY_ON_ERROR` will NOT route a record back to the DLT if the record causes a fatal exception to be thrown, such as a `DeserializationException`, because, generally, such exceptions will always be thrown.|
+|---|---|
+
+Exceptions that are considered fatal are:
+
+* `DeserializationException`
+
+* `MessageConversionException`
+
+* `ConversionException`
+
+* `MethodArgumentResolutionException`
+
+* `NoSuchMethodException`
+
+* `ClassCastException`
+
+You can add exceptions to and remove exceptions from this list using methods on the `DestinationTopicResolver` bean.
+
+See [Exception Classifier](#retry-topic-ex-classifier) for more information.
+
+##### Configuring No DLT
+
+The framework also provides the possibility of not configuring a DLT for the topic.
+In this case, after retries are exhausted, the processing simply ends.
+
+```
+@RetryableTopic(dltProcessingFailureStrategy =
+        DltStrategy.NO_DLT)
+@KafkaListener(topics = "my-annotated-topic")
+public void processMessage(MyPojo message) {
+    // ... message processing
+}
+```
+
+```
+@Bean
+public RetryTopicConfiguration myRetryTopic(KafkaTemplate template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .doNotConfigureDlt()
+            .create(template);
+}
+```
+
+#### 4.4.7. Specifying a ListenerContainerFactory
+
+By default, the retry topic configuration uses the factory provided in the `@KafkaListener` annotation, but you can specify a different one to be used to create the retry topic and DLT listener containers.
+
+For the `@RetryableTopic` annotation you can provide the factory’s bean name, and using the `RetryTopicConfiguration` bean you can provide either the bean name or the instance itself.
+
+```
+@RetryableTopic(listenerContainerFactory = "my-retry-topic-factory")
+@KafkaListener(topics = "my-annotated-topic")
+public void processMessage(MyPojo message) {
+    // ... message processing
+}
+```
+
+```
+@Bean
+public RetryTopicConfiguration myRetryTopic(KafkaTemplate template,
+        ConcurrentKafkaListenerContainerFactory factory) {
+
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .listenerFactory(factory)
+            .create(template);
+}
+
+@Bean
+public RetryTopicConfiguration myOtherRetryTopic(KafkaTemplate template) {
+    return RetryTopicConfigurationBuilder
+            .newInstance()
+            .listenerFactory("my-retry-topic-factory")
+            .create(template);
+}
+```
+
+| |Since 2.8.3, you can use the same factory for retryable and non-retryable topics.|
+|---|---|
+
+If you need to revert the factory configuration to the pre-2.8.3 behavior, you can replace the standard `RetryTopicConfigurer` bean and set `useLegacyFactoryConfigurer` to `true`, such as:
+
+```
+@Bean(name = RetryTopicInternalBeanNames.RETRY_TOPIC_CONFIGURER)
+public RetryTopicConfigurer retryTopicConfigurer(DestinationTopicProcessor destinationTopicProcessor,
+        ListenerContainerFactoryResolver containerFactoryResolver,
+        ListenerContainerFactoryConfigurer listenerContainerFactoryConfigurer,
+        BeanFactory beanFactory,
+        RetryTopicNamesProviderFactory retryTopicNamesProviderFactory) {
+    RetryTopicConfigurer retryTopicConfigurer = new RetryTopicConfigurer(destinationTopicProcessor, containerFactoryResolver, listenerContainerFactoryConfigurer, beanFactory, retryTopicNamesProviderFactory);
+    retryTopicConfigurer.useLegacyFactoryConfigurer(true);
+    return retryTopicConfigurer;
+}
+```
+
+#### Changing KafkaBackOffException Logging Level
+
+When a message in the retry topic is not due for consumption, a `KafkaBackOffException` is thrown. Such exceptions are logged by default at the `DEBUG` level, but you can change this behavior by setting an error handler customizer in the `ListenerContainerFactoryConfigurer` in a `@Configuration` class.
+
+For example, to change the logging level to WARN, you might add:
+
+```
+@Bean(name = RetryTopicInternalBeanNames.LISTENER_CONTAINER_FACTORY_CONFIGURER_NAME)
+public ListenerContainerFactoryConfigurer listenerContainer(KafkaConsumerBackoffManager kafkaConsumerBackoffManager,
+        DeadLetterPublishingRecovererFactory deadLetterPublishingRecovererFactory,
+        @Qualifier(RetryTopicInternalBeanNames
+                .INTERNAL_BACKOFF_CLOCK_BEAN_NAME) Clock clock) {
+    ListenerContainerFactoryConfigurer configurer = new ListenerContainerFactoryConfigurer(kafkaConsumerBackoffManager, deadLetterPublishingRecovererFactory, clock);
+    configurer.setErrorHandlerCustomizer(commonErrorHandler -> ((DefaultErrorHandler) commonErrorHandler).setLogLevel(KafkaException.Level.WARN));
+    return configurer;
+}
+```
+
+## Tips, Tricks and Examples
+
+### Manually Assigning All Partitions
+
+Let’s say you want to always read all records from all partitions (such as when using a compacted topic to load a distributed cache). In that case, it can be useful to manually assign the partitions and not use Kafka’s group management.
+Doing so can be unwieldy when there are many partitions, because you have to list them all.
+It’s also an issue if the number of partitions changes over time, because you would have to recompile your application each time the partition count changes.
+
+The following is an example of how to use the power of a SpEL expression to create the partition list dynamically when the application starts:
+
+```
+@KafkaListener(topicPartitions = @TopicPartition(topic = "compacted",
+            partitions = "#{@finder.partitions('compacted')}",
+            partitionOffsets = @PartitionOffset(partition = "*", initialOffset = "0")))
+public void listen(@Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) String key, String payload) {
+    ...
+}
+
+@Bean
+public PartitionFinder finder(ConsumerFactory<String, String> consumerFactory) {
+    return new PartitionFinder(consumerFactory);
+}
+
+public static class PartitionFinder {
+
+    private final ConsumerFactory<String, String> consumerFactory;
+
+    public PartitionFinder(ConsumerFactory<String, String> consumerFactory) {
+        this.consumerFactory = consumerFactory;
+    }
+
+    public String[] partitions(String topic) {
+        try (Consumer<String, String> consumer = consumerFactory.createConsumer()) {
+            return consumer.partitionsFor(topic).stream()
+                .map(pi -> "" + pi.partition())
+                .toArray(String[]::new);
+        }
+    }
+
+}
+```
+
+Using this in conjunction with `ConsumerConfig.AUTO_OFFSET_RESET_CONFIG=earliest` will load all records each time the application is started.
+You should also set the container’s `AckMode` to `MANUAL` to prevent the container from committing offsets for a `null` consumer group.
+However, starting with version 2.5.5, as shown above, you can apply an initial offset to all partitions; see [Explicit Partition Assignment](#manual-assignment) for more information.
+
+### Examples of Kafka Transactions with Other Transaction Managers
+
+The following Spring Boot application is an example of chaining database and Kafka transactions.
+The listener container starts the Kafka transaction and the `@Transactional` annotation starts the DB transaction.
+The DB transaction is committed first; if the Kafka transaction fails to commit, the record will be redelivered, so the DB update should be idempotent.
+
+```
+@SpringBootApplication
+public class Application {
+
+    public static void main(String[] args) {
+        SpringApplication.run(Application.class, args);
+    }
+
+    @Bean
+    public ApplicationRunner runner(KafkaTemplate<String, String> template) {
+        return args -> template.executeInTransaction(t -> t.send("topic1", "test"));
+    }
+
+    @Bean
+    public DataSourceTransactionManager dstm(DataSource dataSource) {
+        return new DataSourceTransactionManager(dataSource);
+    }
+
+    @Component
+    public static class Listener {
+
+        private final JdbcTemplate jdbcTemplate;
+
+        private final KafkaTemplate<String, String> kafkaTemplate;
+
+        public Listener(JdbcTemplate jdbcTemplate, KafkaTemplate<String, String> kafkaTemplate) {
+            this.jdbcTemplate = jdbcTemplate;
+            this.kafkaTemplate = kafkaTemplate;
+        }
+
+        @KafkaListener(id = "group1", topics = "topic1")
+        @Transactional("dstm")
+        public void listen1(String in) {
+            this.kafkaTemplate.send("topic2", in.toUpperCase());
+            this.jdbcTemplate.execute("insert into mytable (data) values ('" + in + "')");
+        }
+
+        @KafkaListener(id = "group2", topics = "topic2")
+        public void listen2(String in) {
+            System.out.println(in);
+        }
+
+    }
+
+    @Bean
+    public NewTopic topic1() {
+        return TopicBuilder.name("topic1").build();
+    }
+
+    @Bean
+    public NewTopic topic2() {
+        return TopicBuilder.name("topic2").build();
+    }
+
+}
+```
+
+```
+spring.datasource.url=jdbc:mysql://localhost/integration?serverTimezone=UTC
+spring.datasource.username=root
+spring.datasource.driver-class-name=com.mysql.cj.jdbc.Driver
+
+spring.kafka.consumer.auto-offset-reset=earliest
+spring.kafka.consumer.enable-auto-commit=false
+spring.kafka.consumer.properties.isolation.level=read_committed
+
+spring.kafka.producer.transaction-id-prefix=tx-
+
+#logging.level.org.springframework.transaction=trace
+#logging.level.org.springframework.kafka.transaction=debug
+#logging.level.org.springframework.jdbc=debug
+```
+
+```
+create table mytable (data varchar(20));
+```
+
+For producer-only transactions, transaction synchronization works:
+
+```
+@Transactional("dstm")
+public void someMethod(String in) {
+    this.kafkaTemplate.send("topic2", in.toUpperCase());
+    this.jdbcTemplate.execute("insert into mytable (data) values ('" + in + "')");
+}
+```
+
+The `KafkaTemplate` will synchronize its transaction with the DB transaction, and the commit/rollback occurs after the database.
+
+If you wish to commit the Kafka transaction first, and only commit the DB transaction if the Kafka transaction is successful, use nested `@Transactional` methods:
+
+```
+@Transactional("dstm")
+public void someMethod(String in) {
+    this.jdbcTemplate.execute("insert into mytable (data) values ('" + in + "')");
+    sendToKafka(in);
+}
+
+@Transactional("kafkaTransactionManager")
+public void sendToKafka(String in) {
+    this.kafkaTemplate.send("topic2", in.toUpperCase());
+}
+```
+
+### Customizing the JsonSerializer and JsonDeserializer
+
+The serializer and deserializer support a number of customizations using properties; see [JSON](#json-serde) for more information.
+The `kafka-clients` code, not Spring, instantiates these objects, unless you inject them directly into the consumer and producer factories.
+If you wish to configure the (de)serializer using properties, but also wish to use, say, a custom `ObjectMapper`, simply create a subclass and pass the custom mapper into the `super` constructor.
+For example:
+
+```
+public class CustomJsonSerializer extends JsonSerializer<Object> {
+
+    public CustomJsonSerializer() {
+        super(customizedObjectMapper());
+    }
+
+    private static ObjectMapper customizedObjectMapper() {
+        ObjectMapper mapper = JacksonUtils.enhancedObjectMapper();
+        mapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS);
+        return mapper;
+    }
+
+}
+```
+
+## Other Resources
+
+In addition to this reference documentation, we recommend a number of other resources that may help you learn about Spring and Apache Kafka.
+
+* [Apache Kafka Project Home Page](https://kafka.apache.org/)
+
+* [Spring for Apache Kafka Home Page](https://projects.spring.io/spring-kafka/)
+
+* [Spring for Apache Kafka GitHub Repository](https://github.com/spring-projects/spring-kafka)
+
+* [Spring Integration GitHub Repository (Apache Kafka Module)](https://github.com/spring-projects/spring-integration)
+
+## Override Spring Boot Dependencies
+
+When using Spring for Apache Kafka in a Spring Boot application, the Apache Kafka dependency versions are determined by Spring Boot’s dependency management.
+If you wish to use a different version of `kafka-clients` or `kafka-streams` and use the embedded Kafka broker for testing, you need to override the version used by Spring Boot’s dependency management and add two `test` artifacts for Apache Kafka.
+
+| |There is a bug in Apache Kafka 3.0.0 when running the embedded broker on Microsoft Windows [KAFKA-13391](https://issues.apache.org/jira/browse/KAFKA-13391). To use the embedded broker on Windows, you need to downgrade the Apache Kafka version to 2.8.1 until 3.0.1 is available. When using 2.8.1, you also need to exclude the `zookeeper` dependency from `spring-kafka-test`.|
+|---|---|
+
+Maven
+
+```
+<properties>
+    <kafka.version>2.8.1</kafka.version>
+</properties>
+
+<dependency>
+    <groupId>org.springframework.kafka</groupId>
+    <artifactId>spring-kafka</artifactId>
+</dependency>
+
+<!-- optional - only needed when using kafka-streams -->
+<dependency>
+    <groupId>org.apache.kafka</groupId>
+    <artifactId>kafka-streams</artifactId>
+</dependency>
+
+<dependency>
+    <groupId>org.springframework.kafka</groupId>
+    <artifactId>spring-kafka-test</artifactId>
+    <scope>test</scope>
+    <!-- needed if downgrading to Apache Kafka 2.8.1 -->
+    <exclusions>
+        <exclusion>
+            <groupId>org.apache.zookeeper</groupId>
+            <artifactId>zookeeper</artifactId>
+        </exclusion>
+    </exclusions>
+</dependency>
+
+<dependency>
+    <groupId>org.apache.kafka</groupId>
+    <artifactId>kafka-clients</artifactId>
+    <classifier>test</classifier>
+    <scope>test</scope>
+    <version>${kafka.version}</version>
+</dependency>
+
+<dependency>
+    <groupId>org.apache.kafka</groupId>
+    <artifactId>kafka_2.13</artifactId>
+    <classifier>test</classifier>
+    <scope>test</scope>
+    <version>${kafka.version}</version>
+</dependency>
+```
+
+Gradle
+
+```
+ext['kafka.version'] = '2.8.1'
+
+dependencies {
+    implementation 'org.springframework.kafka:spring-kafka'
+    implementation "org.apache.kafka:kafka-streams" // optional - only needed when using kafka-streams
+    testImplementation ('org.springframework.kafka:spring-kafka-test') {
+        // needed if downgrading to Apache Kafka 2.8.1
+        exclude group: 'org.apache.zookeeper', module: 'zookeeper'
+    }
+    testImplementation "org.apache.kafka:kafka-clients:${kafka.version}:test"
+    testImplementation "org.apache.kafka:kafka_2.13:${kafka.version}:test"
+}
+```
+
+The test scope dependencies are needed only if you are using the embedded Kafka broker in tests.
+
+## Change History
+
+### Changes between 2.6 and 2.7
+
+#### Kafka Client Version
+
+This version requires the 2.7.0 `kafka-clients`.
+It is also compatible with the 2.8.0 clients, since version 2.7.1; see [[update-deps]](#update-deps).
+
+#### Non-Blocking Delayed Retries Using Topics
+
+This significant new feature was added in this release.
+When strict ordering is not important, failed deliveries can be sent to another topic to be consumed later.
+A series of such retry topics can be configured, with increasing delays.
+See [Non-Blocking Retries](#retry-topic) for more information.
+
+#### Listener Container Changes
+
+The `onlyLogRecordMetadata` container property is now `true` by default.
+
+A new container property, `stopImmediate`, is now available.
+
+See [Listener Container Properties](#container-props) for more information.
+
+Error handlers that use a `BackOff` between delivery attempts (e.g. `SeekToCurrentErrorHandler` and `DefaultAfterRollbackProcessor`) will now exit the back off interval soon after the container is stopped, rather than delaying the stop.
+See [After-rollback Processor](#after-rollback) and [[seek-to-current]](#seek-to-current) for more information.
+
+Error handlers and after-rollback processors that extend `FailedRecordProcessor` can now be configured with one or more `RetryListener` s to receive information about retry and recovery progress.
+
+See [After-rollback Processor](#after-rollback), [[seek-to-current]](#seek-to-current), and [[recovering-batch-eh]](#recovering-batch-eh) for more information.
+
+The `RecordInterceptor` now has additional methods called after the listener returns (normally, or by throwing an exception).
+It also has a sub-interface, `ConsumerAwareRecordInterceptor`.
+In addition, there is now a `BatchInterceptor` for batch listeners.
+See [Message Listener Containers](#message-listener-container) for more information.
+
+#### `@KafkaListener` Changes
+
+You can now validate the payload parameter of `@KafkaHandler` methods (class-level listeners).
+See [`@KafkaListener` `@Payload` Validation](#kafka-validation) for more information.
+
+You can now set the `rawRecordHeader` property on the `MessagingMessageConverter` and `BatchMessagingMessageConverter`, which causes the raw `ConsumerRecord` to be added to the converted `Message`.
+This is useful, for example, if you wish to use a `DeadLetterPublishingRecoverer` in a listener error handler.
+See [Listener Error Handlers](#listener-error-handlers) for more information.
+
+You can now modify `@KafkaListener` annotations during application initialization.
+See [`@KafkaListener` Attribute Modification](#kafkalistener-attrs) for more information.
+
+#### `DeadLetterPublishingRecoverer` Changes
+
+Now, if both the key and value fail deserialization, the original values are published to the DLT.
+Previously, the value was populated but the key `DeserializationException` remained in the headers.
+There is a breaking API change if you subclassed the recoverer and overrode the `createProducerRecord` method.
+
+In addition, the recoverer verifies that the partition selected by the destination resolver actually exists before publishing to it.
+
+See [Publishing Dead-letter Records](#dead-letters) for more information.
+
+#### `ChainedKafkaTransactionManager` is Deprecated
+
+See [Transactions](#transactions) for more information.
+
+#### `ReplyingKafkaTemplate` Changes
+
+There is now a mechanism to examine a reply and fail the future exceptionally if some condition exists.
+
+Support for sending and receiving `spring-messaging` `Message` s has been added.
+
+See [Using `ReplyingKafkaTemplate`](#replying-template) for more information.
+
+#### Kafka Streams Changes
+
+By default, the `StreamsBuilderFactoryBean` is now configured to not clean up local state.
+See [Configuration](#streams-config) for more information.
+
+#### `KafkaAdmin` Changes
+
+New methods `createOrModifyTopics` and `describeTopics` have been added. `KafkaAdmin.NewTopics` has been added to facilitate configuring multiple topics in a single bean.
+See [Configuring Topics](#configuring-topics) for more information.
+
+#### `MessageConverter` Changes
+
+It is now possible to add a `spring-messaging` `SmartMessageConverter` to the `MessagingMessageConverter`, allowing content negotiation based on the `contentType` header.
+See [Spring Messaging Message Conversion](#messaging-message-conversion) for more information.
+
+#### Sequencing `@KafkaListener` s
+
+See [Starting `@KafkaListener` s in Sequence](#sequencing) for more information.
+
+#### `ExponentialBackOffWithMaxRetries`
+
+A new `BackOff` implementation is provided, making it more convenient to configure the max retries.
+See [`ExponentialBackOffWithMaxRetries` Implementation](#exp-backoff) for more information.
+
+#### Conditional Delegating Error Handlers
+
+These new error handlers can be configured to delegate to different error handlers, depending on the exception type.
+See [Delegating Error Handler](#cond-eh) for more information.
+
+### Changes between 2.5 and 2.6
+
+#### Kafka Client Version
+
+This version requires the 2.6.0 `kafka-clients`.
+
+#### Listener Container Changes
+
+The default `EOSMode` is now `BETA`.
+See [Exactly Once Semantics](#exactly-once) for more information.
+
+Various error handlers (that extend `FailedRecordProcessor`) and the `DefaultAfterRollbackProcessor` now reset the `BackOff` if recovery fails.
+In addition, you can now select the `BackOff` to use based on the failed record and/or exception.
+See [[seek-to-current]](#seek-to-current), [[recovering-batch-eh]](#recovering-batch-eh), [Publishing Dead-letter Records](#dead-letters), and [After-rollback Processor](#after-rollback) for more information.
+
+You can now configure an `adviceChain` in the container properties.
+See [Listener Container Properties](#container-props) for more information.
+
+When the container is configured to publish `ListenerContainerIdleEvent` s, it now publishes a `ListenerContainerNoLongerIdleEvent` when a record is received after publishing an idle event.
+See [Application Events](#events) and [Detecting Idle and Non-Responsive Consumers](#idle-containers) for more information.
+
+#### @KafkaListener Changes
+
+When using manual partition assignment, you can now specify a wildcard for determining which partitions should be reset to the initial offset.
+In addition, if the listener implements `ConsumerSeekAware`, `onPartitionsAssigned()` is called after the manual assignment.
+(Also added in version 2.5.5.)
+See [Explicit Partition Assignment](#manual-assignment) for more information.
+
+Convenience methods have been added to `AbstractConsumerSeekAware` to make seeking easier.
+See [Seeking to a Specific Offset](#seek) for more information.
+
+#### ErrorHandler Changes
+
+Subclasses of `FailedRecordProcessor` (e.g. `SeekToCurrentErrorHandler`, `DefaultAfterRollbackProcessor`, `RecoveringBatchErrorHandler`) can now be configured to reset the retry state if the exception is of a different type from the one that occurred previously with this record.
+See [[seek-to-current]](#seek-to-current), [After-rollback Processor](#after-rollback), and [[recovering-batch-eh]](#recovering-batch-eh) for more information.
+
+#### Producer Factory Changes
+
+You can now set a maximum age for producers, after which they will be closed and recreated.
+See [Transactions](#transactions) for more information.
+
+You can now update the configuration map after the `DefaultKafkaProducerFactory` has been created.
+This might be useful, for example, if you have to update SSL key/trust store locations after a credentials change.
+See [Using `DefaultKafkaProducerFactory`](#producer-factory) for more information.
+
+### Changes between 2.4 and 2.5
+
+This section covers the changes made from version 2.4 to version 2.5.
+For changes in earlier versions, see [[history]](#history).
+
+#### Consumer/Producer Factory Changes
+
+The default consumer and producer factories can now invoke a callback whenever a consumer or producer is created or closed.
+Implementations for native Micrometer metrics are provided.
+See [Factory Listeners](#factory-listeners) for more information.
+
+You can now change bootstrap server properties at runtime, enabling failover to another Kafka cluster.
+See [Connecting to Kafka](#connecting) for more information.
+
+#### `StreamsBuilderFactoryBean` Changes
+
+The factory bean can now invoke a callback whenever a `KafkaStreams` instance is created or destroyed.
+An implementation for native Micrometer metrics is provided.
+See [KafkaStreams Micrometer Support](#streams-micrometer) for more information.
+
+#### Kafka Client Version
+
+This version requires the 2.5.0 `kafka-clients`.
+
+#### Class/Package Changes
+
+`SeekUtils` has been moved from the `o.s.k.support` package to `o.s.k.listener`.
+
+#### Delivery Attempts Header
+
+There is now an option to add a header which tracks delivery attempts when using certain error handlers and after-rollback processors.
+See [Delivery Attempts Header](#delivery-header) for more information.
+
+#### @KafkaListener Changes
+
+Default reply headers will now be populated automatically if needed when a `@KafkaListener` return type is `Message`.
+See [Reply Type Message](#reply-message) for more information.
+
+The `KafkaHeaders.RECEIVED_MESSAGE_KEY` is no longer populated with a `null` value when the incoming record has a `null` key; the header is omitted altogether.
+
+`@KafkaListener` methods can now specify a `ConsumerRecordMetadata` parameter instead of using discrete headers for metadata such as topic, partition, and so on.
+See [Consumer Record Metadata](#consumer-record-metadata) for more information.
+
+#### Listener Container Changes
+
+The `assignmentCommitOption` container property is now `LATEST_ONLY_NO_TX` by default.
+See [Listener Container Properties](#container-props) for more information.
+
+The `subBatchPerPartition` container property is now `true` by default when using transactions.
+See [Transactions](#transactions) for more information.
+
+A new `RecoveringBatchErrorHandler` is now provided.
+See [[recovering-batch-eh]](#recovering-batch-eh) for more information.
+
+Static group membership is now supported.
+See [Message Listener Containers](#message-listener-container) for more information.
+
+When incremental/cooperative rebalancing is configured, if offsets fail to commit with a non-fatal `RebalanceInProgressException`, the container will attempt to re-commit the offsets for the partitions that remain assigned to this instance after the rebalance is completed.
+
+The default error handler is now the `SeekToCurrentErrorHandler` for record listeners and `RecoveringBatchErrorHandler` for batch listeners.
+See [Container Error Handlers](#error-handlers) for more information.
+
+You can now control the level at which exceptions intentionally thrown by standard error handlers are logged.
+See [Container Error Handlers](#error-handlers) for more information.
+
+The `getAssignmentsByClientId()` method has been added, making it easier to determine which consumers in a concurrent container are assigned which partition(s).
+See [Listener Container Properties](#container-props) for more information.
+
+You can now suppress logging entire `ConsumerRecord` s in error and debug logs.
+See `onlyLogRecordMetadata` in [Listener Container Properties](#container-props).
+
+#### KafkaTemplate Changes
+
+The `KafkaTemplate` can now maintain Micrometer timers.
+See [Monitoring](#micrometer) for more information.
+
+The `KafkaTemplate` can now be configured with `ProducerConfig` properties to override those in the producer factory.
+See [Using `KafkaTemplate`](#kafka-template) for more information.
+
+A `RoutingKafkaTemplate` has now been provided.
+See [Using `RoutingKafkaTemplate`](#routing-template) for more information.
+
+You can now use `KafkaSendCallback` instead of `ListenableFutureCallback` to get a narrower exception, making it easier to extract the failed `ProducerRecord`.
+See [Using `KafkaTemplate`](#kafka-template) for more information.
+
+#### Kafka String Serializer/Deserializer
+
+New `ToStringSerializer`/`StringDeserializer` s, as well as an associated `SerDe`, are now provided.
+See [String serialization](#string-serde) for more information.
+
+#### JsonDeserializer
+
+The `JsonDeserializer` now has more flexibility to determine the deserialization type.
+See [Using Methods to Determine Types](#serdes-type-methods) for more information.
+
+#### Delegating Serializer/Deserializer
+
+The `DelegatingSerializer` can now handle "standard" types when the outbound record has no header.
+See [Delegating Serializer and Deserializer](#delegating-serialization) for more information.
+
+#### Testing Changes
+
+The `KafkaTestUtils.consumerProps()` helper method now sets `ConsumerConfig.AUTO_OFFSET_RESET_CONFIG` to `earliest` by default.
+See [JUnit](#junit) for more information.
+
+### Changes between 2.3 and 2.4
+
+#### Kafka Client Version
+
+This version requires the 2.4.0 `kafka-clients` or higher and supports the new incremental rebalancing feature.
+
+#### ConsumerAwareRebalanceListener
+
+Like `ConsumerRebalanceListener`, this interface now has an additional method, `onPartitionsLost`.
+Refer to the Apache Kafka documentation for more information.
+
+Unlike the `ConsumerRebalanceListener`, the default implementation does **not** call `onPartitionsRevoked`.
+Instead, the listener container will call that method after it has called `onPartitionsLost`; you should not, therefore, do the same when implementing `ConsumerAwareRebalanceListener`.
+
+See the IMPORTANT note at the end of [Rebalancing Listeners](#rebalance-listeners) for more information.
+
+#### GenericErrorHandler
+
+The `isAckAfterHandle()` default implementation now returns `true`.
+
+#### KafkaTemplate
+
+The `KafkaTemplate` now supports non-transactional publishing alongside transactional.
+See [`KafkaTemplate` Transactional and non-Transactional Publishing](#tx-template-mixed) for more information.
+
+#### AggregatingReplyingKafkaTemplate
+
+The `releaseStrategy` is now a `BiConsumer`.
+It is now called after a timeout (as well as when records arrive); the second parameter is `true` in the case of a call after a timeout.
+
+See [Aggregating Multiple Replies](#aggregating-request-reply) for more information.
+
+#### Listener Container
+
+The `ContainerProperties` provides an `authorizationExceptionRetryInterval` option to let the listener container retry after any `AuthorizationException` is thrown by the `KafkaConsumer`.
+See its JavaDocs and [Using `KafkaMessageListenerContainer`](#kafka-container) for more information.
+
+#### @KafkaListener
+
+The `@KafkaListener` annotation has a new property, `splitIterables`; the default is `true`.
+When a replying listener returns an `Iterable`, this property controls whether the return result is sent as a single record or a record is sent for each element.
+See [Forwarding Listener Results using `@SendTo`](#annotation-send-to) for more information.
+
+Batch listeners can now be configured with a `BatchToRecordAdapter`; this allows, for example, the batch to be processed in a transaction while the listener gets one record at a time.
+With the default implementation, a `ConsumerRecordRecoverer` can be used to handle errors within the batch, without stopping the processing of the entire batch - this might be useful when using transactions.
+See [Transactions with Batch Listeners](#transactions-batch) for more information.
+
+#### Kafka Streams
+
+The `StreamsBuilderFactoryBean` accepts a new property, `KafkaStreamsInfrastructureCustomizer`.
+This allows configuration of the builder and/or topology before the stream is created.
+See [Spring Management](#streams-spring) for more information.
+
+### Changes Between 2.2 and 2.3
+
+This section covers the changes made from version 2.2 to version 2.3.
+
+#### Tips, Tricks and Examples
+
+A new chapter, [[tips-n-tricks]](#tips-n-tricks), has been added.
+Please submit GitHub issues and/or pull requests for additional entries in that chapter.
+
+#### Kafka Client Version
+
+This version requires the 2.3.0 `kafka-clients` or higher.
+
+#### Class/Package Changes
+
+`TopicPartitionInitialOffset` is deprecated in favor of `TopicPartitionOffset`.
+
+#### Configuration Changes
+
+Starting with version 2.3.4, the `missingTopicsFatal` container property is false by default.
+When this is true, the application fails to start if the broker is down; many users were affected by this change; given that Kafka is a high-availability platform, we did not anticipate that starting an application with no active brokers would be a common use case.
+
+#### Producer and Consumer Factory Changes
+
+The `DefaultKafkaProducerFactory` can now be configured to create a producer per thread.
+You can also provide `Supplier` instances in the constructor as an alternative to either configured classes (which require no-arg constructors) or constructing with `Serializer` instances, which are then shared between all producers.
+See [Using `DefaultKafkaProducerFactory`](#producer-factory) for more information.
+
+The same option is available with `Supplier` instances in `DefaultKafkaConsumerFactory`.
+See [Using `KafkaMessageListenerContainer`](#kafka-container) for more information.
+
+#### Listener Container Changes
+
+Previously, error handlers received `ListenerExecutionFailedException` (with the actual listener exception as the `cause`) when the listener was invoked using a listener adapter (such as `@KafkaListener` s).
+Exceptions thrown by native `GenericMessageListener` s were passed to the error handler unchanged.
+Now a `ListenerExecutionFailedException` is always the argument (with the actual listener exception as the `cause`), which provides access to the container’s `group.id` property.
+
+Because the listener container has its own mechanism for committing offsets, it prefers the Kafka `ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG` to be `false`.
+It now sets it to false automatically, unless specifically set in the consumer factory or the container’s consumer property overrides.
+
+The `ackOnError` property is now `false` by default.
+See [[seek-to-current]](#seek-to-current) for more information.
+
+It is now possible to obtain the consumer’s `group.id` property in the listener method.
+See [Obtaining the Consumer `group.id`](#listener-group-id) for more information.
+
+The container has a new property, `recordInterceptor`, allowing records to be inspected or modified before invoking the listener.
+A `CompositeRecordInterceptor` is also provided in case you need to invoke multiple interceptors.
+See [Message Listener Containers](#message-listener-container) for more information.
+
+The `ConsumerSeekAware` has new methods allowing you to perform seeks relative to the beginning, end, or current position and to seek to the first offset greater than or equal to a timestamp.
+See [Seeking to a Specific Offset](#seek) for more information.
+
+A convenience class, `AbstractConsumerSeekAware`, is now provided to simplify seeking.
+See [Seeking to a Specific Offset](#seek) for more information.
+
+The `ContainerProperties` provides an `idleBetweenPolls` option to let the main loop in the listener container sleep between `KafkaConsumer.poll()` calls.
+See its JavaDocs and [Using `KafkaMessageListenerContainer`](#kafka-container) for more information.
+
+When using `AckMode.MANUAL` (or `MANUAL_IMMEDIATE`), you can now cause a redelivery by calling `nack` on the `Acknowledgment`.
+See [Committing Offsets](#committing-offsets) for more information.
+
+Listener performance can now be monitored using Micrometer `Timer` s.
+See [Monitoring](#micrometer) for more information.
+
+The containers now publish additional consumer lifecycle events relating to startup.
+See [Application Events](#events) for more information.
+
+Transactional batch listeners can now support zombie fencing.
+See [Transactions](#transactions) for more information.
+
+The listener container factory can now be configured with a `ContainerCustomizer` to further configure each container after it has been created and configured.
+See [Container factory](#container-factory) for more information.
+
+#### ErrorHandler Changes
+
+The `SeekToCurrentErrorHandler` now treats certain exceptions as fatal and disables retry for those, invoking the recoverer on the first failure.
+
+The `SeekToCurrentErrorHandler` and `SeekToCurrentBatchErrorHandler` can now be configured to apply a `BackOff` (thread sleep) between delivery attempts.
+
+Starting with version 2.3.2, recovered records' offsets will be committed when the error handler returns after recovering a failed record.
+
+See [[seek-to-current]](#seek-to-current) for more information.
+
+The `DeadLetterPublishingRecoverer`, when used in conjunction with an `ErrorHandlingDeserializer`, now sets the payload of the message sent to the dead-letter topic to the original value that could not be deserialized.
+Previously, it was `null` and user code needed to extract the `DeserializationException` from the message headers.
+See [Publishing Dead-letter Records](#dead-letters) for more information.
+
+#### TopicBuilder
+
+A new class, `TopicBuilder`, is provided for more convenient creation of `NewTopic` `@Bean` s for automatic topic provisioning.
+See [Configuring Topics](#configuring-topics) for more information.
+
+#### Kafka Streams Changes
+
+You can now perform additional configuration of the `StreamsBuilderFactoryBean` created by `@EnableKafkaStreams`.
+See [Streams Configuration](#streams-config) for more information.
+
+A `RecoveringDeserializationExceptionHandler` is now provided, which allows records with deserialization errors to be recovered.
+It can be used in conjunction with a `DeadLetterPublishingRecoverer` to send these records to a dead-letter topic.
+See [Recovery from Deserialization Exceptions](#streams-deser-recovery) for more information.
+
+The `HeaderEnricher` transformer has been provided, using SpEL to generate the header values.
+See [Header Enricher](#streams-header-enricher) for more information.
+
+The `MessagingTransformer` has been provided.
+This allows a Kafka Streams topology to interact with a spring-messaging component, such as a Spring Integration flow.
+See [`MessagingTransformer`](#streams-messaging) and [Calling a Spring Integration Flow from a `KStream`](https://docs.spring.io/spring-integration/docs/current/reference/html/kafka.html#streams-integration) for more information.
+
+#### JSON Component Changes
+
+Now all the JSON-aware components are configured by default with a Jackson `ObjectMapper` produced by `JacksonUtils.enhancedObjectMapper()`.
+The `JsonDeserializer` now provides `TypeReference`-based constructors for better handling of target generic container types.
+Also, a `JacksonMimeTypeModule` has been introduced for serialization of `org.springframework.util.MimeType` to plain string.
+See its JavaDocs and [Serialization, Deserialization, and Message Conversion](#serdes) for more information.
+
+A `ByteArrayJsonMessageConverter` has been provided, as well as a new super class for all JSON converters, `JsonMessageConverter`.
+Also, a `StringOrBytesSerializer` is now available; it can serialize `byte[]`, `Bytes`, and `String` values in `ProducerRecord` s.
+See [Spring Messaging Message Conversion](#messaging-message-conversion) for more information.
+
+The `JsonSerializer`, `JsonDeserializer`, and `JsonSerde` now have fluent APIs to make programmatic configuration simpler.
+See the javadocs, [Serialization, Deserialization, and Message Conversion](#serdes), and [Streams JSON Serialization and Deserialization](#serde) for more information.
+
+#### ReplyingKafkaTemplate
+
+When a reply times out, the future is completed exceptionally with a `KafkaReplyTimeoutException` instead of a `KafkaException`.
+
+Also, an overloaded `sendAndReceive` method is now provided that allows specifying the reply timeout on a per-message basis.
+
+#### AggregatingReplyingKafkaTemplate
+
+Extends the `ReplyingKafkaTemplate` by aggregating replies from multiple receivers.
+See [Aggregating Multiple Replies](#aggregating-request-reply) for more information.
+
+#### Transaction Changes
+
+You can now override the producer factory’s `transactionIdPrefix` on the `KafkaTemplate` and `KafkaTransactionManager`.
+See [`transactionIdPrefix`](#transaction-id-prefix) for more information.
+
+#### New Delegating Serializer/Deserializer
+
+The framework now provides a delegating `Serializer` and `Deserializer`, utilizing a header to enable producing and consuming records with multiple key/value types.
+See [Delegating Serializer and Deserializer](#delegating-serialization) for more information.
+
+#### New Retrying Deserializer
+
+The framework now provides a delegating `RetryingDeserializer`, to retry deserialization when transient errors such as network problems might occur.
+See [Retrying Deserializer](#retrying-deserialization) for more information.
+
+### Changes Between 2.1 and 2.2
+
+#### Kafka Client Version
+
+This version requires the 2.0.0 `kafka-clients` or higher.
+
+#### Class and Package Changes
+
+The `ContainerProperties` class has been moved from `org.springframework.kafka.listener.config` to `org.springframework.kafka.listener`.
+
+The `AckMode` enum has been moved from `AbstractMessageListenerContainer` to `ContainerProperties`.
+
+The `setBatchErrorHandler()` and `setErrorHandler()` methods have been moved from `ContainerProperties` to both `AbstractMessageListenerContainer` and `AbstractKafkaListenerContainerFactory`.
+
+#### After Rollback Processing
+
+A new `AfterRollbackProcessor` strategy is provided.
+See [After-rollback Processor](#after-rollback) for more information.
+
+#### `ConcurrentKafkaListenerContainerFactory` Changes
+
+You can now use the `ConcurrentKafkaListenerContainerFactory` to create and configure any `ConcurrentMessageListenerContainer`, not only those for `@KafkaListener` annotations.
+See [Container factory](#container-factory) for more information.
+
+#### Listener Container Changes
+
+A new container property (`missingTopicsFatal`) has been added.
+See [Using `KafkaMessageListenerContainer`](#kafka-container) for more information.
+
+A `ConsumerStoppedEvent` is now emitted when a consumer stops.
+See [Thread Safety](#thread-safety) for more information.
+
+Batch listeners can optionally receive the complete `ConsumerRecords` object instead of a `List`.
+See [Batch Listeners](#batch-listeners) for more information.
+
+The `DefaultAfterRollbackProcessor` and `SeekToCurrentErrorHandler` can now recover (skip) records that keep failing and, by default, do so after 10 failures.
+They can be configured to publish failed records to a dead-letter topic.
+
+Starting with version 2.2.4, the consumer’s group ID can be used while selecting the dead letter topic name.
+
+See [After-rollback Processor](#after-rollback), [[seek-to-current]](#seek-to-current), and [Publishing Dead-letter Records](#dead-letters) for more information.
+
+The `ConsumerStoppingEvent` has been added.
+See [Application Events](#events) for more information.
+
+The `SeekToCurrentErrorHandler` can now be configured to commit the offset of a recovered record when the container is configured with `AckMode.MANUAL_IMMEDIATE` (since 2.2.4).
+See [[seek-to-current]](#seek-to-current) for more information.
+
+#### @KafkaListener Changes
+
+You can now override the `concurrency` and `autoStartup` properties of the listener container factory by setting properties on the annotation.
+You can now add configuration to determine which headers (if any) are copied to a reply message.
+See [`@KafkaListener` Annotation](#kafka-listener-annotation) for more information.
+
+You can now use `@KafkaListener` as a meta-annotation on your own annotations.
+See [`@KafkaListener` as a Meta Annotation](#kafka-listener-meta) for more information.
+
+It is now easier to configure a `Validator` for `@Payload` validation.
+See [`@KafkaListener` `@Payload` Validation](#kafka-validation) for more information.
+
+You can now specify Kafka consumer properties directly on the annotation; these will override any properties with the same name defined in the consumer factory (since version 2.2.4).
+See [Annotation Properties](#annotation-properties) for more information.
+
+#### Header Mapping Changes
+
+Headers of type `MimeType` and `MediaType` are now mapped as simple strings in the `RecordHeader` value.
+Previously, they were mapped as JSON and only `MimeType` was decoded. `MediaType` could not be decoded.
+They are now simple strings for interoperability.
+
+Also, the `DefaultKafkaHeaderMapper` has a new `addToStringClasses` method, allowing the specification of types that should be mapped by using `toString()` instead of JSON.
+See [Message Headers](#headers) for more information.
+
+#### Embedded Kafka Changes
+
+The `KafkaEmbedded` class and its `KafkaRule` interface have been deprecated in favor of the `EmbeddedKafkaBroker` and its JUnit 4 `EmbeddedKafkaRule` wrapper.
+The `@EmbeddedKafka` annotation now populates an `EmbeddedKafkaBroker` bean instead of the deprecated `KafkaEmbedded`.
+This change allows the use of `@EmbeddedKafka` in JUnit 5 tests.
+The `@EmbeddedKafka` annotation now has the attribute `ports` to specify the port that populates the `EmbeddedKafkaBroker`.
+See [Testing Applications](#testing) for more information.
+
+#### JsonSerializer/Deserializer Enhancements
+
+You can now provide type mapping information by using producer and consumer properties.
+
+New constructors are available on the deserializer to allow overriding the type header information with the supplied target type.
+
+The `JsonDeserializer` now removes any type information headers by default.
+
+You can now configure the `JsonDeserializer` to ignore type information headers by using a Kafka property (since 2.2.3).
+
+See [Serialization, Deserialization, and Message Conversion](#serdes) for more information.
+
+#### Kafka Streams Changes
+
+The streams configuration bean must now be a `KafkaStreamsConfiguration` object instead of a `StreamsConfig` object.
+
+The `StreamsBuilderFactoryBean` has been moved from package `…core` to `…config`.
+
+The `KafkaStreamBrancher` has been introduced for a better end-user experience when conditional branches are built on top of a `KStream` instance.
+
+See [Apache Kafka Streams Support](#streams-kafka-streams) and [Configuration](#streams-config) for more information.
+
+#### Transactional ID
+
+When a transaction is started by the listener container, the `transactional.id` is now the `transactionIdPrefix` appended with `<group.id>.<topic>.<partition>`.
+This change allows proper fencing of zombies, [as described here](https://www.confluent.io/blog/transactions-apache-kafka/).
+
+### Changes Between 2.0 and 2.1
+
+#### Kafka Client Version
+
+This version requires the 1.0.0 `kafka-clients` or higher.
+
+The 1.1.x client is supported natively in version 2.2.
+
+#### JSON Improvements
+
+The `StringJsonMessageConverter` and `JsonSerializer` now add type information in `Headers`, letting the converter and `JsonDeserializer` create specific types on reception, based on the message itself rather than a fixed configured type.
+See [Serialization, Deserialization, and Message Conversion](#serdes) for more information.
+
+#### Container Stopping Error Handlers
+
+Container error handlers are now provided for both record and batch listeners that treat any exceptions thrown by the listener as fatal.
+They stop the container.
+See [Handling Exceptions](#annotation-error-handling) for more information.
+
+#### Pausing and Resuming Containers
+
+The listener containers now have `pause()` and `resume()` methods (since version 2.1.3).
+See [Pausing and Resuming Listener Containers](#pause-resume) for more information.
+
+#### Stateful Retry
+
+Starting with version 2.1.3, you can configure stateful retry.
+See [[stateful-retry]](#stateful-retry) for more information.
+
+#### Client ID
+
+Starting with version 2.1.1, you can now set the `client.id` prefix on `@KafkaListener`.
+Previously, to customize the client ID, you needed a separate consumer factory (and container factory) per listener.
+The prefix is suffixed with `-n` to provide unique client IDs when you use concurrency.
+
+#### Logging Offset Commits
+
+By default, logging of topic offset commits is performed at the `DEBUG` logging level.
+Starting with version 2.1.2, a new property in `ContainerProperties` called `commitLogLevel` lets you specify the log level for these messages.
+See [Using `KafkaMessageListenerContainer`](#kafka-container) for more information.
+
+#### Default @KafkaHandler
+
+Starting with version 2.1.3, you can designate one of the `@KafkaHandler` annotations on a class-level `@KafkaListener` as the default.
+See [`@KafkaListener` on a Class](#class-level-kafkalistener) for more information.
+
+#### ReplyingKafkaTemplate
+
+Starting with version 2.1.3, a subclass of `KafkaTemplate` is provided to support request/reply semantics.
+See [Using `ReplyingKafkaTemplate`](#replying-template) for more information.
+
+#### ChainedKafkaTransactionManager
+
+Version 2.1.3 introduced the `ChainedKafkaTransactionManager`.
+(It is now deprecated).
+
+#### Migration Guide from 2.0
+
+See the [2.0 to 2.1 Migration](https://github.com/spring-projects/spring-kafka/wiki/Spring-for-Apache-Kafka-2.0-to-2.1-Migration-Guide) guide.
+
+### Changes Between 1.3 and 2.0
+
+#### Spring Framework and Java Versions
+
+The Spring for Apache Kafka project now requires Spring Framework 5.0 and Java 8.
+
+#### `@KafkaListener` Changes
+
+You can now annotate `@KafkaListener` methods (and classes and `@KafkaHandler` methods) with `@SendTo`.
+If the method returns a result, it is forwarded to the specified topic.
+See [Forwarding Listener Results using `@SendTo`](#annotation-send-to) for more information.
+
+#### Message Listeners
+
+Message listeners can now be aware of the `Consumer` object.
+See [Message Listeners](#message-listeners) for more information.
+
+#### Using `ConsumerAwareRebalanceListener`
+
+Rebalance listeners can now access the `Consumer` object during rebalance notifications.
+See [Rebalancing Listeners](#rebalance-listeners) for more information.
+
+### Changes Between 1.2 and 1.3
+
+#### Support for Transactions
+
+The 0.11.0.0 client library added support for transactions.
+The `KafkaTransactionManager` and other support for transactions have been added.
+See [Transactions](#transactions) for more information.
+
+#### Support for Headers
+
+The 0.11.0.0 client library added support for message headers.
+These can now be mapped to and from `spring-messaging` `MessageHeaders`.
+See [Message Headers](#headers) for more information.
+
+#### Creating Topics
+
+The 0.11.0.0 client library provides an `AdminClient`, which you can use to create topics.
+The `KafkaAdmin` uses this client to automatically add topics defined as `@Bean` instances.
+
+#### Support for Kafka Timestamps
+
+`KafkaTemplate` now supports an API to add records with timestamps.
+New `KafkaHeaders` have been introduced regarding `timestamp` support.
+Also, new `KafkaConditions.timestamp()` and `KafkaMatchers.hasTimestamp()` testing utilities have been added.
+See [Using `KafkaTemplate`](#kafka-template), [`@KafkaListener` Annotation](#kafka-listener-annotation), and [Testing Applications](#testing) for more details.
+
+#### `@KafkaListener` Changes
+
+You can now configure a `KafkaListenerErrorHandler` to handle exceptions.
+See [Handling Exceptions](#annotation-error-handling) for more information.
+
+By default, the `@KafkaListener` `id` property is now used as the `group.id` property, overriding the property configured in the consumer factory (if present).
+Further, you can explicitly configure the `groupId` on the annotation.
+Previously, you would have needed a separate container factory (and consumer factory) to use different `group.id` values for listeners.
+To restore the previous behavior of using the factory configured `group.id`, set the `idIsGroup` property on the annotation to `false`.
+
+#### `@EmbeddedKafka` Annotation
+
+For convenience, a test class-level `@EmbeddedKafka` annotation is provided, to register `KafkaEmbedded` as a bean.
+See [Testing Applications](#testing) for more information.
+
+#### Kerberos Configuration
+
+Support for configuring Kerberos is now provided.
+See [JAAS and Kerberos](#kerberos) for more information.
+
+### Changes Between 1.1 and 1.2
+
+This version uses the 0.10.2.x client.
+
+### Changes Between 1.0 and 1.1
+
+#### Kafka Client
+
+This version uses the Apache Kafka 0.10.x.x client.
+
+#### Batch Listeners
+
+Listeners can be configured to receive the entire batch of messages returned by the `consumer.poll()` operation, rather than one at a time.
+
+#### Null Payloads
+
+Null payloads are used to “delete” keys when you use log compaction.
+
+#### Initial Offset
+
+When explicitly assigning partitions, you can now configure the initial offset relative to the current position for the consumer group, rather than absolute or relative to the current end.
+
+#### Seek
+
+You can now seek the position of each topic or partition.
+You can use this to set the initial position during initialization when group management is in use and Kafka assigns the partitions.
+You can also seek when an idle container is detected or at any arbitrary point in your application’s execution.
+See [Seeking to a Specific Offset](#seek) for more information.
\ No newline at end of file
diff --git a/docs/en/spring-for-graphql/README.md b/docs/en/spring-for-graphql/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/docs/en/spring-hateoas/README.md b/docs/en/spring-hateoas/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/docs/en/spring-hateoas/spring-hateoas.md b/docs/en/spring-hateoas/spring-hateoas.md
new file mode 100644
index 0000000000000000000000000000000000000000..1d9e2a0a34b3dd2b2f131c13a937c9dcd268234c
--- /dev/null
+++ b/docs/en/spring-hateoas/spring-hateoas.md
@@ -0,0 +1,2146 @@
+# Spring HATEOAS - Reference Documentation
+
+## 1. Preface
+
+### 1.1. Migrating to Spring HATEOAS 1.0
+
+For 1.0 we took the chance to re-evaluate some of the design and package structure choices we had made for the 0.x branch.
+There had been an incredible amount of feedback on it, and the major version bump seemed to be the most natural place to refactor those.
+
+#### 1.1.1. The changes
+
+The biggest changes in package structure were driven by the introduction of a hypermedia type registration API to support additional media types in Spring HATEOAS.
+This led to the clear separation of client and server APIs (packages named respectively) as well as media type implementations in the package `mediatype`.
+
+The easiest way to get your code base upgraded to the new API is by using the [migration script](#migrate-to-1.0.script).
+Before we jump to that, here are the changes at a quick glance.
+
+##### Representation models
+
+The `ResourceSupport`/`Resource`/`Resources`/`PagedResources` group of classes never really felt appropriately named.
+After all, these types do not actually manifest resources but rather representation models that can be enriched with hypermedia information and affordances.
+Here’s how the new names map to the old ones:
+
+* `ResourceSupport` is now `RepresentationModel`
+
+* `Resource` is now `EntityModel`
+
+* `Resources` is now `CollectionModel`
+
+* `PagedResources` is now `PagedModel`
+
+Consequently, `ResourceAssembler` has been renamed to `RepresentationModelAssembler` and its methods `toResource(…)` and `toResources(…)` have been renamed to `toModel(…)` and `toCollectionModel(…)` respectively.
+The name changes have also been reflected in the classes contained in `TypeReferences`.
+
+* `RepresentationModel.getLinks()` now exposes a `Links` instance (over a `List<Link>`), as that exposes additional API to concatenate and merge different `Links` instances using various strategies.
+  Also, it has been turned into a self-bound generic type to allow the methods that add links to the instance to return the instance itself.
+
+* The `LinkDiscoverer` API has been moved to the `client` package.
+
+* The `LinkBuilder` and `EntityLinks` APIs have been moved to the `server` package.
+
+* `ControllerLinkBuilder` has been moved into `server.mvc` and deprecated, to be replaced by `WebMvcLinkBuilder`.
+
+* `RelProvider` has been renamed to `LinkRelationProvider` and returns `LinkRelation` instances instead of `String`s.
+
+* `VndError` has been moved to the `mediatype.vnderror` package.
+
+#### 1.1.2. The migration script
+
+You can find [a script](https://github.com/spring-projects/spring-hateoas/tree/master/etc) to run from your application root that will update all import statements and static method references to Spring HATEOAS types that moved in our source code repository.
+Simply download it and run it from your project root.
+By default it will inspect all Java source files and replace the legacy Spring HATEOAS type references with the new ones.
+
+Example 1. Sample application of the migration script
+
+```
+$ ./migrate-to-1.0.sh
+
+Migrating Spring HATEOAS references to 1.0 for files : *.java
+
+Adapting ./src/main/java/…
+…
+
+Done!
+```
+
+Note that the script will not necessarily be able to entirely fix all changes, but it should cover the most important refactorings.
+
+Now verify the changes made to the files in your favorite Git client and commit as appropriate.
+In case you find method or type references unmigrated, please open a ticket in our issue tracker.
+
+#### 1.1.3. Migrating from 1.0 M3 to 1.0 RC1
+
+* The `Link.andAffordance(…)` methods taking affordance details have been moved to `Affordances`. To manually build up `Affordance` instances, now use `Affordances.of(link).afford(…)`. Also note the new `AffordanceBuilder` type exposed from `Affordances` for fluent usage. See [Affordances](#server.affordances) for details.
+
+* `AffordanceModelFactory.getAffordanceModel(…)` now receives `InputPayloadMetadata` and `PayloadMetadata` instances instead of `ResolvableType`s to allow non-type-based implementations. Custom media type implementations have to be adapted to that accordingly.
+
+* HAL-FORMS now does not render property attributes if their value adheres to what’s defined as the default in the spec. That is, if previously `required` was explicitly set to `false`, we now just omit the entry for `required`.
+  We also now only force them to be non-required for templates that use `PATCH` as the HTTP method.
+
+## 2. Fundamentals
+
+This section covers the basics of Spring HATEOAS and its fundamental domain abstractions.
+
+### 2.1. Links
+
+The fundamental idea of hypermedia is to enrich the representation of a resource with hypermedia elements.
+The simplest form of these are links.
+They indicate to a client that it can navigate to a certain resource.
+The semantics of a related resource are defined in a so-called link relation.
+You might have seen this in the header of an HTML file already:
+
+Example 2. A link in an HTML document
+
+```
+<link href="theme.css" rel="stylesheet" type="text/css" />
+```
+
+As you can see, the link points to a resource `theme.css` and indicates that it is a style sheet.
+Links often carry additional information, like the media type that the resource pointed to will return.
+However, the fundamental building blocks of a link are its reference and relation.
+
+Spring HATEOAS lets you work with links through its immutable `Link` value type.
+Its constructor takes both a hypertext reference and a link relation, the latter being defaulted to the IANA link relation `self`.
+Read more on the latter in [Link relations](#fundamentals.link-relations).
+
+Example 3. Using links
+
+```
+Link link = Link.of("/something");
+assertThat(link.getHref()).isEqualTo("/something");
+assertThat(link.getRel()).isEqualTo(IanaLinkRelations.SELF);
+
+link = Link.of("/something", "my-rel");
+assertThat(link.getHref()).isEqualTo("/something");
+assertThat(link.getRel()).isEqualTo(LinkRelation.of("my-rel"));
+```
+
+`Link` exposes other attributes as defined in [RFC-8288](https://tools.ietf.org/html/rfc8288).
+You can set them by calling the corresponding wither method on a `Link` instance.
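+
+As a quick sketch of those withers (the attribute values here are purely illustrative), each call returns a new immutable `Link` carrying the RFC-8288 attribute:
+
+```
+Link link = Link.of("/something")
+    .withTitle("A title")          // the RFC-8288 "title" attribute
+    .withType("application/json")  // hint about the target's media type
+    .withHreflang("en");           // the RFC-8288 "hreflang" attribute
+
+assertThat(link.getTitle()).isEqualTo("A title");
+```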
+Find more information on how to create links pointing to Spring MVC and Spring WebFlux controllers in [Building links in Spring MVC](#server.link-builder.webmvc) and [Building links in Spring WebFlux](#server.link-builder.webflux).
+
+### 2.2. URI templates
+
+For a Spring HATEOAS `Link`, the hypertext reference can not only be a URI, but also a URI template according to [RFC-6570](https://tools.ietf.org/html/rfc6570).
+A URI template contains so-called template variables and allows expansion of these parameters.
+This allows clients to turn parameterized templates into URIs without having to know about the structure of the final URI; they only need to know the names of the variables.
+
+Example 4. Using links with templated URIs
+
+```
+Link link = Link.of("/{segment}/something{?parameter}");
+assertThat(link.isTemplated()).isTrue(); (1)
+assertThat(link.getVariableNames()).contains("segment", "parameter"); (2)
+
+Map<String, Object> values = new HashMap<>();
+values.put("segment", "path");
+values.put("parameter", 42);
+
+assertThat(link.expand(values).getHref()) (3)
+    .isEqualTo("/path/something?parameter=42");
+```
+
+|**1**|The `Link` instance indicates that it is templated, i.e. it contains a URI template.|
+|-----|-------------------------------------------------------------------------------------|
+|**2**|It exposes the parameters contained in the template.|
+|**3**|It allows expansion of the parameters.|
+
+URI templates can be constructed manually and template variables added later on.
+
+Example 5. Working with URI templates
+
+```
+UriTemplate template = UriTemplate.of("/{segment}/something")
+  .with(new TemplateVariable("parameter", VariableType.REQUEST_PARAM));
+
+assertThat(template.toString()).isEqualTo("/{segment}/something{?parameter}");
+```
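+
+A brief sketch continuing Examples 4 and 5: the assembled template expands the same way a templated `Link` does, except that `UriTemplate.expand(…)` yields a `java.net.URI`:
+
+```
+// Reusing the variable values from Example 4
+assertThat(template.expand(values).toString()).isEqualTo("/path/something?parameter=42");
+```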
+### 2.3. Link relations
+
+To indicate the relationship of the target resource to the current one, so-called link relations are used.
+Spring HATEOAS provides a `LinkRelation` type to easily create `String`-based instances of it.
+
+#### 2.3.1. IANA link relations
+
+The Internet Assigned Numbers Authority maintains a set of [predefined link relations](https://www.iana.org/assignments/link-relations/link-relations.xhtml).
+They can be referred to via `IanaLinkRelations`.
+
+Example 6. Using IANA link relations
+
+```
+Link link = Link.of("/some-resource", IanaLinkRelations.NEXT);
+
+assertThat(link.getRel()).isEqualTo(LinkRelation.of("next"));
+assertThat(IanaLinkRelations.isIanaRel(link.getRel())).isTrue();
+```
+
+### 2.4. Representation models
+
+To easily create hypermedia-enriched representations, Spring HATEOAS provides a set of classes with `RepresentationModel` at their root.
+It’s basically a container for a collection of `Link`s and has convenient methods to add those to the model.
+The models can later be rendered into various media type formats that will define how the hypermedia elements look in the representation.
+For more information on this, have a look at [Media types](#mediatypes).
+
+Example 7. The `RepresentationModel` class hierarchy
+
+*(class diagram: `RepresentationModel` at the root, with `EntityModel`, `CollectionModel`, and `PagedModel` as subtypes)*
+
+The default way to work with a `RepresentationModel` is to create a subclass of it to contain all the properties the representation is supposed to contain, create instances of that class, populate the properties and enrich it with links.
+
+Example 8. A sample representation model type
+
+```
+class PersonModel extends RepresentationModel<PersonModel> {
+
+  String firstname, lastname;
+}
+```
+
+The generic self-typing is necessary to let `RepresentationModel.add(…)` return instances of itself.
+The model type can now be used like this:
+
+Example 9. Using the person representation model
+
+```
+PersonModel model = new PersonModel();
+model.firstname = "Dave";
+model.lastname = "Matthews";
+model.add(Link.of("https://myhost/people/42"));
+```
+
+If you returned such an instance from a Spring MVC or WebFlux controller and the client sent an `Accept` header set to `application/hal+json`, the response would look as follows:
+
+Example 10. The HAL representation generated for the person representation model
+
+```
+{
+  "_links" : {
+    "self" : {
+      "href" : "https://myhost/people/42"
+    }
+  },
+  "firstname" : "Dave",
+  "lastname" : "Matthews"
+}
+```
+
+#### 2.4.1. Item resource representation model
+
+For a resource that’s backed by a singular object or concept, a convenience `EntityModel` type exists.
+Instead of creating a custom model type for each concept, you can just reuse an already existing type and wrap instances of it into the `EntityModel`.
+
+Example 11. Using `EntityModel` to wrap existing objects
+
+```
+Person person = new Person("Dave", "Matthews");
+EntityModel<Person> model = EntityModel.of(person);
+```
+
+#### 2.4.2. Collection resource representation model
+
+For resources that are conceptually collections, a `CollectionModel` is available.
+Its elements can either be simple objects or `RepresentationModel` instances in turn.
+
+Example 12. Using `CollectionModel` to wrap a collection of existing objects
+
+```
+Collection<Person> people = Collections.singleton(new Person("Dave", "Matthews"));
+CollectionModel<Person> model = CollectionModel.of(people);
+```
+
+## 3. Server-side support
+
+### 3.1. Building links in Spring MVC
+
+Now we have the domain vocabulary in place, but the main challenge remains: how to create the actual URIs to be wrapped into `Link` instances in a less fragile way. Right now, we would have to duplicate URI strings all over the place. Doing so is brittle and unmaintainable.
+
+Assume you have your Spring MVC controllers implemented as follows:
+
+```
+@Controller
+class PersonController {
+
+  @GetMapping("/people")
+  HttpEntity<PersonModel> showAll() { … }
+
+  @GetMapping("/{person}")
+  HttpEntity<PersonModel> show(@PathVariable Long person) { … }
+}
+```
+
+We see two conventions here.
The first is a collection resource that is exposed through the `@GetMapping` annotation of the controller method, with individual elements of that collection exposed as direct sub-resources. The collection resource might be exposed at a simple URI (as just shown) or more complex ones (such as `/people/{id}/addresses`). Suppose you would like to link to the collection resource of all people. Following the approach from above would cause two problems:
+
+* To create an absolute URI, you would need to look up the protocol, hostname, port, servlet base, and other values. This is cumbersome and requires ugly manual string concatenation code.
+
+* You probably do not want to concatenate the `/people` on top of your base URI, because you would then have to maintain the information in multiple places. If you change the mapping, you then have to change all the clients pointing to it.
+
+Spring HATEOAS now provides a `WebMvcLinkBuilder` that lets you create links by pointing to controller classes.
+The following example shows how to do so:
+
+```
+import static org.springframework.hateoas.server.mvc.WebMvcLinkBuilder.*;
+
+Link link = linkTo(PersonController.class).withRel("people");
+
+assertThat(link.getRel()).isEqualTo(LinkRelation.of("people"));
+assertThat(link.getHref()).endsWith("/people");
+```
+
+The `WebMvcLinkBuilder` uses Spring’s `ServletUriComponentsBuilder` under the hood to obtain the basic URI information from the current request. Assuming your application runs at `http://localhost:8080/your-app`, this is exactly the URI on top of which you are constructing additional parts. The builder now inspects the given controller class for its root mapping and thus ends up with `http://localhost:8080/your-app/people`. You can build more nested links as well.
+The following example shows how to do so:
+
+```
+Person person = new Person(1L, "Dave", "Matthews");
+
+// Produces a link whose href ends with /people/1
+Link link = linkTo(PersonController.class).slash(person.getId()).withSelfRel();
+assertThat(link.getRel(), is(IanaLinkRelations.SELF.value()));
+assertThat(link.getHref(), endsWith("/people/1"));
+```
+
+The builder also allows creating URI instances to build up (for example, response header values):
+
+```
+HttpHeaders headers = new HttpHeaders();
+headers.setLocation(linkTo(PersonController.class).slash(person).toUri());
+
+return new ResponseEntity<>(headers, HttpStatus.CREATED);
+```
+
+#### 3.1.1. Building links that point to methods
+
+You can even build links that point to methods or create dummy controller method invocations.
+The first approach is to hand a `Method` instance to the `WebMvcLinkBuilder`.
+The following example shows how to do so:
+
+```
+Method method = PersonController.class.getMethod("show", Long.class);
+Link link = linkTo(method, 2L).withSelfRel();
+
+assertThat(link.getHref()).endsWith("/people/2");
+```
+
+This is still a bit dissatisfying, as we have to first get a `Method` instance, which throws an exception and is generally quite cumbersome. At least we do not repeat the mapping. An even better approach is to have a dummy method invocation of the target method on a controller proxy, which we can create by using the `methodOn(…)` helper.
+The following example shows how to do so:
+
+```
+Link link = linkTo(methodOn(PersonController.class).show(2L)).withSelfRel();
+
+assertThat(link.getHref()).endsWith("/people/2");
+```
+
+`methodOn(…)` creates a proxy of the controller class that records the method invocation and exposes it in a proxy created for the return type of the method. This allows the fluent expression of the method for which we want to obtain the mapping. However, there are a few constraints on the methods that can be obtained by using this technique:
+
+* The return type has to be capable of proxying, as we need to expose the method invocation on it.
+
+* The parameters handed into the methods are generally neglected (except the ones referred to through `@PathVariable`, because they make up the URI).
+
+### 3.2. Building links in Spring WebFlux
+
+The reactive counterpart to the `WebMvcLinkBuilder` shown above is `WebFluxLinkBuilder`.
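+
+A minimal sketch, assuming a reactive `PersonController` analogous to the MVC controller shown above; the builder mirrors the `linkTo(…)`/`methodOn(…)` API but yields `Mono`-wrapped links, as link resolution happens within the reactive request context:
+
+```
+import static org.springframework.hateoas.server.reactive.WebFluxLinkBuilder.*;
+
+import reactor.core.publisher.Mono;
+
+// Builds a link to the showAll() handler method within the reactive flow
+Mono<Link> link = linkTo(methodOn(PersonController.class).showAll())
+    .withRel("people")
+    .toMono();
+```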
+
+### 3.3. Affordances
+
+> The affordances of the environment are what it offers … what it provides or furnishes, either for good or ill. The verb 'to afford' is found in the dictionary, but the noun 'affordance' is not. I have made it up.
+
+— James J. Gibson, *The Ecological Approach to Visual Perception* (page 126)
+
+REST-based resources provide not just data but controls.
+The last ingredient to form a flexible service is detailed **affordances** on how to use the various controls.
+Because affordances are associated with links, Spring HATEOAS provides an API to attach as many related methods as needed to a link.
+Just as you can create links by pointing to Spring MVC controller methods (see [Building links in Spring MVC](#server.link-builder.webmvc) for details), you can also build up affordances by pointing to the controller methods that handle the related requests.
+
+The following code shows how to take a **self** link and associate two more affordances:
+
+Example 13. Connecting affordances to `GET /employees/{id}`
+
+```
+@GetMapping("/employees/{id}")
+public EntityModel<Employee> findOne(@PathVariable Integer id) {
+
+  Class<EmployeeController> controllerClass = EmployeeController.class;
+
+  // Start the affordance with the "self" link, i.e. this method.
+  Link findOneLink = linkTo(methodOn(controllerClass).findOne(id)).withSelfRel(); (1)
+
+  // Return the affordance + a link back to the entire collection resource.
+  return EntityModel.of(EMPLOYEES.get(id), //
+      findOneLink //
+          .andAffordance(afford(methodOn(controllerClass).updateEmployee(null, id))) (2)
+          .andAffordance(afford(methodOn(controllerClass).partiallyUpdateEmployee(null, id)))); (3)
+}
+```
+
+|**1**|Create the **self** link.|
+|-----|--------------------------|
+|**2**|Associate the `updateEmployee` method with the `self` link.|
+|**3**|Associate the `partiallyUpdateEmployee` method with the `self` link.|
+
+Using `.andAffordance(afford(…))`, you can use the controller’s methods to connect a `PUT` and a `PATCH` operation to a `GET` operation.
+Imagine that the related methods **afforded** above look like this:
+
+Example 14. `updateEmployee` method that responds to `PUT /employees/{id}`
+
+```
+@PutMapping("/employees/{id}")
+public ResponseEntity<?> updateEmployee( //
+    @RequestBody EntityModel<Employee> employee, @PathVariable Integer id)
+```
+
+Example 15. `partiallyUpdateEmployee` method that responds to `PATCH /employees/{id}`
+
+```
+@PatchMapping("/employees/{id}")
+public ResponseEntity<?> partiallyUpdateEmployee( //
+    @RequestBody EntityModel<Employee> employee, @PathVariable Integer id)
+```
+
+Pointing to those methods using the `afford(…)` methods will cause Spring HATEOAS to analyze the request body and response types and capture metadata, allowing different media type implementations to translate that information into descriptions of the inputs and outputs.
+
+#### 3.3.1. Building affordances manually
+
+While pointing to controller methods is the primary way to register affordances for a link, it might be necessary to build some of them manually.
+This can be achieved by using the `Affordances` API:
+
+Example 16. Using the `Affordances` API to manually register affordances
+
+```
+var methodInvocation = methodOn(EmployeeController.class).all();
+
+var link = Affordances.of(linkTo(methodInvocation).withSelfRel()) (1)
+
+    .afford(HttpMethod.POST) (2)
+    .withInputAndOutput(Employee.class) //
+    .withName("createEmployee") //
+
+    .andAfford(HttpMethod.GET) (3)
+    .withOutput(Employee.class) //
+    .addParameters( //
+        QueryParameter.optional("name"), //
+        QueryParameter.optional("role")) //
+    .withName("search") //
+
+    .toLink();
+```
+
+|**1**|You start by creating an instance of `Affordances` from a `Link` instance, creating the context for describing the affordances.|
+|-----|---------------------------------------------------------------------------------------------------------------------------------|
+|**2**|Each affordance starts with the HTTP method it’s supposed to support. We then register a type as the payload description and name the affordance explicitly. The latter can be omitted, and a default name will be derived from the HTTP method and input type name. This effectively creates the same affordance as the pointer to `EmployeeController.newEmployee(…)` created.|
+|**3**|The next affordance is built to reflect what’s happening for the pointer to `EmployeeController.search(…)`. Here we define `Employee` to be the model for the response created and explicitly register `QueryParameter`s.|
+
+Affordances are backed by media type specific affordance models that translate the general affordance metadata into specific representations.
+Please make sure to check the section on affordances in the [Media types](#mediatypes) section to find more details about how to control the exposure of that metadata.
+
+### 3.4. Forwarded header handling
+
+[RFC-7239 forwarding headers](https://tools.ietf.org/html/rfc7239) are most commonly used when your application is behind a proxy, behind a load balancer, or in the cloud.
+The node that actually receives the web request is part of the infrastructure, and *forwards* the request to your application.
+
+Your application may be running on `localhost:8080`, but to the outside world you’re expected to be at `reallycoolsite.com` (and on the web’s standard port 80).
+By having the proxy include extra headers (which many already do), Spring HATEOAS can generate links properly, as it uses Spring Framework functionality to obtain the base URI of the original request.
+
+| |Anything that can change the root URI based on external inputs must be properly guarded.
That’s why, by default, forwarded header handling is **disabled**.
You MUST enable it to be operational.
If you are deploying to the cloud or into a configuration where you control the proxies and load balancers, then you’ll certainly want to use this feature.|
+|---|---|
+
+To enable forwarded header handling, you need to register Spring’s `ForwardedHeaderFilter` for Spring MVC (details [here](https://docs.spring.io/spring/docs/current/spring-framework-reference/web.html#filters-forwarded-headers)) or `ForwardedHeaderTransformer` for Spring WebFlux (details [here](https://docs.spring.io/spring/docs/current/spring-framework-reference/web-reactive.html#webflux-forwarded-headers)) in your application.
+In a Spring Boot application, those components can simply be declared as Spring beans as described [here](https://docs.spring.io/spring-boot/docs/current/reference/html/boot-features-developing-web-applications.html#boot-features-embedded-container-servlets-filters-listeners-beans).
+
+Example 17. Registering a `ForwardedHeaderFilter`
+
+```
+@Bean
+ForwardedHeaderFilter forwardedHeaderFilter() {
+  return new ForwardedHeaderFilter();
+}
+```
+
+This will create a servlet filter that processes all the `X-Forwarded-…` headers and register it properly with the servlet handlers.
+
+For a Spring WebFlux application, the reactive counterpart is `ForwardedHeaderTransformer`:
+
+Example 18. Registering a `ForwardedHeaderTransformer`
+
+```
+@Bean
+ForwardedHeaderTransformer forwardedHeaderTransformer() {
+  return new ForwardedHeaderTransformer();
+}
+```
+
+This will create a function that transforms reactive web requests by processing `X-Forwarded-…` headers and register it properly with WebFlux.
+
+With the configuration shown above in place, a request passing `X-Forwarded-…` headers will see those reflected in the generated links:
+
+Example 19. A request using `X-Forwarded-…` headers
+
+```
+curl -v localhost:8080/employees \
+    -H 'X-Forwarded-Proto: https' \
+    -H 'X-Forwarded-Host: example.com' \
+    -H 'X-Forwarded-Port: 9001'
+```
+
+Example 20. The corresponding response with the links generated to consider those headers
+
+```
+{
+  "_embedded": {
+    "employees": [
+      {
+        "id": 1,
+        "name": "Bilbo Baggins",
+        "role": "burglar",
+        "_links": {
+          "self": {
+            "href": "https://example.com:9001/employees/1"
+          },
+          "employees": {
+            "href": "https://example.com:9001/employees"
+          }
+        }
+      }
+    ]
+  },
+  "_links": {
+    "self": {
+      "href": "https://example.com:9001/employees"
+    },
+    "root": {
+      "href": "https://example.com:9001"
+    }
+  }
+}
+```
+
+### 3.5. Using the EntityLinks interface
+
+| |`EntityLinks` and its various implementations are NOT currently provided out-of-the-box for Spring WebFlux applications.
The contract defined in the `EntityLinks` SPI was originally aimed at Spring Web MVC and doesn’t consider Reactor types.
Developing a comparable contract that supports reactive programming is still in progress.|
+|---|---|
+
+So far, we have created links by pointing to the web framework implementations (that is, the Spring MVC controllers) and inspected the mapping.
+In many cases, these classes essentially read and write representations backed by a model class.
+
+The `EntityLinks` interface now exposes an API to look up a `Link` or `LinkBuilder` based on the model types.
+The methods essentially return links that point either to the collection resource (such as `/people`) or to an item resource (such as `/people/1`).
+The following example shows how to use `EntityLinks`:
+
+```
+EntityLinks links = …;
+LinkBuilder builder = links.linkFor(Customer.class);
+Link link = links.linkToItemResource(Customer.class, 1L);
+```
+
+`EntityLinks` is available via dependency injection by activating `@EnableHypermediaSupport` in your Spring MVC configuration.
+This will cause a variety of default implementations of `EntityLinks` to be registered.
+The most fundamental one is `ControllerEntityLinks`, which inspects Spring MVC controller classes.
+If you want to register your own implementation of `EntityLinks`, check out [this section](#server.entity-links.spi).
+
+#### 3.5.1. EntityLinks based on Spring MVC controllers
+
+Activating entity links functionality causes all the Spring MVC controllers available in the current `ApplicationContext` to be inspected for the `@ExposesResourceFor(…)` annotation.
+The annotation exposes which model type the controller manages.
+Beyond that, we assume that you adhere to the following URI mapping setup and conventions:
+
+* A type level `@ExposesResourceFor(…)` declaring which entity type the controller exposes collection and item resources for.
+
+* A class level base mapping that represents the collection resource.
+
+* An additional method level mapping that extends the mapping to append an identifier as an additional path segment.
+
+The following example shows an implementation of an `EntityLinks`-capable controller:
+
+```
+@Controller
+@ExposesResourceFor(Order.class) (1)
+@RequestMapping("/orders") (2)
+class OrderController {
+
+  @GetMapping (3)
+  ResponseEntity orders(…) { … }
+
+  @GetMapping("{id}") (4)
+  ResponseEntity order(@PathVariable("id") … ) { … }
+}
+```
+
+|**1**|The controller indicates it’s exposing collection and item resources for the entity `Order`.|
+|-----|-----------------------------------------------------------------------------------------------|
+|**2**|Its collection resource is exposed under `/orders`.|
+|**3**|That collection resource can handle `GET` requests. Add more methods for other HTTP methods at your convenience.|
+|**4**|An additional controller method to handle a subordinate resource taking a path variable to expose an item resource, i.e. a single `Order`.|
+
+With this in place, when you enable `EntityLinks` support via `@EnableHypermediaSupport` in your Spring MVC configuration, you can create links to the controller as follows:
+
+```
+@Controller
+class PaymentController {
+
+  private final EntityLinks entityLinks;
+
+  PaymentController(EntityLinks entityLinks) { (1)
+    this.entityLinks = entityLinks;
+  }
+
+  @PutMapping(…)
+  ResponseEntity payment(@PathVariable Long orderId) {
+
+    Link link = entityLinks.linkToItemResource(Order.class, orderId); (2)
+    …
+  }
+}
+```
+
+|**1**|Inject `EntityLinks` made available by `@EnableHypermediaSupport` in your configuration.|
+|-----|-------------------------------------------------------------------------------------------|
+|**2**|Use the API to build links by using the entity types instead of controller classes.|
+
+As you can see, you can refer to resources managing `Order` instances without referring to `OrderController` explicitly.
+
+#### 3.5.2. EntityLinks API in detail
+
+Fundamentally, `EntityLinks` allows you to build `LinkBuilder`s and `Link` instances to collection and item resources of an entity type.
+Methods starting with `linkFor…` will produce `LinkBuilder` instances for you to extend and augment with additional path segments, parameters, etc.
+Methods starting with `linkTo` produce fully prepared `Link` instances.
+
+While providing an entity type is sufficient for collection resources, links to item resources need an identifier to be provided.
+This usually looks like this:
+
+Example 21. Obtaining a link to an item resource
+
+```
+entityLinks.linkToItemResource(order, order.getId());
+```
+
+If you find yourself repeating those method calls, the identifier extraction step can be pulled out into a `Function` to be reused throughout different invocations:
+
+```
+Function<Order, Object> idExtractor = Order::getId; (1)
+
+entityLinks.linkToItemResource(order, idExtractor); (2)
+```
+
+|**1**|The identifier extraction is externalized so that it can be held in a field or constant.|
+|-----|--------------------------------------------------------------------------------------------|
+|**2**|The link lookup using the extractor.|
+
+##### TypedEntityLinks
+
+As controller implementations are often grouped around entity types, you’ll very often find yourself using the same extractor function (see [EntityLinks API in detail](#server.entity-links.api) for details) all over the controller class.
+We can centralize the identifier extraction logic even more by obtaining a `TypedEntityLinks` instance providing the extractor once, so that the actual lookups don’t have to deal with the extraction anymore at all.
+
+Example 22. Using TypedEntityLinks
+
+```
+class OrderController {
+
+  private final TypedEntityLinks<Order> links;
+
+  OrderController(EntityLinks entityLinks) { (1)
+    this.links = entityLinks.forType(Order::getId); (2)
+  }
+
+  @GetMapping
+  ResponseEntity someMethod(…) {
+
+    Order order = … // lookup order
+
+    Link link = links.linkToItemResource(order); (3)
+  }
+}
+```
+
+|**1**|Inject an `EntityLinks` instance.|
+|-----|----------------------------------|
+|**2**|Indicate you’re going to look up `Order` instances with a certain identifier extractor function.|
+|**3**|Look up item resource links based on a sole `Order` instance.|
+
+#### 3.5.3. EntityLinks as SPI
+
+The `EntityLinks` instance created by `@EnableHypermediaSupport` is of type `DelegatingEntityLinks`, which will in turn pick up all other `EntityLinks` implementations available as beans in the `ApplicationContext`.
+It’s registered as a primary bean so that it’s always the sole injection candidate when you inject `EntityLinks` in general.
+
+`ControllerEntityLinks` is the default implementation that will be included in the setup, but users are free to implement and register their own implementations.
+Making those available through the injectable `EntityLinks` instance is a matter of registering your implementation as a Spring bean.
+
+Example 23. Declaring a custom EntityLinks implementation
+
+```
+@Configuration
+class CustomEntityLinksConfiguration {
+
+  @Bean
+  MyEntityLinks myEntityLinks(…) {
+    return new MyEntityLinks(…);
+  }
+}
+```
+
+An example of the extensibility of this mechanism is Spring Data REST’s [`RepositoryEntityLinks`](https://github.com/spring-projects/spring-data-rest/blob/3a0cba94a2cc8739375ecf24086da2f7c3bbf038/spring-data-rest-webmvc/src/main/java/org/springframework/data/rest/webmvc/support/RepositoryEntityLinks.java), which uses the repository mapping information to create links pointing to resources backed by Spring Data repositories.
+At the same time, it even exposes additional lookup methods for other types of resources.
+If you want to make use of these, simply inject `RepositoryEntityLinks` explicitly.
+
+### 3.6. Representation model assembler
+
+As the mapping from an entity to a representation model must be used in multiple places, it makes sense to create a dedicated class responsible for doing so. The conversion contains very custom steps but also a few boilerplate steps:
+
+1. Instantiation of the model class
+
+2. Adding a link with a `rel` of `self` pointing to the resource that gets rendered.
+
+Spring HATEOAS now provides a `RepresentationModelAssemblerSupport` base class that helps reduce the amount of code you need to write.
+The following example shows how to use it:
+
+```
+class PersonModelAssembler extends RepresentationModelAssemblerSupport<Person, PersonModel> {
+
+  public PersonModelAssembler() {
+    super(PersonController.class, PersonModel.class);
+  }
+
+  @Override
+  public PersonModel toModel(Person person) {
+
+    PersonModel resource = createResource(person);
+    // … do further mapping
+    return resource;
+  }
+}
+```
+
+| |`createResource(…)` is code you write to instantiate a `PersonModel` object given a `Person` object. It should only focus on setting attributes, not populating `Links`.|
+|---|---|
+
+Setting the class up as we did in the preceding example gives you the following benefits:
+
+* There are a handful of `createModelWithId(…)` methods that let you create an instance of the resource and have a `Link` with a rel of `self` added to it. The href of that link is determined by the configured controller’s request mapping plus the ID of the entity (for example, `/people/1`). See the sketch after this list.
+
+* The resource type gets instantiated by reflection and expects a no-arg constructor. If you want to use a dedicated constructor or avoid the reflection performance overhead, you can override `instantiateModel(…)`.
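+
+A sketch of a `toModel(…)` implementation that leans on `createModelWithId(…)` for the boilerplate, assuming `Person` exposes the getters used here:
+
+```
+@Override
+public PersonModel toModel(Person person) {
+
+  // Instantiates PersonModel reflectively and adds a self link of /people/{id}
+  PersonModel model = createModelWithId(person.getId(), person);
+
+  model.firstname = person.getFirstname();
+  model.lastname = person.getLastname();
+
+  return model;
+}
+```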
+
+You can then use the assembler to either assemble a `RepresentationModel` or a `CollectionModel`.
+The following example creates a `CollectionModel` of `PersonModel` instances:
+
+```
+Person person = new Person(…);
+Iterable<Person> people = Collections.singletonList(person);
+
+PersonModelAssembler assembler = new PersonModelAssembler();
+PersonModel model = assembler.toModel(person);
+CollectionModel<PersonModel> collectionModel = assembler.toCollectionModel(people);
+```
+
+### 3.7. Representation Model Processors
+
+Sometimes you need to tweak and adjust hypermedia representations after they have been [assembled](#server.representation-model-assembler).
+
+A perfect example is when you have a controller that deals with order fulfillment, but you need to add links related to making payments.
+
+Imagine having your ordering system producing this type of hypermedia:
+
+```
+{
+  "orderId" : "42",
+  "state" : "AWAITING_PAYMENT",
+  "_links" : {
+    "self" : {
+      "href" : "http://localhost/orders/999"
+    }
+  }
+}
+```
+
+You wish to add a link so the client can make payment, but don’t want to mix details about your `PaymentController` into the `OrderController`.
+
+Instead of polluting the details of your ordering system, you can write a `RepresentationModelProcessor` like this:
+
+```
+public class PaymentProcessor implements RepresentationModelProcessor<EntityModel<Order>> { (1)
+
+  @Override
+  public EntityModel<Order> process(EntityModel<Order> model) {
+
+    model.add( (2)
+        Link.of("/payments/{orderId}").withRel(LinkRelation.of("payments")) //
+            .expand(model.getContent().getOrderId()));
+
+    return model; (3)
+  }
+}
+```
+
+|**1**|This processor will only be applied to `EntityModel<Order>` objects.|
+|-----|----------------------------------------------------------------------|
+|**2**|Manipulate the existing `EntityModel` object by adding an unconditional link.|
+|**3**|Return the `EntityModel` so it can be serialized into the requested media type.|
+
+Register the processor with your application:
+
+```
+@Configuration
+public class PaymentProcessingApp {
+
+  @Bean
+  PaymentProcessor paymentProcessor() {
+    return new PaymentProcessor();
+  }
+}
+```
+
+Now when you issue a hypermedia representation of an `Order`, the client receives this:
+
+```
+{
+  "orderId" : "42",
+  "state" : "AWAITING_PAYMENT",
+  "_links" : {
+    "self" : {
+      "href" : "http://localhost/orders/999"
+    },
+    "payments" : { (1)
+      "href" : "/payments/42" (2)
+    }
+  }
+}
+```
+
+|**1**|You see the `LinkRelation.of("payments")` plugged in as this link’s relation.|
+|-----|-------------------------------------------------------------------------------|
+|**2**|The URI was provided by the processor.|
+
+This example is quite simple, but you can easily:
+
+* Use `WebMvcLinkBuilder` or `WebFluxLinkBuilder` to construct a dynamic link to your `PaymentController`.
+
+* Inject any services needed to conditionally add other links (e.g. `cancel`, `amend`) that are driven by state.
+
+* Leverage cross-cutting services like Spring Security to add, remove, or revise links based upon the current user’s context.
+
+Also, in this example, the `PaymentProcessor` alters the provided `EntityModel`. You also have the power to *replace* it with another object. Just be advised that the API requires the return type to equal the input type.
+
+### 3.8. Using the `LinkRelationProvider` API
+
+When building links, you usually need to determine the relation type to be used for the link. In most cases, the relation type is directly associated with a (domain) type.
+We encapsulate the detailed algorithm to look up the relation types behind a `LinkRelationProvider` API that lets you determine the relation types for single and collection resources. The algorithm for looking up the relation type follows:
+
+1. If the type is annotated with `@Relation`, we use the values configured in the annotation (see the sketch at the end of this section).
+
+2. If not, we default to the uncapitalized simple class name plus an appended `List` for the collection `rel`.
+
+3. If the [EVO inflector](https://github.com/atteo/evo-inflector) JAR is in the classpath, we use the plural of the single resource `rel` provided by the pluralizing algorithm.
+
+4. `@Controller` classes annotated with `@ExposesResourceFor` (see [Using the EntityLinks interface](#server.entity-links) for details) transparently look up the relation types for the type configured in the annotation, so that you can use `LinkRelationProvider.getItemResourceRelFor(MyController.class)` and get the relation type of the domain type exposed.
+
+A `LinkRelationProvider` is automatically exposed as a Spring bean when you use `@EnableHypermediaSupport`. You can plug in custom providers by implementing the interface and exposing them as Spring beans in turn.
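+
+As a short sketch of the first rule of that algorithm, annotating a domain type with `@Relation` fixes both relation names (the relation values here are illustrative):
+
+```
+import org.springframework.hateoas.server.core.Relation;
+
+// Item resources render under "person", collection resources under "people"
+@Relation(itemRelation = "person", collectionRelation = "people")
+class Person {
+  // …
+}
+```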
+
+## 4. Media types
+
+### 4.1. HAL – Hypertext Application Language
+
+[JSON Hypertext Application Language](https://tools.ietf.org/html/draft-kelly-json-hal-08), or HAL, is one of the simplest and most widely adopted hypermedia media types when not discussing specific web stacks.
+
+It was the first spec-based media type adopted by Spring HATEOAS.
+
+#### 4.1.1. Building HAL representation models
+
+As of Spring HATEOAS 1.1, we ship a dedicated `HalModelBuilder` that lets you create `RepresentationModel` instances through a HAL-idiomatic API.
+These are its fundamental assumptions:
+
+1. A HAL representation can be backed by an arbitrary object (an entity) that builds up the domain fields contained in the representation.
+
+2. The representation can be enriched by a variety of embedded documents, which can be either arbitrary objects or HAL representations themselves (i.e. containing nested embedded documents and links).
+
+3. Certain HAL-specific patterns (e.g. previews) can be directly used in the API so that the code setting up the representation reads like you’d describe a HAL representation following those idioms.
+
+Here’s an example of the API used:
+
+```
+// An order
+var order = new Order(…); (1)
+
+// The customer who placed the order, looked up in a customer repository
+var customer = customers.findById(order.getCustomerId());
+
+var customerLink = Link.of("/orders/{id}/customer") (2)
+    .expand(order.getId())
+    .withRel("customer");
+
+var additional = …
+
+var model = HalModelBuilder.halModelOf(order)
+    .preview(new CustomerSummary(customer)) (3)
+    .forLink(customerLink) (4)
+    .embed(additional) (5)
+    .link(Link.of(…, IanaLinkRelations.SELF))
+    .build();
+```
+
+|**1**|We set up some domain type. In this case, an order that has a relationship to the customer that placed it.|
+|-----|-------------------------------------------------------------------------------------------------------------|
+|**2**|We prepare a link pointing to a resource that will expose customer details.|
+|**3**|We start building a preview by providing the payload that’s supposed to be rendered inside the `_embedded` clause.
| +|**4**| We conclude that preview by providing the target link. It transparently gets added to the `_links` object and its link relation is used as the key for the object provided in the previous step. | +|**5**|Other objects can be added to show up under `_embedded`.
The key under which they’re listed is derived from the object’s relation settings. They’re customizable via `@Relation` or a dedicated `LinkRelationProvider` (see [Using the `LinkRelationProvider` API](#server.rel-provider) for details).|
+
+```
+{
+  "_links" : {
+    "self" : { "href" : "…" }, (1)
+    "customer" : { "href" : "/orders/4711/customer" } (2)
+  },
+  "_embedded" : {
+    "customer" : { … }, (3)
+    "additional" : { … } (4)
+  }
+}
+```
+
+|**1**|The `self` link as explicitly provided.|
+|-----|-----------------------------------------|
+|**2**|The `customer` link transparently added through `….preview(…).forLink(…)`.|
+|**3**|The preview object provided.|
+|**4**|Additional elements added via explicit `….embed(…)`.|
+
+In HAL, `_embedded` is also used to represent top-level collections.
+They’re usually grouped under the link relation derived from the object’s type.
+I.e. a list of orders would look like this in HAL:
+
+```
+{
+  "_embedded" : {
+    "orders" : [
+      … (1)
+    ]
+  }
+}
+```
+
+|**1**|Individual order documents go here.|
+|-----|------------------------------------|
+
+Creating such a representation is as easy as this:
+
+```
+Collection<Order> orders = …;
+
+HalModelBuilder.emptyHalModel()
+  .embed(orders);
+```
+
+That said, if the collection is empty, there’s no way to derive the link relation to appear inside `_embedded`, so the document will stay empty in that case.
+
+If you prefer to explicitly communicate an empty collection, a type can be handed into the overload of the `….embed(…)` method taking a `Collection`.
+If the collection handed into the method is empty, this will cause an empty array to be rendered under the link relation derived from the given type.
+
+```
+HalModelBuilder.emptyHalModel()
+  .embed(Collections.emptyList(), Order.class);
+  // or
+  .embed(Collections.emptyList(), LinkRelation.of("orders"));
+```
+
+will create the following, more explicit representation:
+
+```
+{
+  "_embedded" : {
+    "orders" : []
+  }
+}
+```
+
+#### 4.1.2. Configuring link rendering
+
+In HAL, the `_links` entry is a JSON object. The property names are [link relations](#fundamentals.link-relations) and each value is either [a link object or an array of link objects](https://tools.ietf.org/html/draft-kelly-json-hal-07#section-4.1.1).
+
+For a given link relation that has two or more links, the spec is clear on representation:
+
+Example 24. HAL document with two links associated with one relation
+
+```
+{
+  "_links": {
+    "item": [
+      { "href": "https://myhost/cart/42" },
+      { "href": "https://myhost/inventory/12" }
+    ]
+  },
+  "customer": "Dave Matthews"
+}
+```
+
+But if there is only one link for a given relation, the spec is ambiguous. You could render that as either a single object or as a single-item array.
+
+By default, Spring HATEOAS uses the most terse approach and renders a single-link relation like this:
+
+Example 25. HAL document with single link rendered as an object
+
+```
+{
+  "_links": {
+    "item": { "href": "https://myhost/inventory/12" }
+  },
+  "customer": "Dave Matthews"
+}
+```
+
+Some users prefer to not switch between arrays and objects when consuming HAL. They would prefer this type of rendering:
+
+Example 26. HAL with single link rendered as an array
+
+```
+{
+  "_links": {
+    "item": [{ "href": "https://myhost/inventory/12" }]
+  },
+  "customer": "Dave Matthews"
+}
+```
+
+If you wish to customize this policy, all you have to do is inject a `HalConfiguration` bean into your application configuration.
+There are multiple choices.
+
+Example 27. Global HAL single-link rendering policy
+
+```
+@Bean
+public HalConfiguration globalPolicy() {
+  return new HalConfiguration() //
+      .withRenderSingleLinks(RenderSingleLinks.AS_ARRAY); (1)
+}
+```
+
+|**1**|Override Spring HATEOAS’s default by rendering ALL single-link relations as arrays.|
+|-----|-------------------------------------------------------------------------------------|
+
+If you prefer to only override some particular link relations, you can create a `HalConfiguration` bean like this:
+
+Example 28. Link relation-based HAL single-link rendering policy
+
+```
+@Bean
+public HalConfiguration linkRelationBasedPolicy() {
+  return new HalConfiguration() //
+      .withRenderSingleLinksFor( //
+          IanaLinkRelations.ITEM, RenderSingleLinks.AS_ARRAY) (1)
+      .withRenderSingleLinksFor( //
+          LinkRelation.of("prev"), RenderSingleLinks.AS_SINGLE); (2)
+}
+```
+
+|**1**|Always render `item` link relations as an array.|
+|-----|--------------------------------------------------|
+|**2**|Render `prev` link relations as an object when there is only one link.|
+
+If neither of these match your needs, you can use an Ant-style path pattern:
+
+Example 29. Pattern-based HAL single-link rendering policy
+
+```
+@Bean
+public HalConfiguration patternBasedPolicy() {
+  return new HalConfiguration() //
+      .withRenderSingleLinksFor( //
+          "http*", RenderSingleLinks.AS_ARRAY); (1)
+}
+```
+
+|**1**|Render all link relations that start with `http` as an array.|
+|-----|---------------------------------------------------------------|
+
+| |The pattern-based approach uses Spring’s `AntPathMatcher`.|
+|---|----------------------------------------------------------|
+
+All of these `HalConfiguration` withers can be combined to form one comprehensive policy, as the sketch below shows. Be sure to test your API extensively to avoid surprises.
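+
+A sketch of such a combined policy, reusing only the withers shown above:
+
+```
+@Bean
+public HalConfiguration combinedPolicy() {
+  return new HalConfiguration() //
+      .withRenderSingleLinks(RenderSingleLinks.AS_ARRAY) // global default: arrays
+      .withRenderSingleLinksFor( //
+          LinkRelation.of("prev"), RenderSingleLinks.AS_SINGLE); // except "prev"
+}
+```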
+
+#### 4.1.3. Link title internationalization
+
+HAL defines a `title` attribute for its link objects.
+These titles can be populated by using Spring’s resource bundle abstraction and a resource bundle named `rest-messages`, so that clients can use them in their UIs directly.
+This bundle will be set up automatically and is used during HAL link serialization.
+
+To define a title for a link, use the key template `_links.$relationName.title` as follows:
+
+Example 30. A sample `rest-messages.properties`
+
+```
+_links.cancel.title=Cancel order
+_links.payment.title=Proceed to checkout
+```
+
+This will result in the following HAL representation:
+
+Example 31. A sample HAL document with link titles defined
+
+```
+{
+  "_links" : {
+    "cancel" : {
+      "href" : "…",
+      "title" : "Cancel order"
+    },
+    "payment" : {
+      "href" : "…",
+      "title" : "Proceed to checkout"
+    }
+  }
+}
+```
+
+#### 4.1.4. Using the `CurieProvider` API
+
+The [Web Linking RFC](https://tools.ietf.org/html/rfc8288#section-2.1) describes registered and extension link relation types. Registered rels are well-known strings registered with the [IANA registry of link relation types](https://www.iana.org/assignments/link-relations/link-relations.xhtml). Extension `rel` URIs can be used by applications that do not wish to register a relation type. Each one is a URI that uniquely identifies the relation type. The `rel` URI can be serialized as a compact URI, or [curie](https://www.w3.org/TR/curie). For example, a curie of `ex:persons` stands for the link relation type `https://example.com/rels/persons` if `ex` is defined as `https://example.com/rels/{rel}`. If curies are used, the base URI must be present in the response scope.
+
+The `rel` values created by the default `LinkRelationProvider` are extension relation types and, as a result, must be URIs, which can cause a lot of overhead. The `CurieProvider` API takes care of that: It lets you define a base URI as a URI template and a prefix that stands for that base URI. If a `CurieProvider` is present, the `LinkRelationProvider` prepends all `rel` values with the curie prefix. Furthermore, a `curies` link is automatically added to the HAL resource.
+
+The following configuration defines a default curie provider:
+
+```
+@Configuration
+@EnableWebMvc
+@EnableHypermediaSupport(type= {HypermediaType.HAL})
+public class Config {
+
+  @Bean
+  public CurieProvider curieProvider() {
+    return new DefaultCurieProvider("ex", new UriTemplate("https://www.example.com/rels/{rel}"));
+  }
+}
+```
+
+Note that now the `ex:` prefix automatically appears before all rel values that are not registered with IANA, as in `ex:orders`. Clients can use the `curies` link to resolve a curie to its full form.
+The following example shows how to do so:
+
+```
+{
+  "_links": {
+    "self": {
+      "href": "https://myhost/person/1"
+    },
+    "curies": {
+      "name": "ex",
+      "href": "https://example.com/rels/{rel}",
+      "templated": true
+    },
+    "ex:orders": {
+      "href": "https://myhost/person/1/orders"
+    }
+  },
+  "firstname": "Dave",
+  "lastname": "Matthews"
+}
+```
+
+Since the purpose of the `CurieProvider` API is to allow for automatic curie creation, you can define only one `CurieProvider` bean per application scope.
+
+### 4.2. HAL-FORMS
+
+[HAL-FORMS](https://rwcbook.github.io/hal-forms/) is designed to add runtime FORM support to the [HAL media type](#mediatypes.hal).
+
+> HAL-FORMS "looks like HAL." However, it is important to keep in mind that HAL-FORMS is not the same as HAL — the two should not be thought of as interchangeable in any way.
+
+— Mike Amundsen, HAL-FORMS spec
+
+To enable this media type, put the following configuration in your code:
+
+Example 32. HAL-FORMS enabled application
+
+```
+@Configuration
+@EnableHypermediaSupport(type = HypermediaType.HAL_FORMS)
+public class HalFormsApplication {
+
+}
+```
+
+Anytime a client supplies an `Accept` header with `application/prs.hal-forms+json`, you can expect something like this:
+
+Example 33. HAL-FORMS sample document
+
+```
+{
+  "firstName" : "Frodo",
+  "lastName" : "Baggins",
+  "role" : "ring bearer",
+  "_links" : {
+    "self" : {
+      "href" : "http://localhost:8080/employees/1"
+    }
+  },
+  "_templates" : {
+    "default" : {
+      "method" : "put",
+      "contentType" : "",
+      "properties" : [ {
+        "name" : "firstName",
+        "required" : true
+      }, {
+        "name" : "lastName",
+        "required" : true
+      }, {
+        "name" : "role",
+        "required" : true
+      } ]
+    },
+    "partiallyUpdateEmployee" : {
+      "method" : "patch",
+      "contentType" : "",
+      "properties" : [ {
+        "name" : "firstName",
+        "required" : false
+      }, {
+        "name" : "lastName",
+        "required" : false
+      }, {
+        "name" : "role",
+        "required" : false
+      } ]
+    }
+  }
+}
+```
+
+Check out the [HAL-FORMS spec](https://rwcbook.github.io/hal-forms/) to understand the details of the **\_templates** attribute.
+Read about the [Affordances API](#server.affordances) to augment your controllers with this extra metadata.
+
+As for single-item (`EntityModel`) and aggregate root collections (`CollectionModel`), Spring HATEOAS renders them identically to [HAL documents](#mediatypes.hal).
+
+#### 4.2.1. Defining HAL-FORMS metadata
+
+HAL-FORMS allows you to describe criteria for each form field.
+Spring HATEOAS lets you customize those by shaping the model type for the input and output types and by using annotations on them.
+
+|Attribute |Description|
+|----------|-----------|
+|`readOnly`|Set to `true` if there’s no setter method for the property. If one is present, use Jackson’s `@JsonProperty(Access.READ_ONLY)` on the accessors or field explicitly. Not rendered by default, thus defaulting to `false`.|
+|`regex`|Can be customized by using JSR-303’s `@Pattern` annotation, either on the field or a type. In the case of the latter, the pattern will be used for every property declared as that particular type. Not rendered by default.|
+|`required`|Can be customized by using JSR-303’s `@NotNull`. Not rendered by default and thus defaulting to `false`. Templates using `PATCH` as the method will automatically have all properties set to not required.|
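+
+As a sketch of how these rules play out, a hypothetical input model could drive the rendered template properties through annotations like this:
+
+```
+import javax.validation.constraints.NotNull;
+import javax.validation.constraints.Pattern;
+
+// Hypothetical input model; getters and setters omitted for brevity
+class PaymentInput {
+
+  @NotNull                       // renders "required" : true
+  private String cardHolder;
+
+  @Pattern(regexp = "[0-9]{16}") // renders "regex" : "[0-9]{16}"
+  private String cardNumber;
+}
+```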
+
+For types that you cannot annotate manually, you can register a custom pattern via a `HalFormsConfiguration` bean present in the application context.
+
+```
+@Configuration
+class CustomConfiguration {
+
+  @Bean
+  HalFormsConfiguration halFormsConfiguration() {
+
+    HalFormsConfiguration configuration = new HalFormsConfiguration();
+    return configuration.registerPatternFor(CreditCardNumber.class, "[0-9]{16}");
+  }
+}
+```
+
+This setup will cause the HAL-FORMS template properties for representation model properties of type `CreditCardNumber` to declare a `regex` field with the value `[0-9]{16}`.
+
+#### 4.2.2. Internationalization of form attributes
+
+HAL-FORMS contains attributes that are intended for human interpretation, like a template’s title or property prompts.
+These can be defined and internationalized using Spring’s resource bundle support and the `rest-messages` resource bundle configured by Spring HATEOAS by default.
+
+##### Template titles
+
+To define a template title, use the following pattern: `_templates.$affordanceName.title`. Note that in HAL-FORMS, the name of a template is `default` if it is the only one.
+This means that you’ll usually have to qualify the key with the local or fully qualified input type name that the affordance describes.
+
+Example 34. Defining HAL-FORMS template titles
+
+```
+_templates.default.title=Some title (1)
+_templates.putEmployee.title=Create employee (2)
+Employee._templates.default.title=Create employee (3)
+com.acme.Employee._templates.default.title=Create employee (4)
+```
+
+|**1**|A global definition for the title using `default` as the key.|
+|-----|---------------------------------------------------------------|
+|**2**|A global definition for the title using the actual affordance name as the key. Unless defined explicitly when creating the affordance, this defaults to `$httpMethod + $simpleInputTypeName`.|
+|**3**|A locally defined title to be applied to all types named `Employee`.|
+|**4**|A title definition using the fully-qualified type name.|
+
+| |Keys using the actual affordance name enjoy preference over the defaulted ones.|
+|---|---------------------------------------------------------------------------------|
+
+##### Property prompts
+
+Property prompts can also be resolved via the `rest-messages` resource bundle automatically configured by Spring HATEOAS.
+The keys can be defined globally, locally, or fully-qualified, and need `._prompt` appended to the actual property key:
+
+Example 35. Defining prompts for a `firstName` property
+
+```
+firstName._prompt=Firstname (1)
+Employee.firstName._prompt=Firstname (2)
+com.acme.Employee.firstName._prompt=Firstname (3)
+```
+
+|**1**|All properties named `firstName` will get "Firstname" rendered, independent of the type they’re declared in.|
+|-----|---------------------------------------------------------------------------------------------------------------|
+|**2**|The `firstName` property in types named `Employee` will be prompted "Firstname".|
+|**3**|The `firstName` property of `com.acme.Employee` will get a prompt of "Firstname" assigned.|
+
+A sample document with both template titles and property prompts defined would then look something like this:
+
+Example 36. A sample HAL-FORMS document with internationalized template titles and property prompts
+
+```
+{
+  …,
+  "_templates" : {
+    "default" : {
+      "title" : "Create employee",
+      "method" : "put",
+      "contentType" : "",
+      "properties" : [ {
+        "name" : "firstName",
+        "prompt" : "Firstname",
+        "required" : true
+      }, {
+        "name" : "lastName",
+        "prompt" : "Lastname",
+        "required" : true
+      }, {
+        "name" : "role",
+        "prompt" : "Role",
+        "required" : true
+      } ]
+    }
+  }
+}
+```
+
+### 4.3. HTTP Problem Details
+
+[Problem Details for HTTP APIs](https://tools.ietf.org/html/rfc7807) is a media type to carry machine-readable details of errors in an HTTP response, to avoid the need to define new error response formats for HTTP APIs.
+
+HTTP Problem Details defines a set of JSON properties that carry additional information to describe error details to HTTP clients.
+Find more details about those properties in particular in the relevant section of the [RFC document](https://tools.ietf.org/html/rfc7807#section-3.1).
+
+You can create such a JSON response by using the `Problem` media type domain type in your Spring MVC controller:
+
+Reporting problem details using Spring HATEOAS' `Problem` type
+
+```
+@RestController
+class PaymentController {
+
+  @PutMapping
+  ResponseEntity<?> issuePayment(@RequestBody PaymentRequest request) {
+
+    PaymentResult result = payments.issuePayment(request.orderId, request.amount);
+
+    if (result.isSuccess()) {
+      return ResponseEntity.ok(result);
+    }
+
+    String title = messages.getMessage("payment.out-of-credit");
+    String detail = messages.getMessage("payment.out-of-credit.details", //
+        new Object[] { result.getBalance(), result.getCost() });
+
+    Problem problem = Problem.create() (1)
+        .withType(OUT_OF_CREDIT_URI) //
+        .withTitle(title) (2)
+        .withDetail(detail) //
+        .withInstance(PAYMENT_ERROR_INSTANCE.expand(result.getPaymentId())) //
+        .withProperties(map -> { (3)
+          map.put("balance", result.getBalance());
+          map.put("accounts", Arrays.asList( //
+              ACCOUNTS.expand(result.getSourceAccountId()), //
+              ACCOUNTS.expand(result.getTargetAccountId()) //
+          ));
+        });
+
+    return ResponseEntity.status(HttpStatus.FORBIDDEN) //
+        .body(problem);
+  }
+}
+```
+
+|**1**|You start by creating an instance of `Problem` using the factory methods exposed.|
|
+|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|**2**|You can define the values for the default properties defined by the media type, e.g. the type URI, the title and details using internationalization features of Spring (see above).|
+|**3**| Custom properties can be added via a `Map` or an explicit object (see below). |
+
+To use a dedicated object for custom properties, declare a type, create and populate an instance of it and hand this into the `Problem` instance either via `….withProperties(…)` or on instance creation via `Problem.create(…)`.
+
+Using a dedicated type to capture extended problem properties
+
+```
+class AccountDetails {
+  int balance;
+  List<String> accounts;
+}
+
+problem.withProperties(result.getDetails());
+
+// or
+
+Problem.create(result.getDetails());
+```
+
+This will result in a response looking like this:
+
+A sample HTTP Problem Details response
+
+```
+{
+  "type": "https://example.com/probs/out-of-credit",
+  "title": "You do not have enough credit.",
+  "detail": "Your current balance is 30, but that costs 50.",
+  "instance": "/account/12345/msgs/abc",
+  "balance": 30,
+  "accounts": ["/account/12345",
+               "/account/67890"]
+}
+```
+
+### 4.4. Collection+JSON
+
+[Collection+JSON](http://amundsen.com/media-types/collection/format/) is a JSON spec registered with the IANA-approved media type `application/vnd.collection+json`.
+
+> [Collection+JSON](http://amundsen.com/media-types/collection/) is a JSON-based read/write hypermedia-type designed to support
+> management and querying of simple collections.
+
+— Mike Amundsen, Collection+JSON spec
+
+Collection+JSON provides a uniform way to represent both single item resources as well as collections.
+To enable this media type, put the following configuration in your code:
+
+Example 37. Collection+JSON enabled application
+
+```
+@Configuration
+@EnableHypermediaSupport(type = HypermediaType.COLLECTION_JSON)
+public class CollectionJsonApplication {
+
+}
+```
+
+This configuration will make your application respond to requests that have an `Accept` header of `application/vnd.collection+json`, as shown below.
+
+The following example from the spec shows a single item:
+
+Example 38. Collection+JSON single item example
+
+```
+{
+  "collection": {
+    "version": "1.0",
+    "href": "https://example.org/friends/", (1)
+    "links": [ (2)
+      {
+        "rel": "feed",
+        "href": "https://example.org/friends/rss"
+      },
+      {
+        "rel": "queries",
+        "href": "https://example.org/friends/?queries"
+      },
+      {
+        "rel": "template",
+        "href": "https://example.org/friends/?template"
+      }
+    ],
+    "items": [ (3)
+      {
+        "href": "https://example.org/friends/jdoe",
+        "data": [ (4)
+          {
+            "name": "fullname",
+            "value": "J. Doe",
+            "prompt": "Full Name"
+          },
+          {
+            "name": "email",
+            "value": "[email protected]",
+            "prompt": "Email"
+          }
+        ],
+        "links": [ (5)
+          {
+            "rel": "blog",
+            "href": "https://examples.org/blogs/jdoe",
+            "prompt": "Blog"
+          },
+          {
+            "rel": "avatar",
+            "href": "https://examples.org/images/jdoe",
+            "prompt": "Avatar",
+            "render": "image"
+          }
+        ]
+      }
+    ]
+  }
+}
+```
+
+|**1**| The `self` link is stored in the document’s `href` attribute. |
+|-----|---------------------------------------------------------------------------------------------------------------|
+|**2**| The document’s top `links` section contains collection-level links (minus the `self` link). 
| +|**3**|The `items` section contains a collection of data. Since this is a single-item document, it only has one entry.| +|**4**| The `data` section contains actual content. It’s made up of properties. | +|**5**| The item’s individual `links`. | + +| |The previous fragment was lifted from the spec. When Spring HATEOAS renders an `EntityModel`, it will:

* Put the `self` link into both the document’s `href` attribute and the item-level `href` attribute.

* Put the rest of the model’s links into both the top-level `links` as well as the item-level `links`.

* Extract the properties from the `EntityModel` and turn them into …|
+|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+When rendering a collection of resources, the document is almost the same, except there will be multiple entries inside
+the `items` JSON array, one for each entry.
+
+More specifically, Spring HATEOAS will:
+
+* Put the entire collection’s `self` link into the top-level `href` attribute.
+
+* Put the `CollectionModel` links (minus `self`) into the top-level `links`.
+
+* Include the corresponding `self` link for each entry from the `CollectionModel.content` collection in each item-level `href`.
+
+* Include all other links for each entry from `CollectionModel.content` in each item-level `links`.
+
+### 4.5. UBER - Uniform Basis for Exchanging Representations
+
+[UBER](https://rawgit.com/uber-hypermedia/specification/master/uber-hypermedia.html) is an experimental JSON spec.
+
+> The UBER document format is a minimal read/write hypermedia type designed to support simple state transfers and ad-hoc
+> hypermedia-based transitions.
+
+— Mike Amundsen, UBER spec
+
+UBER provides a uniform way to represent both single item resources as well as collections. To enable this media type, put the following configuration in your code:
+
+Example 39. UBER+JSON enabled application
+
+```
+@Configuration
+@EnableHypermediaSupport(type = HypermediaType.UBER)
+public class UberApplication {
+
+}
+```
+
+This configuration will make your application respond to requests using the `Accept` header `application/vnd.amundsen-uber+json`, as shown below:
+
+Example 40. UBER sample document
+
+```
+{
+  "uber" : {
+    "version" : "1.0",
+    "data" : [ {
+      "rel" : [ "self" ],
+      "url" : "/employees/1"
+    }, {
+      "name" : "employee",
+      "data" : [ {
+        "name" : "role",
+        "value" : "ring bearer"
+      }, {
+        "name" : "name",
+        "value" : "Frodo"
+      } ]
+    } ]
+  }
+}
+```
+
+This media type is still under development, as is the spec itself. Feel free to [open a ticket](https://github.com/spring-projects/spring-hateoas/issues) if you run into issues using it.
+
+| |**UBER media type** is not associated in any way with **Uber Technologies Inc.**, the ride-sharing company.|
+|---|-----------------------------------------------------------------------------------------------------------|
+
+### 4.6. ALPS - Application-Level Profile Semantics
+
+[ALPS](https://tools.ietf.org/html/draft-amundsen-richardson-foster-alps-01) is a media type for providing
+profile-based metadata about another resource.
+
+> An ALPS document can be used as a profile to
+> explain the application semantics of a document with an application-
+> agnostic media type (such as HTML, HAL, Collection+JSON, Siren,
+> etc.). This increases the reusability of profile documents across
+> media types.
+
+— Mike Amundsen, ALPS spec
+
+ALPS requires no special activation. Instead, you "build" an `Alps` record and return it from either a Spring MVC or a Spring WebFlux web method, as shown below:
+
+Example 41. 
Building an `Alps` record
+
+```
+@GetMapping(value = "/profile", produces = ALPS_JSON_VALUE)
+Alps profile() {
+
+  return Alps.alps() //
+      .doc(doc() //
+          .href("https://example.org/samples/full/doc.html") //
+          .value("value goes here") //
+          .format(Format.TEXT) //
+          .build()) //
+      .descriptor(getExposedProperties(Employee.class).stream() //
+          .map(property -> Descriptor.builder() //
+              .id("class field [" + property.getName() + "]") //
+              .name(property.getName()) //
+              .type(Type.SEMANTIC) //
+              .ext(Ext.builder() //
+                  .id("ext [" + property.getName() + "]") //
+                  .href("https://example.org/samples/ext/" + property.getName()) //
+                  .value("value goes here") //
+                  .build()) //
+              .rt("rt for [" + property.getName() + "]") //
+              .descriptor(Collections.singletonList(Descriptor.builder().id("embedded").build())) //
+              .build()) //
+          .collect(Collectors.toList()))
+      .build();
+}
+```
+
+* This example leverages `PropertyUtils.getExposedProperties()` to extract metadata about the domain object’s attributes.
+
+This fragment has test data plugged in. It yields JSON like this:
+
+Example 42. ALPS JSON
+
+```
+{
+  "version": "1.0",
+  "doc": {
+    "format": "TEXT",
+    "href": "https://example.org/samples/full/doc.html",
+    "value": "value goes here"
+  },
+  "descriptor": [
+    {
+      "id": "class field [name]",
+      "name": "name",
+      "type": "SEMANTIC",
+      "descriptor": [
+        {
+          "id": "embedded"
+        }
+      ],
+      "ext": {
+        "id": "ext [name]",
+        "href": "https://example.org/samples/ext/name",
+        "value": "value goes here"
+      },
+      "rt": "rt for [name]"
+    },
+    {
+      "id": "class field [role]",
+      "name": "role",
+      "type": "SEMANTIC",
+      "descriptor": [
+        {
+          "id": "embedded"
+        }
+      ],
+      "ext": {
+        "id": "ext [role]",
+        "href": "https://example.org/samples/ext/role",
+        "value": "value goes here"
+      },
+      "rt": "rt for [role]"
+    }
+  ]
+}
+```
+
+Instead of linking each field "automatically" to a domain object’s fields, you can write them by hand if you like. It’s also possible
+to use Spring Framework’s message bundles and the `MessageSource` interface. This gives you the ability to delegate these values to
+locale-specific message bundles and even internationalize the metadata.
+
+### 4.7. Community-based media types
+
+Thanks to the [ability to create your own media type](#mediatypes.custom), there are several community-led efforts to build additional media types.
+
+#### 4.7.1. JSON:API
+
+* [Specification](https://jsonapi.org)
+
+* Media type designation: `application/vnd.api+json`
+
+* Latest Release
+
+  * [Reference documentation](https://toedter.github.io/spring-hateoas-jsonapi/release/reference/index.html)
+
+  * [API documentation](https://toedter.github.io/spring-hateoas-jsonapi/release/api/index.html)
+
+* Current Snapshot
+
+  * [Reference documentation](https://toedter.github.io/spring-hateoas-jsonapi/snapshot/reference/index.html)
+
+  * [API documentation](https://toedter.github.io/spring-hateoas-jsonapi/snapshot/api/index.html)
+
+* [Source](https://github.com/toedter/spring-hateoas-jsonapi)
+
+* Project Lead: [Kai Toedter](https://github.com/toedter)
+
+Maven coordinates
+
+```
+<dependency>
+    <groupId>com.toedter</groupId>
+    <artifactId>spring-hateoas-jsonapi</artifactId>
+    <version>{see project page for current version}</version>
+</dependency>
+```
+
+Gradle coordinates
+
+```
+implementation 'com.toedter:spring-hateoas-jsonapi:{see project page for current version}'
+```
+
+Visit the project page for more details if you want snapshot releases.
+
+#### 4.7.2. 
Siren
+
+* [Specification](https://github.com/kevinswiber/siren)
+
+* Media type designation: `application/vnd.siren+json`
+
+* [Reference documentation](https://spring-hateoas-siren.ingogriebsch.de)
+
+* [javadocs](https://spring-hateoas-siren.ingogriebsch.de/apidocs)
+
+* [Source](https://github.com/ingogriebsch/spring-hateoas-siren)
+
+* Project Lead: [Ingo Griebsch](https://github.com/ingogriebsch)
+
+Maven coordinates
+
+```
+<dependency>
+    <groupId>de.ingogriebsch.hateoas</groupId>
+    <artifactId>spring-hateoas-siren</artifactId>
+    <version>{see project page for current version}</version>
+    <scope>compile</scope>
+</dependency>
+```
+
+Gradle coordinates
+
+```
+implementation 'de.ingogriebsch.hateoas:spring-hateoas-siren:{see project page for current version}'
+```
+
+### 4.8. Registering a custom media type
+
+Spring HATEOAS allows you to integrate custom media types through an SPI.
+The building blocks of such an implementation are:
+
+1. Some form of Jackson `ObjectMapper` customization. In its simplest case, that’s a Jackson `Module` implementation.
+
+2. A `LinkDiscoverer` implementation so that the client-side support is able to detect links in representations.
+
+3. A small bit of infrastructure configuration that will allow Spring HATEOAS to find the custom implementation and pick it up.
+
+#### 4.8.1. Custom media type configuration
+
+Custom media type implementations are picked up by Spring HATEOAS by scanning the application context for any implementations of the `HypermediaMappingInformation` interface.
+Each media type must implement this interface in order to:
+
+* Be applied to [`WebClient`](#client.web-client), [`WebTestClient`](#client.web-test-client), or [`RestTemplate`](#client.rest-template) instances.
+
+* Support serving that media type from Spring Web MVC and Spring WebFlux controllers.
+
+Defining your own media type can look as simple as this:
+
+```
+@Configuration
+public class MyMediaTypeConfiguration implements HypermediaMappingInformation {
+
+  @Override
+  public List<MediaType> getMediaTypes() {
+    return MediaType.parseMediaTypes("application/vnd-acme-media-type"); (1)
+  }
+
+  @Override
+  public Module getJacksonModule() {
+    return new Jackson2MyMediaTypeModule(); (2)
+  }
+
+  @Bean
+  MyLinkDiscoverer myLinkDiscoverer() {
+    return new MyLinkDiscoverer(); (3)
+  }
+}
+```
+
+|**1**|The configuration class returns the media type it supports. This applies to both server-side and client-side scenarios.|
+|-----|-----------------------------------------------------------------------------------------------------------------------|
+|**2**| It overrides `getJacksonModule()` to provide custom serializers to create the media type specific representations. |
+|**3**| It also declares a custom `LinkDiscoverer` implementation for further client-side support. |
+
+The Jackson module usually declares `Serializer` and `Deserializer` implementations for the representation model types `RepresentationModel`, `EntityModel`, `CollectionModel` and `PagedModel`.
+In case you need further customization of the Jackson `ObjectMapper` (like a custom `HandlerInstantiator`), you can alternatively override `configureObjectMapper(…)`.
+
+| |Prior versions of the reference documentation have mentioned implementing the `MediaTypeConfigurationProvider` interface and registering it with `spring.factories`.
This is NOT necessary.
This SPI is ONLY used for out-of-the-box media types provided by Spring HATEOAS.
Merely implementing the `HypermediaMappingInformation` interface and registering it as a Spring bean is all that’s needed.|
+|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+#### 4.8.2. Recommendations
+
+The preferred way to implement media type representations is by providing a type hierarchy that matches the expected format and can be serialized by Jackson as is.
+In the `Serializer` and `Deserializer` implementations registered for `RepresentationModel`, convert the instances into the media type-specific model types and then look up the Jackson serializer for those.
+
+The media types supported by default use the same configuration mechanism as third-party implementations would do.
+So it’s worth studying the implementations in [the `mediatype` package](https://github.com/spring-projects/spring-hateoas/tree/master/src/main/java/org/springframework/hateoas/mediatype).
+Note that the built-in media type implementations keep their configuration classes package-private, as they’re activated via `@EnableHypermediaSupport`.
+Custom implementations should probably make those public instead, to make sure users can import those configuration classes from their application packages.
+
+## 5. Configuration
+
+This section describes how to configure Spring HATEOAS.
+
+### 5.1. Using `@EnableHypermediaSupport`
+
+To let the `RepresentationModel` subtypes be rendered according to the specification of various hypermedia representation types, you can activate support for a particular hypermedia representation format through `@EnableHypermediaSupport`. The annotation takes a `HypermediaType` enumeration as its argument. Currently, we support [HAL](https://tools.ietf.org/html/draft-kelly-json-hal) as well as a default rendering. Using the annotation triggers the following:
+
+* It registers necessary Jackson modules to render `EntityModel` and `CollectionModel` in the hypermedia specific format.
+
+* If JSONPath is on the classpath, it automatically registers a `LinkDiscoverer` instance to look up links by their `rel` in plain JSON representations (see [Using `LinkDiscoverer` Instances](#client.link-discoverer)).
+
+* By default, it enables [entity links](#fundamentals.obtaining-links.entity-links) and automatically picks up `EntityLinks` implementations and bundles them into a `DelegatingEntityLinks` instance that you can autowire.
+
+* It automatically picks up all `RelProvider` implementations in the `ApplicationContext` and bundles them into a `DelegatingRelProvider` that you can autowire. It registers providers to consider `@Relation` on domain types as well as Spring MVC controllers. If the [EVO inflector](https://github.com/atteo/evo-inflector) is on the classpath, collection `rel` values are derived by using the pluralizing algorithm implemented in the library (see [[spis.rel-provider]](#spis.rel-provider)).
+
+#### 5.1.1. Explicitly enabling support for dedicated web stacks
+
+By default, `@EnableHypermediaSupport` will reflectively detect the web application stack you’re using and hook into the Spring components registered for those to enable support for hypermedia representations. 
+However, there are situations in which you’d want to explicitly activate support for only a particular stack.
+For example, if your Spring Web MVC-based application uses WebFlux’s `WebClient` to make outgoing requests and that client is not supposed to work with hypermedia elements, you can restrict the functionality to be enabled by explicitly declaring WebMVC in the configuration:
+
+Example 43. Explicitly activating hypermedia support for a particular web stack
+
+```
+@EnableHypermediaSupport(…, stacks = WebStack.WEBMVC)
+class MyHypermediaConfiguration { … }
+```
+
+## 6. Client-side Support
+
+This section describes Spring HATEOAS’s support for clients.
+
+### 6.1. Traverson
+
+Spring HATEOAS provides an API for client-side service traversal. It is inspired by the [Traverson JavaScript library](https://blog.codecentric.de/en/2013/11/traverson/).
+The following example shows how to use it:
+
+```
+Map<String, Object> parameters = new HashMap<>();
+parameters.put("user", 27);
+
+Traverson traverson = new Traverson(URI.create("http://localhost:8080/api/"), MediaTypes.HAL_JSON);
+String name = traverson
+    .follow("movies", "movie", "actor").withTemplateParameters(parameters)
+    .toObject("$.name");
+```
+
+You can set up a `Traverson` instance by pointing it to a REST server and configuring the media types you want to set as `Accept` headers. You can then define the relation names you want to discover and follow. Relation names can either be simple names or JSONPath expressions (starting with a `$`).
+
+The sample then hands a parameter map into the `Traverson` instance. The parameters are used to expand URIs (which are templated) found during the traversal. The traversal is concluded by accessing the representation of the final traversal. In the preceding example, we evaluate a JSONPath expression to access the actor’s name.
+
+The preceding example is the simplest version of traversal, where the `rel` values are strings and, at each hop, the same template parameters are applied.
+
+There are more options to customize template parameters at each level.
+The following example shows these options.
+
+```
+ParameterizedTypeReference<EntityModel<Item>> resourceParameterizedTypeReference = new ParameterizedTypeReference<EntityModel<Item>>() {};
+
+EntityModel<Item> itemResource = traverson.//
+    follow(rel("items").withParameter("projection", "noImages")).//
+    follow("$._embedded.items[0]._links.self.href").//
+    toObject(resourceParameterizedTypeReference);
+```
+
+The static `rel(…)` function is a convenient way to define a single `Hop`. Using `.withParameter(key, value)` makes it simple to specify URI template variables.
+
+| |`.withParameter()` returns a new `Hop` object that is chainable. You can string together as many `.withParameter` as you like. The result is a single `Hop` definition.
The following example shows one way to do so:|
+|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+```
+ParameterizedTypeReference<EntityModel<Item>> resourceParameterizedTypeReference = new ParameterizedTypeReference<EntityModel<Item>>() {};
+
+Map<String, Object> params = Collections.singletonMap("projection", "noImages");
+
+EntityModel<Item> itemResource = traverson.//
+    follow(rel("items").withParameters(params)).//
+    follow("$._embedded.items[0]._links.self.href").//
+    toObject(resourceParameterizedTypeReference);
+```
+
+You can also load an entire `Map` of parameters by using `.withParameters(Map)`.
+
+| |`follow()` is chainable, meaning you can string together multiple hops, as shown in the preceding examples. You can either put multiple string-based `rel` values (`follow("items", "item")`) or a single hop with specific parameters.|
+|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+#### 6.1.1. `EntityModel` vs. `CollectionModel`
+
+The examples shown so far demonstrate how to sidestep Java’s type erasure and convert a single JSON-formatted resource into an `EntityModel` object. However, what if you need to fetch a collection, such as an `_embedded` HAL collection?
+You can do so with only one slight tweak, as the following example shows:
+
+```
+CollectionModelType<Item> collectionModelType =
+    new TypeReferences.CollectionModelType<Item>() {};
+
+CollectionModel<Item> itemResource = traverson.//
+    follow(rel("items")).//
+    toObject(collectionModelType);
+```
+
+Instead of fetching a single resource, this one deserializes a collection into `CollectionModel`.
+
+### 6.2. Using `LinkDiscoverer` Instances
+
+When working with hypermedia-enabled representations, a common task is to find a link with a particular relation type in it. Spring HATEOAS provides [JSONPath](https://code.google.com/p/json-path)-based implementations of the `LinkDiscoverer` interface for either the default representation rendering or HAL out of the box. When using `@EnableHypermediaSupport`, we automatically expose an instance supporting the configured hypermedia type as a Spring bean.
+
+Alternatively, you can set up and use an instance as follows:
+
+```
+String content = "{'_links' : { 'foo' : { 'href' : '/foo/bar' }}}";
+LinkDiscoverer discoverer = new HalLinkDiscoverer();
+Link link = discoverer.findLinkWithRel("foo", content);
+
+assertThat(link.getRel(), is("foo"));
+assertThat(link.getHref(), is("/foo/bar"));
+```
+
+### 6.3. Configuring WebClient instances
+
+If you need to configure a `WebClient` to speak hypermedia, it’s easy. Get hold of the `HypermediaWebClientConfigurer` as shown below:
+
+Example 44. Configuring a `WebClient` yourself
+
+```
+@Bean
+WebClient.Builder hypermediaWebClient(HypermediaWebClientConfigurer configurer) { (1)
+  return configurer.registerHypermediaTypes(WebClient.builder()); (2)
+}
+```
+
+|**1**|Inside your `@Configuration` class, get a copy of the `HypermediaWebClientConfigurer` bean Spring HATEOAS registers.|
+|-----|--------------------------------------------------------------------------------------------------------------------|
+|**2**| After creating a `WebClient.Builder`, use the configurer to register hypermedia types. 
|
+
+| |What `HypermediaWebClientConfigurer` does is register all the right encoders and decoders with a `WebClient.Builder`. To make use of it,
you need to inject the builder somewhere into your application, and run the `build()` method to produce a `WebClient`.|
+|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+If you’re using Spring Boot, there is another way: the `WebClientCustomizer`.
+
+Example 45. Letting Spring Boot configure things
+
+```
+@Bean (4)
+WebClientCustomizer hypermediaWebClientCustomizer(HypermediaWebClientConfigurer configurer) { (1)
+  return webClientBuilder -> { (2)
+    configurer.registerHypermediaTypes(webClientBuilder); (3)
+  };
+}
+```
+
+|**1**| When creating a Spring bean, request a copy of Spring HATEOAS’s `HypermediaWebClientConfigurer` bean. |
+|-----|----------------------------------------------------------------------------------------------------------------------------------|
+|**2**| Use a Java 8 lambda expression to define a `WebClientCustomizer`. |
+|**3**| Inside the function call, apply the `registerHypermediaTypes` method. |
+|**4**|Return the whole thing as a Spring bean so Spring Boot can pick it up and apply it to its autoconfigured `WebClient.Builder` bean.|
+
+At this stage, whenever you need a concrete `WebClient`, simply inject `WebClient.Builder` into your code, and use `build()`. The `WebClient` instance
+will be able to interact using hypermedia.
+
+### 6.4. Configuring `WebTestClient` Instances
+
+When working with hypermedia-enabled representations, a common task is to run various tests by using `WebTestClient`.
+
+To configure an instance of `WebTestClient` in a test case, check out this example:
+
+Example 46. Configuring `WebTestClient` when using Spring HATEOAS
+
+```
+@Test // #1225
+void webTestClientShouldSupportHypermediaDeserialization() {
+
+  // Configure an application context programmatically.
+  AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext();
+  context.register(HalConfig.class); (1)
+  context.refresh();
+
+  // Create an instance of a controller for testing.
+  WebFluxEmployeeController controller = context.getBean(WebFluxEmployeeController.class);
+  controller.reset();
+
+  // Extract the WebTestClientConfigurer from the app context.
+  HypermediaWebTestClientConfigurer configurer = context.getBean(HypermediaWebTestClientConfigurer.class);
+
+  // Create a WebTestClient by binding to the controller and applying the hypermedia configurer.
+  WebTestClient client = WebTestClient.bindToApplicationContext(context).build().mutateWith(configurer); (2)
+
+  // Exercise the controller.
+  client.get().uri("http://localhost/employees").accept(HAL_JSON) //
+      .exchange() //
+      .expectStatus().isOk() //
+      .expectBody(new TypeReferences.CollectionModelType<EntityModel<Employee>>() {}) (3)
+      .consumeWith(result -> {
+        CollectionModel<EntityModel<Employee>> model = result.getResponseBody(); (4)
+
+        // Assert against the hypermedia model.
+        assertThat(model.getRequiredLink(IanaLinkRelations.SELF)).isEqualTo(Link.of("http://localhost/employees"));
+        assertThat(model.getContent()).hasSize(2);
+      });
+}
+```
+
+|**1**| Register your configuration class that uses `@EnableHypermediaSupport` to enable HAL support. |
+|-----|----------------------------------------------------------------------------------------------------------------------------------|
+|**2**| Use `HypermediaWebTestClientConfigurer` to apply hypermedia support. 
|
+|-----|----------------------------------------------------------------------------------------------------------------------------------|
+|**3**|Ask for a response of `CollectionModel<EntityModel<Employee>>` using Spring HATEOAS’s `TypeReferences.CollectionModelType` helper.|
+|**4**| After getting the "body" in Spring HATEOAS format, assert against it! |
+
+| |`WebTestClient` is an immutable value type, so you can’t alter it in place. `HypermediaWebTestClientConfigurer` returns a mutated
variant that you must then capture to use it.|
+|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+If you are using Spring Boot, there are additional options, like this:
+
+Example 47. Configuring `WebTestClient` when using Spring Boot
+
+```
+@SpringBootTest
+@AutoConfigureWebTestClient (1)
+class WebClientBasedTests {
+
+  @Test
+  void exampleTest(@Autowired WebTestClient.Builder builder, @Autowired HypermediaWebTestClientConfigurer configurer) { (2)
+    WebTestClient client = builder.apply(configurer).build(); (3)
+
+    client.get().uri("/") //
+        .exchange() //
+        .expectBody(new TypeReferences.EntityModelType<Employee>() {}) (4)
+        .consumeWith(result -> {
+          // assert against this EntityModel!
+        });
+  }
+}
+```
+
+|**1**| This is Spring Boot’s test annotation that will configure a `WebTestClient.Builder` for this test class. |
+|-----|-------------------------------------------------------------------------------------------------------------------|
+|**2**|Autowire Spring Boot’s `WebTestClient.Builder` into `builder` and Spring HATEOAS’s configurer as method parameters.|
+|**3**| Use `HypermediaWebTestClientConfigurer` to register support for hypermedia. |
+|**4**| Signal you want an `EntityModel` returned using `TypeReferences`. |
+
+Again, you can use similar assertions as the earlier example.
+
+There are many other ways to fashion test cases. `WebTestClient` can be bound to controllers, functions, and URLs. This section isn’t meant to show all that. Instead, this gives you some examples to get started. The important thing is that by applying `HypermediaWebTestClientConfigurer`, any instance of `WebTestClient` can be altered to handle hypermedia.
+
+### 6.5. Configuring RestTemplate instances
+
+If you want to create your own copy of `RestTemplate`, configured to speak hypermedia, you can use the `HypermediaRestTemplateConfigurer`:
+
+Example 48. Configuring `RestTemplate` yourself
+
+```
+/**
+ * Use the {@link HypermediaRestTemplateConfigurer} to configure a {@link RestTemplate}.
+ */
+@Bean
+RestTemplate hypermediaRestTemplate(HypermediaRestTemplateConfigurer configurer) { (1)
+  return configurer.registerHypermediaTypes(new RestTemplate()); (2)
+}
+```
+
+|**1**|Inside your `@Configuration` class, get a copy of the `HypermediaRestTemplateConfigurer` bean Spring HATEOAS registers.|
+|-----|-----------------------------------------------------------------------------------------------------------------------|
+|**2**| After creating a `RestTemplate`, use the configurer to apply hypermedia types. |
+
+You are free to apply this pattern to any instance of `RestTemplate` that you need, whether it is to create a registered bean or to use it inside a service you define.
+
+If you’re using Spring Boot, there is another approach.
+
+In general, Spring Boot has moved away from the concept of registering a `RestTemplate` bean in the application context.
+
+* When talking to different services, you often need different credentials.
+
+* When `RestTemplate` uses an underlying connection pool, you run into additional issues.
+
+* Users often need different instances rather than a single bean.
+
+To compensate for this, Spring Boot provides a `RestTemplateBuilder`. This autoconfigured bean lets you apply various settings used to fashion
+a `RestTemplate` instance. You ask for a `RestTemplateBuilder` bean, apply any final settings (such as credentials and other details), and then call its `build()` method. 
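+
+For illustration, a minimal sketch of that consumption pattern might look like the following. The `OrderClient` service, the `Order` type, and the base URI are hypothetical; the builder is the one Spring Boot autoconfigures, with any registered customizers (such as the hypermedia one shown in the next example) already applied:
+
+```
+@Service
+class OrderClient {
+
+  private final RestTemplate restTemplate;
+
+  OrderClient(RestTemplateBuilder builder) {
+    // Apply instance-specific settings, then build the final RestTemplate.
+    this.restTemplate = builder
+        .rootUri("https://example.com/api") // hypothetical base URI
+        .build();
+  }
+
+  Order findOrder(String id) {
+    return restTemplate.getForObject("/orders/{id}", Order.class, id);
+  }
+}
+```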
+ +To register hypermedia-based message converters, add the following to your code: + +Example 49. Letting Spring Boot configure things + +``` +@Bean (4) +RestTemplateCustomizer hypermediaRestTemplateCustomizer(HypermediaRestTemplateConfigurer configurer) { (1) + return restTemplate -> { (2) + configurer.registerHypermediaTypes(restTemplate); (3) + }; +} +``` + +|**1**| When creating a Spring bean, request a copy of Spring HATEOAS’s `HypermediaRestTemplateConfigurer` bean. | +|-----|-------------------------------------------------------------------------------------------------------------------------------| +|**2**| Use a Java 8 lambda expression to define a `RestTemplateCustomizer`. | +|**3**| Inside the function call, apply the `registerHypermediaTypes` method. | +|**4**|Return the whole thing as a Spring bean so Spring Boot can pick it up and apply it to its autoconfigured `RestTemplateBuilder`.| + +At this stage, whenever you need a concrete `RestTemplate`, simply inject `RestTemplateBuilder` into your code, and use `build()`. The `RestTemplate` instance +will be able to interact using hypermedia. \ No newline at end of file diff --git a/docs/en/spring-integration/README.md b/docs/en/spring-integration/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring-ldap/README.md b/docs/en/spring-ldap/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring-ldap/spring-ldap.md b/docs/en/spring-ldap/spring-ldap.md new file mode 100644 index 0000000000000000000000000000000000000000..f8afbc483d9d552dddbb172ecfb036816349ad72 --- /dev/null +++ b/docs/en/spring-ldap/spring-ldap.md @@ -0,0 +1,2320 @@ +# Spring LDAP Reference + +## 1. Preface + +The Java Naming and Directory Interface (JNDI) is to LDAP programming what Java Database Connectivity (JDBC) is to SQL programming. There are several similarities between JDBC and JNDI/LDAP (Java LDAP). Despite being two completely different APIs with different pros and cons, they share a number of less flattering characteristics: + +* They require extensive plumbing code, even to perform the simplest of tasks. + +* All resources need to be correctly closed, no matter what happens. + +* Exception handling is difficult. + +These points often lead to massive code duplication in common use cases of the APIs. As we all know, code duplication is one of the worst “code smells”. All in all, it boils down to this: JDBC and LDAP programming in Java are both incredibly dull and repetitive. + +Spring JDBC, a core component of Spring Framework, provides excellent utilities for simplifying SQL programming. We need a similar framework for Java LDAP programming. + +## 2. Introduction + +This section offers a relatively quick introduction to Spring LDAP. + +### 2.1. Overview + +Spring LDAP is designed to simplify LDAP programming in Java. Some of the features provided by the library are: + +* [`JdbcTemplate`](https://docs.spring.io/spring/docs/current/javadoc-api/org/springframework/jdbc/core/JdbcTemplate.html)-style template simplifications to LDAP programming. + +* JPA- or Hibernate-style annotation-based object and directory mapping. + +* Spring Data repository support, including support for QueryDSL. + +* Utilities to simplify building LDAP queries and distinguished names. + +* Proper LDAP connection pooling. + +* Client-side LDAP compensating transaction support. + +### 2.2. 
Traditional Java LDAP versus `LdapTemplate`
+
+Consider a method that should search some storage for all persons and return their names in a list.
+By using JDBC, we would create a *connection* and run a *query* by using a *statement*. We would then loop over the *result set* and retrieve the *column* we want, adding it to a list.
+
+Working against an LDAP database with JNDI, we would create a *context* and perform a *search* by using a *search filter*. We would then loop over the resulting *naming enumeration*, retrieve the *attribute* we want, and add it to a list.
+
+The traditional way of implementing this person-name search method in Java LDAP looks like the next example. Note that only a small portion of the code actually performs tasks related to the business purpose of the method. The rest is plumbing.
+
+```
+package com.example.repository;
+
+public class TraditionalPersonRepoImpl implements PersonRepo {
+   public List getAllPersonNames() {
+      Hashtable env = new Hashtable();
+      env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.ldap.LdapCtxFactory");
+      env.put(Context.PROVIDER_URL, "ldap://localhost:389/dc=example,dc=com");
+
+      DirContext ctx;
+      try {
+         ctx = new InitialDirContext(env);
+      } catch (NamingException e) {
+         throw new RuntimeException(e);
+      }
+
+      List list = new LinkedList();
+      NamingEnumeration results = null;
+      try {
+         SearchControls controls = new SearchControls();
+         controls.setSearchScope(SearchControls.SUBTREE_SCOPE);
+         results = ctx.search("", "(objectclass=person)", controls);
+
+         while (results.hasMore()) {
+            SearchResult searchResult = (SearchResult) results.next();
+            Attributes attributes = searchResult.getAttributes();
+            Attribute attr = attributes.get("cn");
+            String cn = attr.get().toString();
+            list.add(cn);
+         }
+      } catch (NameNotFoundException e) {
+         // The base context was not found.
+         // Just clean up and exit.
+      } catch (NamingException e) {
+         throw new RuntimeException(e);
+      } finally {
+         if (results != null) {
+            try {
+               results.close();
+            } catch (Exception e) {
+               // Never mind this.
+            }
+         }
+         if (ctx != null) {
+            try {
+               ctx.close();
+            } catch (Exception e) {
+               // Never mind this.
+            }
+         }
+      }
+      return list;
+   }
+}
+```
+
+By using the Spring LDAP `AttributesMapper` and `LdapTemplate` classes, we get the exact same functionality with the following code:
+
+```
+package com.example.repo;
+import static org.springframework.ldap.query.LdapQueryBuilder.query;
+
+public class PersonRepoImpl implements PersonRepo {
+   private LdapTemplate ldapTemplate;
+
+   public void setLdapTemplate(LdapTemplate ldapTemplate) {
+      this.ldapTemplate = ldapTemplate;
+   }
+
+   public List getAllPersonNames() {
+      return ldapTemplate.search(
+         query().where("objectclass").is("person"),
+         new AttributesMapper() {
+            public String mapFromAttributes(Attributes attrs)
+               throws NamingException {
+               return attrs.get("cn").get().toString();
+            }
+         });
+   }
+}
+```
+
+The amount of boilerplate code is significantly less than in the traditional example.
+The `LdapTemplate` search method makes sure a `DirContext` instance is created, performs the search, maps the attributes to a string by using the given `AttributesMapper`,
+collects the strings in an internal list, and, finally, returns the list. It also makes sure that the `NamingEnumeration` and `DirContext` are properly closed and
+takes care of any exceptions that might happen. 
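+
+Since `AttributesMapper` declares a single method, the same search can be written even more compactly as a lambda expression on Java 8 and later. This is only a sketch of the preceding example in lambda form; it does not add any functionality:
+
+```
+public List<String> getAllPersonNames() {
+   return ldapTemplate.search(
+      query().where("objectclass").is("person"),
+      (Attributes attrs) -> attrs.get("cn").get().toString());
+}
+```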
+
+Naturally, this being a Spring Framework sub-project, we use Spring to configure our application, as follows:
+
+```
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns:ldap="http://www.springframework.org/schema/ldap"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans
+       http://www.springframework.org/schema/beans/spring-beans.xsd
+       http://www.springframework.org/schema/ldap
+       http://www.springframework.org/schema/ldap/spring-ldap.xsd">
+
+   <ldap:context-source
+          url="ldap://localhost:389"
+          base="dc=example,dc=com"
+          username="cn=Manager"
+          password="secret" />
+
+   <ldap:ldap-template id="ldapTemplate" />
+
+   <bean id="personRepo" class="com.example.repo.PersonRepoImpl">
+      <property name="ldapTemplate" ref="ldapTemplate" />
+   </bean>
+</beans>
+```
+
+| |To use the custom XML namespace to configure the Spring LDAP components, you need to include references to this namespace in your XML declaration, as in the preceding example.|
+|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+### 2.3. What’s new in 2.2
+
+For complete details of 2.2, see the changelog for [2.2.0.RC1](https://github.com/spring-projects/spring-ldap/milestone/21?closed=1).
+The highlights of Spring LDAP 2.2 are as follows:
+
+* [\#415](https://github.com/spring-projects/spring-ldap/issues/415): Added support for Spring 5
+
+* [\#399](https://github.com/spring-projects/spring-ldap/pull/399): Embedded UnboundID LDAP Server support
+
+* [\#410](https://github.com/spring-projects/spring-ldap/pull/410): Added documentation for the Commons Pool 2 support
+
+### 2.4. What’s new in 2.1
+
+For complete details of 2.1, see the changelog for [2.1.0.RC1](https://github.com/spring-projects/spring-ldap/issues?q=milestone%3A2.1.0.RC1) and for [2.1.0](https://github.com/spring-projects/spring-ldap/issues?utf8=%E2%9C%93&q=milestone%3A2.1.0). The highlights of Spring LDAP 2.1 are as follows:
+
+* [\#390](https://github.com/spring-projects/spring-ldap/pull/390): Added Spring Data Hopper support
+
+* [\#351](https://github.com/spring-projects/spring-ldap/issues/351): Added support for commons-pool2
+
+* [\#370](https://github.com/spring-projects/spring-ldap/issues/370): Added support for property placeholders in the XML namespace
+
+* [\#392](https://github.com/spring-projects/spring-ldap/pull/392): Added documentation for the testing support
+
+* [\#401](https://github.com/spring-projects/spring-ldap/pull/401): Switched to AssertJ
+
+* Migrated from JIRA to [GitHub Issues](https://github.com/spring-projects/spring-ldap/issues)
+
+* Added [Gitter Chat](https://gitter.im/spring-projects/spring-ldap)
+
+### 2.5. What’s new in 2.0
+
+While quite significant modernizations have been made to the Spring LDAP API in version 2.0, great care has been taken to ensure backward compatibility as far as possible.
+Code that works with Spring LDAP 1.3.x should, with few exceptions, compile and run when you use the 2.0 libraries without any modifications.
+
+The exception is a small number of classes that have been moved to new packages in order to make a couple of important refactorings possible.
+The moved classes are typically not part of the intended public API, and the migration procedure should be smooth. Whenever a Spring LDAP class cannot be found after upgrade, you should organize the imports in your IDE.
+
+You should expect to encounter some deprecation warnings, though, and there are also a lot of other API improvements.
+The recommendation for getting as much as possible out of the 2.0 version is to move away from the deprecated classes and methods and migrate to the new, improved API utilities.
+
+The following list briefly describes the most important changes in Spring LDAP 2.0:
+
+* Java 6 is now required by Spring LDAP. Spring versions starting at 2.0 and up are still supported.
+
+* The central API has been updated with Java 5+ features such as generics and varargs.
+  As a consequence, the entire `spring-ldap-tiger` module has been deprecated, and we encourage you to migrate to using the core Spring LDAP classes. 
The parameterization of the core interfaces causes lots of compilation warnings on existing code, and we encourage you to take appropriate action to get rid of these warnings.
+
+* The ODM (Object-Directory Mapping) functionality has been moved to core, and there are new methods in `LdapOperations` and `LdapTemplate` that use this automatic translation to and from ODM-annotated classes. See [[odm]](#odm) for more information.
+
+* A custom XML namespace is now (finally) provided to simplify configuration of Spring LDAP. See [[configuration]](#configuration) for more information.
+
+* Spring LDAP now provides support for Spring Data Repository and QueryDSL. See [[repositories]](#repositories) for more information.
+
+* `Name` instances as attribute values are now handled properly with regard to distinguished name equality in `DirContextAdapter` and ODM.
+  See [[dns-as-attribute-values]](#dns-as-attribute-values) and [[odm-dn-attributes]](#odm-dn-attributes) for more information.
+
+* `DistinguishedName` and associated classes have been deprecated in favor of the standard Java `LdapName`.
+  See [Dynamically Building Distinguished Names](#ldap-names) for information on how the library helps when working with `LdapName` objects.
+
+* Fluent LDAP query building support has been added. This makes for a more pleasant programming experience when working with LDAP searches in Spring LDAP.
+  See [Building LDAP Queries](#basic-queries) and [[query-builder-advanced]](#query-builder-advanced) for more information about the LDAP query builder support.
+
+* The old `authenticate` methods in `LdapTemplate` have been deprecated in favor of a couple of new `authenticate` methods that work with `LdapQuery` objects and *throw exceptions* on authentication failure, making it easier for the user to find out what caused an authentication attempt to fail.
+
+* The [samples](https://github.com/spring-projects/spring-ldap/tree/main/samples) have been polished and updated to make use of the features in 2.0.
+  Quite a bit of effort has been put into providing a useful example of an [LDAP user management application](https://github.com/spring-projects/spring-ldap/tree/main/samples/user-admin).
+
+### 2.6. Packaging Overview
+
+At a minimum, to use Spring LDAP you need the following:
+
+* `spring-ldap-core`: The Spring LDAP library
+
+* `spring-core`: Miscellaneous utility classes used internally by the framework
+
+* `spring-beans`: Interfaces and classes for manipulating Java beans
+
+* `spring-data-commons`: Base infrastructure for repository support and so on
+
+* `slf4j`: A simple logging facade, used internally
+
+In addition to the required dependencies, the following optional dependencies are required for certain functionality:
+
+* `spring-context`: Needed if your application is wired up by using the Spring Application Context. `spring-context` adds the ability for application objects to obtain resources by using a consistent API. It is definitely needed if you plan to use the `BaseLdapPathBeanPostProcessor`.
+
+* `spring-tx`: Needed if you plan to use the client-side compensating transaction support.
+
+* `spring-jdbc`: Needed if you plan to use the client-side compensating transaction support.
+
+* `commons-pool`: Needed if you plan to use the pooling functionality.
+
+* `spring-batch`: Needed if you plan to use the LDIF parsing functionality together with Spring Batch.
+
+### 2.7. 
Getting Started + +The [samples](https://github.com/spring-projects/spring-ldap/tree/main/samples) provide some useful examples of how to use Spring LDAP for common use cases. + +### 2.8. Support + +If you have questions, ask them on [Stack Overflow with the `spring-ldap` tag](https://stackoverflow.com/questions/tagged/spring-ldap). +The project web page is [https://spring.io/spring-ldap/](https://spring.io/spring-ldap/). + +### 2.9. Acknowledgements + +The initial effort when starting the Spring LDAP project was sponsored by [Jayway](https://www.jayway.com). +Current maintenance of the project is funded by [Pivotal](https://pivotal.io), which has since been acquired by [VMware](https://vmware.com). + +Thanks to [Structure101](https://structure101.com/) for providing an open source license that has come in handy for keeping the project structure in check. + +## 3. Basic Usage + +This section describes the basics of using Spring LDAP. It contains the following content: + +* [Search and Lookup Using `AttributesMapper`](#spring-ldap-basic-usage-search-lookup-attributesmapper) + +* [Building LDAP Queries](#basic-queries) + +* [Dynamically Building Distinguished Names](#ldap-names) + +* [Examples](#spring-ldap-basic-usage-examples) + +* [Binding and Unbinding](#spring-ldap-basic-usage-binding-unbinding) + +* [Updating](#spring-ldap-basic-usage-updating) + +### 3.1. Search and Lookup Using `AttributesMapper` + +The following example uses an [`AttributesMapper`](https://docs.spring.io/spring-ldap/docs/current/apidocs/org/springframework/ldap/core/AttributesMapper.html) to build a List of all the common names of all the person objects. + +Example 1. `AttributesMapper` that returns a single attribute + +``` +package com.example.repo; +import static org.springframework.ldap.query.LdapQueryBuilder.query; + +public class PersonRepoImpl implements PersonRepo { + private LdapTemplate ldapTemplate; + + public void setLdapTemplate(LdapTemplate ldapTemplate) { + this.ldapTemplate = ldapTemplate; + } + + public List getAllPersonNames() { + return ldapTemplate.search( + query().where("objectclass").is("person"), + new AttributesMapper() { + public String mapFromAttributes(Attributes attrs) + throws NamingException { + return (String) attrs.get("cn").get(); + } + }); + } +} +``` + +The inline implementation of `AttributesMapper` gets the desired attribute value from the `Attributes` object and returns it. Internally, `LdapTemplate` iterates over all entries found, calls the given `AttributesMapper` for each entry, and collects the results in a list. The list is then returned by the `search` method. + +Note that the `AttributesMapper` implementation could easily be modified to return a full `Person` object, as follows: + +Example 2. AttributesMapper that returns a Person object + +``` +package com.example.repo; +import static org.springframework.ldap.query.LdapQueryBuilder.query; + +public class PersonRepoImpl implements PersonRepo { + private LdapTemplate ldapTemplate; + ... 
+ private class PersonAttributesMapper implements AttributesMapper { + public Person mapFromAttributes(Attributes attrs) throws NamingException { + Person person = new Person(); + person.setFullName((String)attrs.get("cn").get()); + person.setLastName((String)attrs.get("sn").get()); + person.setDescription((String)attrs.get("description").get()); + return person; + } + } + + public List getAllPersons() { + return ldapTemplate.search(query() + .where("objectclass").is("person"), new PersonAttributesMapper()); + } +} +``` + +Entries in LDAP are uniquely identified by their distinguished name (DN). +If you have the DN of an entry, you can retrieve the entry directly without searching for it. +This is called a “lookup” in Java LDAP. The following example shows a lookup for a `Person` object: + +Example 3. A lookup resulting in a Person object + +``` +package com.example.repo; + +public class PersonRepoImpl implements PersonRepo { + private LdapTemplate ldapTemplate; + ... + public Person findPerson(String dn) { + return ldapTemplate.lookup(dn, new PersonAttributesMapper()); + } +} +``` + +The preceding example looks up the specified DN and passes the found attributes to the supplied `AttributesMapper` — in this case, resulting in a `Person` object. + +### 3.2. Building LDAP Queries + +LDAP searches involve a number of parameters, including the following: + +* Base LDAP path: Where in the LDAP tree should the search start. + +* Search scope: How deep in the LDAP tree should the search go. + +* Attributes to return. + +* Search filter: The criteria to use when selecting elements within scope. + +Spring LDAP provides an [`LdapQueryBuilder`](https://docs.spring.io/spring-ldap/docs/current/apidocs/org/springframework/ldap/query/LdapQueryBuilder.html) with a fluent API for building LDAP Queries. + +Suppose you want to perform a search starting at the base DN `dc=261consulting,dc=com`, +limiting the returned attributes to `cn` and `sn`, with a filter of `(&(objectclass=person)(sn=?))`, where we want the `?` to be replaced with the value of the `lastName` parameter. +The following example shows how to do it by using the `LdapQueryBuilder`: + +Example 4. Building a search filter dynamically + +``` +package com.example.repo; +import static org.springframework.ldap.query.LdapQueryBuilder.query; + +public class PersonRepoImpl implements PersonRepo { + private LdapTemplate ldapTemplate; + ... + public List getPersonNamesByLastName(String lastName) { + + LdapQuery query = query() + .base("dc=261consulting,dc=com") + .attributes("cn", "sn") + .where("objectclass").is("person") + .and("sn").is(lastName); + + return ldapTemplate.search(query, + new AttributesMapper() { + public String mapFromAttributes(Attributes attrs) + throws NamingException { + + return (String) attrs.get("cn").get(); + } + }); + } +} +``` + +| |In addition to simplifying building of complex search parameters, the `LdapQueryBuilder` and its associated classes also provide proper escaping of any unsafe characters in search filters. 
This prevents “LDAP injection”, where a user might use such characters to inject unwanted operations into your LDAP operations.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |`LdapTemplate` includes many overloaded methods for performing LDAP searches. This is in order to accommodate as many different use cases and programming style preferences as possible. For the vast majority of use cases, the methods that take an `LdapQuery` as input are the recommended methods to use.| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The `AttributesMapper` is only one of the available callback interfaces you can use when handling search and lookup data. See [Simplifying Attribute Access and Manipulation with `DirContextAdapter`](#dirobjectfactory) for alternatives.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +For more information on the `LdapQueryBuilder`, see [[query-builder-advanced]](#query-builder-advanced). + +### 3.3. Dynamically Building Distinguished Names + +The standard Java implementation of Distinguished Name ([`LdapName`](https://docs.oracle.com/javase/6/docs/api/javax/naming/ldap/LdapName.html)) +performs well when it comes to parsing Distinguished Names. However, in practical use, this implementation has a number of shortcomings: + +* The `LdapName` implementation is mutable, which is badly suited for an object that represents identity. + +* Despite its mutable nature, the API for dynamically building or modifying Distinguished Names by using `LdapName` is cumbersome. + Extracting values of indexed or (particularly) named components is also a little bit awkward. + +* Many of the operations on `LdapName` throw checked exceptions, requiring `try-catch` statements for situations where the error is typically fatal and cannot be repaired in a meaningful manner. + +To simplify working with Distinguished Names, Spring LDAP provides an [`LdapNameBuilder`](https://docs.spring.io/spring-ldap/docs/current/apidocs/org/springframework/ldap/support/LdapNameBuilder.html), +as well as a number of utility methods in [`LdapUtils`](https://docs.spring.io/spring-ldap/docs/current/apidocs/org/springframework/ldap/support/LdapUtils.html) that help when working with `LdapName`. + +#### 3.3.1. Examples + +This section presents a few examples of the subjects covered in the preceding sections. +The first example dynamically builds an `LdapName` by using `LdapNameBuilder`: + +Example 5. 
Dynamically building an `LdapName` by using `LdapNameBuilder` + +``` +package com.example.repo; +import org.springframework.ldap.support.LdapNameBuilder; +import javax.naming.Name; + +public class PersonRepoImpl implements PersonRepo { + public static final String BASE_DN = "dc=example,dc=com"; + + protected Name buildDn(Person p) { + return LdapNameBuilder.newInstance(BASE_DN) + .add("c", p.getCountry()) + .add("ou", p.getCompany()) + .add("cn", p.getFullname()) + .build(); + } + ... +} +``` + +Assume that a `Person` has the following attributes: + +|Attribute Name|Attribute Value| +|--------------|---------------| +| `country` | Sweden | +| `company` | Some Company | +| `fullname` | Some Person | + +The preceding code would then result in the following distinguished name: + +``` +cn=Some Person, ou=Some Company, c=Sweden, dc=example, dc=com +``` + +The following example extracts values from a distinguished name by using `LdapUtils` + +Example 6. Extracting values from a distinguished name by using `LdapUtils` + +``` +package com.example.repo; +import org.springframework.ldap.support.LdapNameBuilder; +import javax.naming.Name; +public class PersonRepoImpl implements PersonRepo { +... + protected Person buildPerson(Name dn, Attributes attrs) { + Person person = new Person(); + person.setCountry(LdapUtils.getStringValue(dn, "c")); + person.setCompany(LdapUtils.getStringValue(dn, "ou")); + person.setFullname(LdapUtils.getStringValue(dn, "cn")); + // Populate rest of person object using attributes. + + return person; + } +} +``` + +Since Java versions prior to and including 1.4 did not provide any public Distinguished Name implementation at all, Spring LDAP 1.x provided its own implementation, `DistinguishedName`. +This implementation suffered from a couple of shortcomings of its own and has been deprecated in version 2.0. You should now use `LdapName` along with the utilities described earlier. + +### 3.4. Binding and Unbinding + +This section describes how to add and remove data. Updating is covered in the [next section](#spring-ldap-basic-usage-updating). + +#### 3.4.1. Adding Data + +Inserting data in Java LDAP is called binding. This is somewhat confusing, because in LDAP terminology, “bind” means something completely different. +A JNDI bind performs an LDAP Add operation, associating a new entry that has a specified distinguished name with a set of attributes. +The following example adds data by using `LdapTemplate`: + +Example 7. Adding data using Attributes + +``` +package com.example.repo; + +public class PersonRepoImpl implements PersonRepo { + private LdapTemplate ldapTemplate; + ... + public void create(Person p) { + Name dn = buildDn(p); + ldapTemplate.bind(dn, null, buildAttributes(p)); + } + + private Attributes buildAttributes(Person p) { + Attributes attrs = new BasicAttributes(); + BasicAttribute ocattr = new BasicAttribute("objectclass"); + ocattr.add("top"); + ocattr.add("person"); + attrs.put(ocattr); + attrs.put("cn", "Some Person"); + attrs.put("sn", "Person"); + return attrs; + } +} +``` + +Manual attributes building is — while dull and verbose — sufficient for many purposes. You can, however, simplify the binding operation further, as described in [Simplifying Attribute Access and Manipulation with `DirContextAdapter`](#dirobjectfactory). + +#### 3.4.2. Removing Data + +Removing data in Java LDAP is called unbinding. +A JNDI unbind performs an LDAP Delete operation, removing the entry associated with the specified distinguished name from the LDAP tree. 
The following example removes data by using `LdapTemplate`:

Example 8. Removing data

```
package com.example.repo;

public class PersonRepoImpl implements PersonRepo {
  private LdapTemplate ldapTemplate;
  ...
  public void delete(Person p) {
    Name dn = buildDn(p);
    ldapTemplate.unbind(dn);
  }
}
```

### 3.5. Updating

In Java LDAP, data can be modified in two ways: either by using `rebind` or by using `modifyAttributes`.

#### 3.5.1. Updating by Using Rebind

A `rebind` is a crude way to modify data. It is basically an `unbind` followed by a `bind`.
The following example uses `rebind`:

Example 9. Modifying using rebind

```
package com.example.repo;

public class PersonRepoImpl implements PersonRepo {
  private LdapTemplate ldapTemplate;
  ...
  public void update(Person p) {
    Name dn = buildDn(p);
    ldapTemplate.rebind(dn, null, buildAttributes(p));
  }
}
```

#### 3.5.2. Updating by Using `modifyAttributes`

A more sophisticated way of modifying data is to use `modifyAttributes`. This operation takes an array of explicit attribute modifications
and performs them on a specific entry, as follows:

Example 10. Modifying using `modifyAttributes`

```
package com.example.repo;

public class PersonRepoImpl implements PersonRepo {
  private LdapTemplate ldapTemplate;
  ...
  public void updateDescription(Person p) {
    Name dn = buildDn(p);
    Attribute attr = new BasicAttribute("description", p.getDescription());
    ModificationItem item = new ModificationItem(DirContext.REPLACE_ATTRIBUTE, attr);
    ldapTemplate.modifyAttributes(dn, new ModificationItem[] {item});
  }
}
```

Building `Attributes` and `ModificationItem` arrays is a lot of work. However, as we describe in [Simplifying Attribute Access and Manipulation with `DirContextAdapter`](#dirobjectfactory),
Spring LDAP provides more help for simplifying these operations.

## 4. Simplifying Attribute Access and Manipulation with `DirContextAdapter`

A little-known (and probably underestimated) feature of the Java LDAP API is the ability to register a `DirObjectFactory` to automatically create objects from found LDAP entries.
Spring LDAP makes use of this feature to return [`DirContextAdapter`](https://docs.spring.io/spring-ldap/docs/current/apidocs/org/springframework/ldap/core/DirContextAdapter.html) instances in certain search and lookup operations.

`DirContextAdapter` is a useful tool for working with LDAP attributes, particularly when adding or modifying data.

### 4.1. Search and Lookup Using `ContextMapper`

Whenever an entry is found in the LDAP tree, its attributes and Distinguished Name (DN) are used by Spring LDAP to construct a `DirContextAdapter`.
This lets us use a [`ContextMapper`](https://docs.spring.io/spring-ldap/docs/current/apidocs/org/springframework/ldap/core/ContextMapper.html) instead of an `AttributesMapper` to transform found values, as follows:

Example 11. Searching using a ContextMapper

```
package com.example.repo;

public class PersonRepoImpl implements PersonRepo {
  ...
  private static class PersonContextMapper implements ContextMapper {
    public Object mapFromContext(Object ctx) {
      DirContextAdapter context = (DirContextAdapter)ctx;
      Person p = new Person();
      p.setFullName(context.getStringAttribute("cn"));
      p.setLastName(context.getStringAttribute("sn"));
      p.setDescription(context.getStringAttribute("description"));
      return p;
    }
  }

  public Person findByPrimaryKey(
      String name, String company, String country) {
    Name dn = buildDn(name, company, country);
    return ldapTemplate.lookup(dn, new PersonContextMapper());
  }
}
```

As shown in the preceding example, we can retrieve the attribute values directly by name without having to go through the `Attributes` and `Attribute` classes.
This is particularly useful when working with multi-value attributes.
Extracting values from multi-value attributes normally requires looping through a `NamingEnumeration` of attribute values returned from the `Attributes` implementation. `DirContextAdapter` does this for you
in the [`getStringAttributes()`](https://docs.spring.io/spring-ldap/docs/current/apidocs/org/springframework/ldap/core/DirContextAdapter.html#getStringAttributes(java.lang.String)) or [`getObjectAttributes()`](https://docs.spring.io/spring-ldap/docs/current/apidocs/org/springframework/ldap/core/DirContextAdapter.html#getObjectAttributes(java.lang.String)) methods.
The following example uses the `getStringAttributes` method:

Example 12. Getting multi-value attribute values using `getStringAttributes()`

```
private static class PersonContextMapper implements ContextMapper {
  public Object mapFromContext(Object ctx) {
    DirContextAdapter context = (DirContextAdapter)ctx;
    Person p = new Person();
    p.setFullName(context.getStringAttribute("cn"));
    p.setLastName(context.getStringAttribute("sn"));
    p.setDescription(context.getStringAttribute("description"));
    // The roleNames property of Person is a String array
    p.setRoleNames(context.getStringAttributes("roleNames"));
    return p;
  }
}
```

#### 4.1.1. Using `AbstractContextMapper`

Spring LDAP provides an abstract base implementation of `ContextMapper`, called [`AbstractContextMapper`](https://docs.spring.io/spring-ldap/docs/current/apidocs/org/springframework/ldap/core/support/AbstractContextMapper.html).
This implementation automatically takes care of casting the supplied `Object` parameter to `DirContextOperations`.
Using `AbstractContextMapper`, the `PersonContextMapper` shown earlier can thus be rewritten as follows:

Example 13. Using an `AbstractContextMapper`

```
private static class PersonContextMapper extends AbstractContextMapper {
  public Object doMapFromContext(DirContextOperations ctx) {
    Person p = new Person();
    p.setFullName(ctx.getStringAttribute("cn"));
    p.setLastName(ctx.getStringAttribute("sn"));
    p.setDescription(ctx.getStringAttribute("description"));
    return p;
  }
}
```

### 4.2. Adding and Updating Data by Using `DirContextAdapter`

While useful when extracting attribute values, `DirContextAdapter` is even more powerful for managing the details
involved in adding and updating data.

#### 4.2.1. Adding Data by Using `DirContextAdapter`

The following example uses `DirContextAdapter` to provide an improved implementation of the `create` repository method presented in [Adding Data](#basic-binding-data):

Example 14. Binding using `DirContextAdapter`

```
package com.example.repo;

public class PersonRepoImpl implements PersonRepo {
  ...
  public void create(Person p) {
    Name dn = buildDn(p);
    DirContextAdapter context = new DirContextAdapter(dn);

    context.setAttributeValues("objectclass", new String[] {"top", "person"});
    context.setAttributeValue("cn", p.getFullName());
    context.setAttributeValue("sn", p.getLastName());
    context.setAttributeValue("description", p.getDescription());

    ldapTemplate.bind(context);
  }
}
```

Note that we pass the `DirContextAdapter` instance directly to `bind`. Since the adapter carries both its distinguished name and its attributes, no separate DN or explicit attributes need to be supplied.

Also note the use of the `setAttributeValues()` method when setting the `objectclass` attribute values.
The `objectclass` attribute is multi-value. Similar to the troubles of extracting multi-value attribute data,
building multi-value attributes is tedious and verbose work. By using the `setAttributeValues()` method, you can have `DirContextAdapter` handle that work for you.

#### 4.2.2. Updating Data by Using `DirContextAdapter`

We previously saw that updating by using `modifyAttributes` is the recommended approach, but that doing so requires us to perform
the task of calculating attribute modifications and constructing `ModificationItem` arrays accordingly. `DirContextAdapter` can do all of this for us, as follows:

Example 15. Updating using `DirContextAdapter`

```
package com.example.repo;

public class PersonRepoImpl implements PersonRepo {
  ...
  public void update(Person p) {
    Name dn = buildDn(p);
    DirContextOperations context = ldapTemplate.lookupContext(dn);

    context.setAttributeValue("cn", p.getFullName());
    context.setAttributeValue("sn", p.getLastName());
    context.setAttributeValue("description", p.getDescription());

    ldapTemplate.modifyAttributes(context);
  }
}
```

When no mapper is passed to `ldapTemplate.lookup()`, the result is a `DirContextAdapter` instance.
While the `lookup` method returns an `Object`, the `lookupContext` convenience method automatically casts the return value to a `DirContextOperations` (the interface that `DirContextAdapter` implements).

Notice that we have duplicate code in the `create` and `update` methods. This code maps from a domain object to a context. It can be extracted to a separate method, as follows:

```
package com.example.repo;

public class PersonRepoImpl implements PersonRepo {
  private LdapTemplate ldapTemplate;

  ...
  public void create(Person p) {
    Name dn = buildDn(p);
    DirContextAdapter context = new DirContextAdapter(dn);

    context.setAttributeValues("objectclass", new String[] {"top", "person"});
    mapToContext(p, context);
    ldapTemplate.bind(context);
  }

  public void update(Person p) {
    Name dn = buildDn(p);
    DirContextOperations context = ldapTemplate.lookupContext(dn);
    mapToContext(p, context);
    ldapTemplate.modifyAttributes(context);
  }

  protected void mapToContext(Person p, DirContextOperations context) {
    context.setAttributeValue("cn", p.getFullName());
    context.setAttributeValue("sn", p.getLastName());
    context.setAttributeValue("description", p.getDescription());
  }
}
```

### 4.3. `DirContextAdapter` and Distinguished Names as Attribute Values

When managing security groups in LDAP, it is common to have attribute values that represent
distinguished names.
Since distinguished name equality differs from String equality (for example, whitespace and case differences
are ignored in distinguished name equality), calculating attribute modifications using string equality does not work as expected.

For instance, if a `member` attribute has a value of `cn=John Doe,ou=People` and we call `ctx.addAttributeValue("member", "CN=John Doe, OU=People")`,
the attribute is now considered to have two values, even though the strings actually represent the same
distinguished name.

As of Spring LDAP 2.0, supplying `javax.naming.Name` instances to the attribute modification methods makes `DirContextAdapter` use distinguished name equality when calculating attribute modifications. If we modify the earlier example to be `ctx.addAttributeValue("member", LdapUtils.newLdapName("CN=John Doe, OU=People"))`, it does **not** render a modification, as the following example shows:

```
public class GroupRepo implements BaseLdapNameAware {
  private LdapTemplate ldapTemplate;
  private LdapName baseLdapPath;

  public void setLdapTemplate(LdapTemplate ldapTemplate) {
    this.ldapTemplate = ldapTemplate;
  }

  public void setBaseLdapPath(LdapName baseLdapPath) {
    this.baseLdapPath = baseLdapPath;
  }

  public void addMemberToGroup(String groupName, Person p) {
    Name groupDn = buildGroupDn(groupName);
    Name userDn = buildPersonDn(
        p.getFullName(),
        p.getCompany(),
        p.getCountry());

    DirContextOperations ctx = ldapTemplate.lookupContext(groupDn);
    ctx.addAttributeValue("member", userDn);

    ldapTemplate.modifyAttributes(ctx);
  }

  public void removeMemberFromGroup(String groupName, Person p) {
    Name groupDn = buildGroupDn(groupName);
    Name userDn = buildPersonDn(
        p.getFullName(),
        p.getCompany(),
        p.getCountry());

    DirContextOperations ctx = ldapTemplate.lookupContext(groupDn);
    ctx.removeAttributeValue("member", userDn);

    ldapTemplate.modifyAttributes(ctx);
  }

  private Name buildGroupDn(String groupName) {
    return LdapNameBuilder.newInstance("ou=Groups")
        .add("cn", groupName).build();
  }

  private Name buildPersonDn(String fullName, String company, String country) {
    return LdapNameBuilder.newInstance(baseLdapPath)
        .add("c", country)
        .add("ou", company)
        .add("cn", fullName)
        .build();
  }
}
```

In the preceding example, we implement `BaseLdapNameAware` to get the base LDAP path, as described in [[base-context-configuration]](#base-context-configuration).
This is necessary because distinguished names as member attribute values must always be absolute from the directory root.
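
To summarize the distinction, the following minimal sketch contrasts the two modification styles. It assumes the `ldapTemplate` and a `groupDn` built as in the preceding example, and that the group entry already has a `member` value of `cn=John Doe,ou=People`:

```
DirContextOperations ctx = ldapTemplate.lookupContext(groupDn);

// Alternative 1: a String value is compared with string equality, so this
// differently formatted rendering of the existing DN would be recorded as a
// second, distinct value:
// ctx.addAttributeValue("member", "CN=John Doe, OU=People");

// Alternative 2: a javax.naming.Name value is compared with distinguished
// name equality, so this call matches the existing value and produces no
// modification item:
ctx.addAttributeValue("member", LdapUtils.newLdapName("CN=John Doe, OU=People"));

// No ModificationItem is rendered for the Name-based call above.
ldapTemplate.modifyAttributes(ctx);
```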

### 4.4. A Complete `PersonRepo` Implementation

To illustrate the usefulness of Spring LDAP and `DirContextAdapter`, the following example shows a complete `PersonRepo` implementation for LDAP:

```
package com.example.repo;

import java.util.List;

import javax.naming.Name;

import org.springframework.ldap.core.ContextMapper;
import org.springframework.ldap.core.DirContextAdapter;
import org.springframework.ldap.core.DirContextOperations;
import org.springframework.ldap.core.LdapTemplate;
import org.springframework.ldap.core.support.AbstractContextMapper;
import org.springframework.ldap.filter.EqualsFilter;
import org.springframework.ldap.query.LdapQuery;
import org.springframework.ldap.support.LdapNameBuilder;
import org.springframework.ldap.support.LdapUtils;

import static org.springframework.ldap.query.LdapQueryBuilder.query;

public class PersonRepoImpl implements PersonRepo {
  private LdapTemplate ldapTemplate;

  public void setLdapTemplate(LdapTemplate ldapTemplate) {
    this.ldapTemplate = ldapTemplate;
  }

  public void create(Person person) {
    DirContextAdapter context = new DirContextAdapter(buildDn(person));
    mapToContext(person, context);
    ldapTemplate.bind(context);
  }

  public void update(Person person) {
    Name dn = buildDn(person);
    DirContextOperations context = ldapTemplate.lookupContext(dn);
    mapToContext(person, context);
    ldapTemplate.modifyAttributes(context);
  }

  public void delete(Person person) {
    ldapTemplate.unbind(buildDn(person));
  }

  public Person findByPrimaryKey(String name, String company, String country) {
    Name dn = buildDn(name, company, country);
    return ldapTemplate.lookup(dn, getContextMapper());
  }

  public List<Person> findByName(String name) {
    LdapQuery query = query()
        .where("objectclass").is("person")
        .and("cn").whitespaceWildcardsLike(name);

    return ldapTemplate.search(query, getContextMapper());
  }

  public List<Person> findAll() {
    EqualsFilter filter = new EqualsFilter("objectclass", "person");
    return ldapTemplate.search(LdapUtils.emptyLdapName(), filter.encode(), getContextMapper());
  }

  protected ContextMapper<Person> getContextMapper() {
    return new PersonContextMapper();
  }

  protected Name buildDn(Person person) {
    return buildDn(person.getFullName(), person.getCompany(), person.getCountry());
  }

  protected Name buildDn(String fullName, String company, String country) {
    return LdapNameBuilder.newInstance()
        .add("c", country)
        .add("ou", company)
        .add("cn", fullName)
        .build();
  }

  protected void mapToContext(Person person, DirContextOperations context) {
    context.setAttributeValues("objectclass", new String[] {"top", "person"});
    context.setAttributeValue("cn", person.getFullName());
    context.setAttributeValue("sn", person.getLastName());
    context.setAttributeValue("description", person.getDescription());
  }

  private static class PersonContextMapper extends AbstractContextMapper<Person> {
    public Person doMapFromContext(DirContextOperations context) {
      Person person = new Person();
      person.setFullName(context.getStringAttribute("cn"));
      person.setLastName(context.getStringAttribute("sn"));
      person.setDescription(context.getStringAttribute("description"));
      return person;
    }
  }
}
```

| |In several cases, the Distinguished Name (DN) of an object is constructed from properties of the object. In the preceding example, the country, company, and full name of the `Person` are used in the DN, which means that updating any of these properties actually requires moving the entry in the LDAP tree by using the `rename()` operation, in addition to updating the `Attribute` values. Since this is highly implementation-specific, you need to keep track of this yourself, either by disallowing changes to these properties or by performing the `rename()` operation in your `update()` method when needed. Note that, by using [[odm]](#odm), the library can automatically handle this for you if you annotate your domain classes appropriately.|
|---|---|

## 5. Object-Directory Mapping (ODM)

Object-relational mapping frameworks (such as Hibernate and JPA) offer developers the ability to use annotations to map relational database tables to Java objects.
The Spring LDAP project offers a similar ability with respect to LDAP directories through a number of methods in `LdapOperations`:

* `<T> T findByDn(Name dn, Class<T> clazz)`

* `<T> T findOne(LdapQuery query, Class<T> clazz)`

* `<T> List<T> find(LdapQuery query, Class<T> clazz)`

* `<T> List<T> findAll(Class<T> clazz)`

* `<T> List<T> findAll(Name base, SearchControls searchControls, Class<T> clazz)`

* `<T> List<T> findAll(Name base, Filter filter, SearchControls searchControls, Class<T> clazz)`

* `void create(Object entry)`

* `void update(Object entry)`

* `void delete(Object entry)`

### 5.1. Annotations

Entity classes managed with the object mapping methods must be annotated with annotations from the `org.springframework.ldap.odm.annotations` package. The available annotations are:

* `@Entry`: Class-level annotation indicating the `objectClass` definitions to which the entity maps. *(required)*

* `@Id`: Indicates the entity DN. The field annotated with `@Id` must be of type `javax.naming.Name` (or a subclass of it). *(required)*

* `@Attribute`: Indicates the mapping of a directory attribute to the object class field.

* `@DnAttribute`: Indicates the mapping of a DN attribute to the object class field.

* `@Transient`: Indicates that the field is not persistent and should be ignored by the `OdmManager`.

The `@Entry` and `@Id` annotations are required to be declared on managed classes. `@Entry` is used to specify which object classes the entity maps to and (optionally) the directory root of the LDAP entries represented by the class.
All object classes for which fields are mapped are required to be declared. Note that, when creating new entries of the managed class,
only the declared object classes are used.

In order for a directory entry to be considered a match to the managed entity, all object classes declared by the directory entry must be declared by the `@Entry` annotation.
For example, assume that you have entries in your LDAP tree that have the following object classes: `inetOrgPerson,organizationalPerson,person,top`.
If you are interested only in changing the attributes defined in the `person` object class, you can annotate your class with `@Entry(objectClasses = { "person", "top" })`.
However, if you want to manage attributes defined in the `inetOrgPerson` object class, you need to use the following: `@Entry(objectClasses = { "inetOrgPerson", "organizationalPerson", "person", "top" })`.

The `@Id` annotation is used to map the distinguished name of the entry to a field. The field must be an instance of `javax.naming.Name`.

The `@Attribute` annotation is used to map directory attributes to entity fields. `@Attribute` is required to declare the name of the attribute to which the field maps and may optionally declare the syntax OID of the LDAP attribute, to guarantee exact matching. `@Attribute` also provides the type declaration, which lets you indicate whether the attribute is regarded as binary-based or string-based by the LDAP JNDI provider.

The `@DnAttribute` annotation is used to map object class fields to and from components in the distinguished name of an entry.
Fields annotated with `@DnAttribute` are automatically populated with the appropriate value from the distinguished name when an entry is read from the directory tree.
Only fields of type `String` can be annotated with `@DnAttribute`. Other types are not supported.
If the `index` attribute of all `@DnAttribute` annotations in a class is specified, the DN can also be automatically calculated when creating and updating entries.
For update scenarios, this also automatically takes care of moving entries in the tree if attributes that are part of the distinguished name have changed.

The `@Transient` annotation indicates that the field should be ignored by the object directory mapping and not mapped to an underlying LDAP attribute. Note that if a `@DnAttribute` field is not to be bound to an `Attribute` (that is, if it is only part of the Distinguished Name and is not represented by an entry attribute), it must also be annotated with `@Transient`.

### 5.2. Execution

When all components have been properly configured and annotated, the object mapping methods of `LdapTemplate` can be used as follows:

```
import static org.springframework.ldap.query.LdapQueryBuilder.query;

@Entry(objectClasses = { "person", "top" }, base="ou=someOu")
public class Person {
  @Id
  private Name dn;

  @Attribute(name="cn")
  @DnAttribute(value="cn", index=1)
  private String fullName;

  // No @Attribute annotation means this will be bound to the LDAP attribute
  // with the same name
  private String description;

  @DnAttribute(value="ou", index=0)
  @Transient
  private String company;

  @Transient
  private String someUnmappedField;
  // ...more attributes below
}

public class OdmPersonRepo {
  @Autowired
  private LdapTemplate ldapTemplate;

  public Person create(Person person) {
    ldapTemplate.create(person);
    return person;
  }

  public Person findByUid(String uid) {
    return ldapTemplate.findOne(query().where("uid").is(uid), Person.class);
  }

  public void update(Person person) {
    ldapTemplate.update(person);
  }

  public void delete(Person person) {
    ldapTemplate.delete(person);
  }

  public List<Person> findAll() {
    return ldapTemplate.findAll(Person.class);
  }

  public List<Person> findByLastName(String lastName) {
    return ldapTemplate.find(query().where("sn").is(lastName), Person.class);
  }
}
```

### 5.3. ODM and Distinguished Names as Attribute Values

Security groups in LDAP commonly contain a multi-value attribute, where each of the values is the distinguished name
of a user in the system. The difficulties involved when handling these kinds of attributes are discussed in [[dns-as-attribute-values]](#dns-as-attribute-values).

ODM also has support for `javax.naming.Name` attribute values, making group modifications easy, as the following example shows:

```
@Entry(objectClasses = {"top", "groupOfUniqueNames"}, base = "cn=groups")
public class Group {

  @Id
  private Name dn;

  @Attribute(name="cn")
  @DnAttribute("cn")
  private String name;

  @Attribute(name="uniqueMember")
  private Set<Name> members;

  public Name getDn() {
    return dn;
  }

  public void setDn(Name dn) {
    this.dn = dn;
  }

  public Set<Name> getMembers() {
    return members;
  }

  public void setMembers(Set<Name> members) {
    this.members = members;
  }

  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  public void addMember(Name member) {
    members.add(member);
  }

  public void removeMember(Name member) {
    members.remove(member);
  }
}
```

When you modify group members by using `setMembers`, `addMember`, and `removeMember` and then call `ldapTemplate.update()`,
attribute modifications are calculated by using distinguished name equality, meaning that the text formatting of
distinguished names is disregarded when figuring out whether they are equal.

## 6. Advanced LDAP Queries

This section covers various ways to use LDAP queries with Spring LDAP.

### 6.1. LDAP Query Builder Parameters

The `LdapQueryBuilder` and its associated classes are intended to support all of the parameters that can be supplied to an LDAP search.
The following parameters are supported:

* `base`: Specifies the root DN in the LDAP tree where the search should start.

* `searchScope`: Specifies how deep into the LDAP tree the search should traverse.

* `attributes`: Specifies the attributes to return from the search. The default is all.

* `countLimit`: Specifies the maximum number of entries to return from the search.

* `timeLimit`: Specifies the maximum time that the search may take.

* Search filter: The conditions that the entries we are looking for must meet.

An `LdapQueryBuilder` is created with a call to its static `query` method. It is intended as a fluent builder API, where the base parameters are defined first, followed by the filter specification calls. Once you have started defining filter conditions with a call to the `where` method, later attempts to call (for example) `base` are rejected. The base search parameters are optional, but at least one filter specification call is required.
The following query searches for all entries with an object class of `person`:

```
import static org.springframework.ldap.query.LdapQueryBuilder.query;
...

List<Person> persons = ldapTemplate.search(
    query().where("objectclass").is("person"),
    new PersonAttributesMapper());
```

The following query searches for all entries with an object class of `person` and a `cn` (common name) of `John Doe`:

```
import static org.springframework.ldap.query.LdapQueryBuilder.query;
...

List<Person> persons = ldapTemplate.search(
    query().where("objectclass").is("person")
        .and("cn").is("John Doe"),
    new PersonAttributesMapper());
```

The following query searches for all entries with an object class of `person`, starting at a `dc` (domain component) of `dc=261consulting,dc=com`:

```
import static org.springframework.ldap.query.LdapQueryBuilder.query;
...

List<Person> persons = ldapTemplate.search(
    query().base("dc=261consulting,dc=com")
        .where("objectclass").is("person"),
    new PersonAttributesMapper());
```

The following query returns the `cn` (common name) attribute for all entries with an object class of `person`, starting at a `dc` (domain component) of `dc=261consulting,dc=com`:

```
import static org.springframework.ldap.query.LdapQueryBuilder.query;
...

List<Person> persons = ldapTemplate.search(
    query().base("dc=261consulting,dc=com")
        .attributes("cn")
        .where("objectclass").is("person"),
    new PersonAttributesMapper());
```

The following query uses `or` to search for multiple spellings of a common name (`cn`):

```
import static org.springframework.ldap.query.LdapQueryBuilder.query;
...

List<Person> persons = ldapTemplate.search(
    query().where("objectclass").is("person")
        .and(query().where("cn").is("Doe").or("cn").is("Doo")),
    new PersonAttributesMapper());
```

### 6.2. Filter Criteria

The earlier examples demonstrate simple equals conditions in LDAP filters. The LDAP query builder has support for the following criteria types:

* `is`: Specifies an equals (=) condition.

* `gte`: Specifies a greater-than-or-equals (>=) condition.

* `lte`: Specifies a less-than-or-equals (<=) condition.

* `like`: Specifies a “like” condition where wildcards can be included in the query. For example, `where("cn").like("J*hn Doe")` results in the following filter: `(cn=J*hn Doe)`.

* `whitespaceWildcardsLike`: Specifies a condition where all whitespace is replaced with wildcards. For example, `where("cn").whitespaceWildcardsLike("John Doe")` results in the following filter: `(cn=*John*Doe*)`.

* `isPresent`: Specifies a condition that checks for the presence of an attribute. For example, `where("cn").isPresent()` results in the following filter: `(cn=*)`.

* `not`: Specifies that the current condition should be negated. For example, `where("sn").not().is("Doe")` results in the following filter: `(!(sn=Doe))`.

### 6.3. Hardcoded Filters

There may be occasions when you want to specify a hardcoded filter as input to an `LdapQuery`. `LdapQueryBuilder` has a number of methods for this purpose:

* `filter(String hardcodedFilter)`: Uses the specified string as a filter. Note that the specified input string is not touched in any way, meaning that this method is not particularly well suited if you are building filters from user input.

* `filter(String filterFormat, String... params)`: Uses the specified string as input to `MessageFormat`, properly encoding the parameters and inserting them at the specified places in the filter string.

* `filter(Filter filter)`: Uses the specified filter.

You cannot mix the hardcoded filter methods with the `where` approach described earlier. It is either one or the other. If you specify a filter by using `filter()`, you get an exception if you try to call `where` afterwards.

## 7. Configuration

The recommended way of configuring Spring LDAP is to use the custom XML configuration namespace. To make this available, you need to include the Spring LDAP namespace declaration in your bean file, as follows:

```
<beans xmlns="http://www.springframework.org/schema/beans"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xmlns:ldap="http://www.springframework.org/schema/ldap"
    xsi:schemaLocation="http://www.springframework.org/schema/beans
        http://www.springframework.org/schema/beans/spring-beans.xsd
        http://www.springframework.org/schema/ldap
        http://www.springframework.org/schema/ldap/spring-ldap.xsd">
```

### 7.1. `ContextSource` Configuration

`ContextSource` is defined by using an `<ldap:context-source>` tag.
The simplest possible `context-source` declaration requires you to specify a server URL, a username, and a password, as follows:

```
<ldap:context-source
    username="cn=Administrator"
    password="secret"
    url="ldap://localhost:389" />
```

The preceding example creates an `LdapContextSource` with default values (see the following table) and the URL and authentication credentials as specified.
The configurable attributes on `context-source` are as follows (required attributes are marked with \*):

| Attribute | Default | Description |
|---|---|---|
| `id` | `contextSource` | The ID of the created bean. |
| `username` | | The username (principal) to use when authenticating with the LDAP server. This is usually the distinguished name of an admin user (for example, `cn=Administrator`) but may differ depending on server and authentication method. Required if `authentication-source-ref` is not explicitly configured. |
| `password` | | The password (credentials) to use when authenticating with the LDAP server. Required if `authentication-source-ref` is not explicitly configured. |
| `url` \* | | The URL of the LDAP server to use. The URL should be in the following format: `ldap://myserver.example.com:389`. For SSL access, use the `ldaps` protocol and the appropriate port (for example, `ldaps://myserver.example.com:636`). If you want failover functionality, you can specify more than one URL, separated by commas (`,`). |
| `base` | `LdapUtils.emptyLdapName()` | The base DN. When this attribute has been configured, all Distinguished Names supplied to and received from LDAP operations are relative to the specified LDAP path. This can significantly simplify working against the LDAP tree. However, there are several occasions when you need to have access to the base path. For more information on this, see [[base-context-configuration]](#base-context-configuration). |
| `anonymous-read-only` | `false` | Defines whether read-only operations are performed by using an anonymous (unauthenticated) context. Note that setting this parameter to `true` together with the compensating transaction support is not supported and is rejected. |
| `referral` | `null` | Defines the strategy with which to handle referrals, as described [here](https://docs.oracle.com/javase/jndi/tutorial/ldap/referral/jndi.html). The valid values are `ignore`, `follow`, and `throw`. |
| `native-pooling` | `false` | Specifies whether native Java LDAP connection pooling should be used. Consider using Spring LDAP connection pooling instead. See [[pooling]](#pooling) for more information. |
| `authentication-source-ref` | A `SimpleAuthenticationSource` instance. | ID of the `AuthenticationSource` instance to use (see [[spring-ldap-custom-principal-credentials-management]](#spring-ldap-custom-principal-credentials-management)). |
| `authentication-strategy-ref` | A `SimpleDirContextAuthenticationStrategy` instance. | ID of the `DirContextAuthenticationStrategy` instance to use (see [[spring-ldap-custom-dircontext-authentication-processing]](#spring-ldap-custom-dircontext-authentication-processing)). |
| `base-env-props-ref` | | A reference to a `Map` of custom environment properties that should be supplied with the environment sent to the `DirContext` on construction. |

#### 7.1.1. `DirContext` Authentication

When `DirContext` instances are created to be used for performing operations on an LDAP server, these contexts often need to be authenticated.
Spring LDAP offers various options for configuring this.

| |This section refers to authenticating contexts in the core functionality of the `ContextSource`, to construct `DirContext` instances for use by `LdapTemplate`. LDAP is commonly used for the sole purpose of user authentication, and the `ContextSource` may be used for that as well. That process is discussed in [[user-authentication]](#user-authentication).|
|---|---|

By default, authenticated contexts are created for both read-only and read-write operations. You should specify the `username` and `password` of the LDAP user to be used for authentication on the `context-source` element.

| |If `username` is the Distinguished Name (DN) of an LDAP user, it needs to be the full DN of the user from the root of the LDAP tree, regardless of whether a `base` LDAP path has been specified on the `context-source` element.|
|---|---|

Some LDAP server setups allow anonymous read-only access. If you want to use anonymous contexts for read-only operations, set the `anonymous-read-only` attribute to `true`.

##### Custom `DirContext` Authentication Processing

The default authentication mechanism used in Spring LDAP is `SIMPLE` authentication. This means that the principal (as specified by the `username` attribute) and the credentials (as specified by the `password` attribute) are set in the `Hashtable` that is sent to the `DirContext` implementation constructor.

There are many occasions when this processing is not sufficient. For instance, LDAP servers are commonly set up to accept communication only on a secure TLS channel, or there might be a need to use a particular LDAP Proxy Auth mechanism.

You can specify an alternative authentication mechanism by supplying a `DirContextAuthenticationStrategy` implementation reference to the `context-source` element.
To do so, set the `authentication-strategy-ref` attribute.

###### TLS

Spring LDAP provides two different configuration options for LDAP servers that require TLS secure channel communication: `DefaultTlsDirContextAuthenticationStrategy` and `ExternalTlsDirContextAuthenticationStrategy`.
Both implementations negotiate a TLS channel on the target connection, but they differ in the actual authentication mechanism.
Where `DefaultTlsDirContextAuthenticationStrategy` applies SIMPLE authentication on the secure channel (by using the specified `username` and `password`), the `ExternalTlsDirContextAuthenticationStrategy` uses EXTERNAL SASL authentication, applying a client certificate that is configured by using system properties for authentication.

Since different LDAP server implementations respond differently to explicit shutdown of the TLS channel (some servers require the connection be shut down gracefully, while others do not support it), the TLS `DirContextAuthenticationStrategy` implementations support specifying the shutdown behavior by using the `shutdownTlsGracefully` parameter. If this property is set to `false` (the default), no explicit TLS shutdown happens. If it is `true`, Spring LDAP tries to shut down the TLS channel gracefully before closing the target context.

| |When working with TLS connections, you need to make sure that the native LDAP pooling functionality (as specified by using the `native-pooling` attribute) is turned off. This is particularly important if `shutdownTlsGracefully` is set to `false`. However, since the TLS channel negotiation process is quite expensive, you can gain great performance benefits by using the Spring LDAP pooling support, described in [[pooling]](#pooling).|
|---|---|

##### Custom Principal and Credentials Management

While the username (that is, the user DN) and password used for creating an authenticated `Context` are statically defined by default (the ones defined in the `context-source` element configuration are used throughout the lifetime of the `ContextSource`), there are several cases where this is not the desired behavior. A common scenario is that the principal and credentials of the current user should be used when performing LDAP operations for that user. You can modify the default behavior by supplying a reference to an `AuthenticationSource` implementation to the `context-source` element by using the `authentication-source-ref` attribute, instead of explicitly specifying the `username` and `password`. The `AuthenticationSource` is queried by the `ContextSource` for principal and credentials each time an authenticated `Context` is to be created.

If you use [Spring Security](https://spring.io/spring-security), you can make sure the principal and credentials of the currently logged-in user are used at all times by configuring your `ContextSource` with an instance of the `SpringSecurityAuthenticationSource` shipped with Spring Security. The following example shows how to do so:

```
<beans>
...
    <ldap:context-source
        url="ldap://localhost:389"
        authentication-source-ref="springSecurityAuthenticationSource" />

    <bean id="springSecurityAuthenticationSource"
        class="org.springframework.security.ldap.SpringSecurityAuthenticationSource" />
...
</beans>
```

| |We do not specify any `username` or `password` for our `context-source` when using an `AuthenticationSource`. These properties are needed only when the default behavior is used.|
|---|---|
| |When using the `SpringSecurityAuthenticationSource`, you need to use Spring Security’s `LdapAuthenticationProvider` to authenticate the users against LDAP.|
|---|---|

#### 7.1.2. Native Java LDAP Pooling

The internal Java LDAP provider has some very basic pooling capabilities. You can turn this LDAP connection pooling on or off by using the `pooled` flag on `AbstractContextSource`. The default value is `false` (since release 1.3), meaning that native Java LDAP pooling is turned off. The configuration of native LDAP connection pooling is managed by using `System` properties, so you need to handle this manually, outside of the Spring context configuration. You can find details of the native pooling configuration [here](https://java.sun.com/products/jndi/tutorial/ldap/connect/config.html).

| |There are several serious deficiencies in the built-in LDAP connection pooling, which is why Spring LDAP provides a more sophisticated approach to LDAP connection pooling, described in [[pooling]](#pooling). If you need pooling functionality, this is the recommended approach.|
|---|---|

| |Regardless of the pooling configuration, the `ContextSource#getContext(String principal, String credentials)` method never uses native Java LDAP pooling, so that reset passwords take effect as soon as possible.|
|---|---|

#### 7.1.3. Advanced `ContextSource` Configuration

This section covers more advanced ways to configure a `ContextSource`.

##### Custom `DirContext` Environment Properties

In some cases, you might want to specify additional environment setup properties, in addition to the ones directly configurable on `context-source`. You should set such properties in a `Map` and reference them in the `base-env-props-ref` attribute.

### 7.2. `LdapTemplate` Configuration

The `LdapTemplate` is defined by using an `<ldap:ldap-template>` element. The simplest possible `ldap-template` declaration is the element by itself:

```
<ldap:ldap-template />
```

The element by itself creates an `LdapTemplate` instance with the default ID, referencing the default `ContextSource`, which is expected to have an ID of `contextSource` (the default for the `context-source` element).
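
As an aside, if you prefer Java-based configuration over the XML namespace, a roughly equivalent setup can be sketched as follows. This is a minimal sketch, not the only way to wire things up, and the URL, base DN, and credentials are illustrative placeholders:

```
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.ldap.core.ContextSource;
import org.springframework.ldap.core.LdapTemplate;
import org.springframework.ldap.core.support.LdapContextSource;

@Configuration
public class LdapConfig {

  // Plays the role of <ldap:context-source>; all values here are placeholders.
  @Bean
  public LdapContextSource contextSource() {
    LdapContextSource contextSource = new LdapContextSource();
    contextSource.setUrl("ldap://localhost:389");
    contextSource.setBase("dc=example,dc=com");
    contextSource.setUserDn("cn=Administrator");
    contextSource.setPassword("secret");
    return contextSource;
  }

  // Plays the role of <ldap:ldap-template>, referencing the context source above.
  @Bean
  public LdapTemplate ldapTemplate(ContextSource contextSource) {
    return new LdapTemplate(contextSource);
  }
}
```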

The following table describes the configurable attributes on `ldap-template`:

| Attribute | Default | Description |
|---|---|---|
| `id` | `ldapTemplate` | The ID of the created bean. |
| `context-source-ref` | `contextSource` | The ID of the `ContextSource` instance to use. |
| `count-limit` | `0` | The default count limit for searches. 0 means no limit. |
| `time-limit` | `0` | The default time limit for searches, in milliseconds. 0 means no limit. |
| `search-scope` | `SUBTREE` | The default search scope for searches. The valid values are `OBJECT`, `ONELEVEL`, and `SUBTREE`. |
| `ignore-name-not-found` | `false` | Specifies whether a `NameNotFoundException` should be ignored in searches. Setting this attribute to `true` causes errors caused by an invalid search base to be silently swallowed. |
| `ignore-partial-result` | `false` | Specifies whether a `PartialResultException` should be ignored in searches. Some LDAP servers have problems with referrals. These should normally be followed automatically. However, if this does not work, it manifests itself as a `PartialResultException`. Setting this attribute to `true` provides a workaround for this problem. |
| `odm-ref` | | The ID of the `ObjectDirectoryMapper` instance to use. The default is a default-configured `DefaultObjectDirectoryMapper`. |

### 7.3. Obtaining a Reference to the Base LDAP Path

As described earlier, you can supply a base LDAP path to the `ContextSource`, specifying the root in the LDAP tree to which all operations are relative. This means that you are working only with relative distinguished names throughout your system, which is typically rather handy. There are, however, some cases in which you may need to have access to the base path in order to be able to construct full DNs, relative to the actual root of the LDAP tree. One example would be when working with LDAP groups (for example, the `groupOfNames` object class). In that case, each group member attribute value needs to be the full DN of the referenced member.

For that reason, Spring LDAP has a mechanism by which any Spring-controlled bean may be supplied with the base path on startup.
For beans to be notified of the base path, two things need to be in place. First, the bean that wants the base path reference needs to implement the `BaseLdapNameAware` interface.
Second, you need to define a `BaseLdapPathBeanPostProcessor` in the application context.
The following example shows how to implement `BaseLdapNameAware`:

```
package com.example.service;

public class PersonServiceImpl implements PersonService, BaseLdapNameAware {
  ...
  private LdapName basePath;

  public void setBaseLdapPath(LdapName basePath) {
    this.basePath = basePath;
  }
  ...
  private LdapName getFullPersonDn(Person person) {
    return LdapNameBuilder.newInstance(basePath)
        .add(person.getDn())
        .build();
  }
  ...
}
```

The following example shows how to define a `BaseLdapPathBeanPostProcessor`:

```
<beans>
    ...
    <ldap:context-source
        username="cn=Administrator"
        password="secret"
        url="ldap://localhost:389"
        base="dc=261consulting,dc=com" />
    ...
    <bean class="org.springframework.ldap.core.support.BaseLdapPathBeanPostProcessor" />
</beans>
```

The default behavior of the `BaseLdapPathBeanPostProcessor` is to use the base path of the single defined `BaseLdapPathSource` (`AbstractContextSource`) in the `ApplicationContext`. If more than one `BaseLdapPathSource` is defined, you need to specify which one to use by setting the `baseLdapPathSourceName` property.

## 8. Spring LDAP Repositories

Spring LDAP has built-in support for Spring Data repositories. The basic functionality and configuration are described [here](https://docs.spring.io/spring-data/data-commons/docs/current/reference/html/#repositories). When working with Spring LDAP repositories, you should remember the following:

* You can enable Spring LDAP repositories by using an `<ldap:repositories>` element in your XML configuration or by using an `@EnableLdapRepositories` annotation on a configuration class.

* To include support for `LdapQuery` parameters in automatically generated repositories, have your interface extend `LdapRepository` rather than `CrudRepository`.

* All Spring LDAP repositories must work with entities that are annotated with the ODM annotations, as described in [[odm]](#odm).
+ +* Since all ODM managed classes must have a Distinguished Name as the ID, all Spring LDAP repositories must have the ID type parameter set to `javax.naming.Name`. + The built-in `LdapRepository` takes only one type parameter: the managed entity class, defaulting the ID to `javax.naming.Name`. + +* Due to specifics of the LDAP protocol, paging and sorting are not supported for Spring LDAP repositories. + +\=== QueryDSL support +Basic QueryDSL support is included in Spring LDAP. This support includes the following: + +* An annotation processor, called `LdapAnnotationProcessor`, for generating QueryDSL classes based on Spring LDAP ODM annotations. See [[odm]](#odm) for more information on the ODM annotations. + +* A Query implementation, called `QueryDslLdapQuery`, for building and running QueryDSL queries in code. + +* Spring Data repository support for QueryDSL predicates. `QueryDslPredicateExecutor` includes a number of additional methods with appropriate parameters. You can extend this interface along with `LdapRepository` to include this support in your repository. + +\== Pooling Support + +Pooling LDAP connections helps mitigate the overhead of creating a new LDAP connection for each LDAP interaction. While [Java LDAP pooling support](https://java.sun.com/products/jndi/tutorial/ldap/connect/pool.html) exists, it is limited in its configuration options and features, such as connection validation and pool maintenance. Spring LDAP provides support for detailed pool configuration on a per-`ContextSource` basis. + +Pooling support is provided by supplying a `` child element to the `` element in the application context configuration. Read-only and read-write `DirContext` objects are pooled separately (if `anonymous-read-only` is specified). [Jakarta Commons-Pool](https://commons.apache.org/pool/index.html) is used to provide the underlying pool implementation. + +\=== `DirContext` Validation + +Validation of pooled connections is the primary motivation for using a custom pooling library versus the JDK-provided LDAP pooling functionality. Validation allows pooled `DirContext` connections to be checked to ensure that they are still properly connected and configured when checking them out of the pool, checking them into the pool, or while they are idle in the pool. + +If connection validation is configured, pooled connections are validated by using `DefaultDirContextValidator`.`DefaultDirContextValidator` does a `DirContext.search(String, String, SearchControls)`, with an empty name, a filter of `"objectclass=*"`, and `SearchControls` set to limit a single result with the only the `objectclass` attribute and a 500ms timeout. If the returned `NamingEnumeration` has results, the `DirContext` passes validation. If no results are returned or an exception is thrown, the `DirContext` fails validation. +The default settings should work with no configuration changes on most LDAP servers and provide the fastest way to validate the `DirContext`. +If you need customization, you can do so by using the validation configuration attributes, described in [[pool-configuration]](#pool-configuration). + +| |Connections are automatically invalidated if they throw an exception that is considered non-transient. For example, if a `DirContext` instance throws a `javax.naming.CommunicationException`, it is interpreted as a non-transient error and the instance is automatically invalidated, without the overhead of an additional `testOnReturn` operation. 
The exceptions that are interpreted as non-transient are configured by using the `nonTransientExceptions` property of the `PoolingContextSource`.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +\=== Pool Configuration +The following attributes are available on the `` element for configuration of the DirContext pool: + +| Attribute | Default | Description | +|--------------------------------------|------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `max-active` | `8` | The maximum number of active connections of each type (read-only or read-write) that can be allocated from this pool at the same time. You can use a non-positive number for no limit. | +| `max-total` | `-1` | The overall maximum number of active connections (for all types) that can be allocated from this pool at the same time. You can use a non-positive number for no limit. | +| `max-idle` | `8` | The maximum number of active connections of each type (read-only or read-write) that can remain idle in the pool without extra connections being released. You can use a non-positive number for no limit. | +| `min-idle` | `0` | The minimum number of active connections of each type (read-only or read-write) that can remain idle in the pool without extra connections being created. You can use zero (the default) to create none. | +| `max-wait` | `-1` | The maximum number of milliseconds that the pool waits (when no connections are available) for a connection to be returned before throwing an exception. You can use a non-positive number to wait indefinitely. | +| `when-exhausted` | `BLOCK` |Specifies the behavior when the pool is exhausted.

* The `FAIL` option throws `NoSuchElementException` when the pool is exhausted.

* The `BLOCK` option waits until a new object is available. If `max-wait` is positive and no new object is available after the `max-wait` time expires, `NoSuchElementException` is thrown.

* The `GROW` option creates and returns a new object (essentially making `max-active` meaningless).| +| `test-on-borrow` | `false` | Whether objects are validated before being borrowed from the pool. If the object fails to validate, it is dropped from the pool, and an attempt to borrow another is made. | +| `test-on-return` | `false` | Whether objects are validated before being returned to the pool. | +| `test-while-idle` | `false` | Whether objects are validated by the idle object evictor (if any). If an object fails to validate, it is dropped from the pool. | +| `eviction-run-interval-millis` | `-1` | The number of milliseconds to sleep between runs of the idle object evictor thread. When non-positive, no idle object evictor thread is run. | +| `tests-per-eviction-run` | `3` | The number of objects to examine during each run of the idle object evictor thread (if any). | +| `min-evictable-time-millis` | `1000 * 60 * 30` (30 minutes) | The minimum amount of time an object may sit idle in the pool before it is eligible for eviction by the idle object evictor (if any). | +| `validation-query-base` | `LdapUtils.emptyName()` | The search base to be used when validating connections. Used only if `test-on-borrow`, `test-on-return`, or `test-while-idle` is specified. | +| `validation-query-filter` | `objectclass=*` | The search filter to be used when validating connections. Used only if `test-on-borrow`, `test-on-return`, or `test-while-idle` is specified. | +|`validation-query-search-controls-ref`|`null`; default search control settings are described above.| The ID of a `SearchControls` instance to be used when validating connections. Only used if `test-on-borrow`, `test-on-return`, or `test-while-idle` is specified. | +| `non-transient-exceptions` | `javax.naming.CommunicationException` | Comma-separated list of `Exception` classes. The listed exceptions are considered non-transient with regards to eager invalidation. Should any of the listed exceptions (or subclasses of them) be thrown by a call to a pooled `DirContext` instance, that object is automatically invalidated without any additional testOnReturn operation. | + +\=== Pool2 Configuration + +The following attributes are available on the `` element for configuring the `DirContext` pool: + +| Attribute | Default | Description | +|--------------------------------------|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `max-total` | `-1` | The overall maximum number of active connections (for all types) that can be allocated from this pool at the same time. You can use a non-positive number for no limit. | +| `max-total-per-key` | `8` | The limit on the number of object instances allocated by the pool (checked out or idle), per key. When the limit is reached, the sub-pool is exhausted. A negative value indicates no limit. | +| `max-idle-per-key` | `8` | The maximum number of active connections of each type (read-only or read-write) that can remain idle in the pool, without extra connections being released. A negative value indicates no limit. 
+| `min-idle-per-key` | `0` | The minimum number of active connections of each type (read-only or read-write) that can remain idle in the pool, without extra connections being created. You can use zero (the default) to create none. |
+| `max-wait` | `-1` | The maximum number of milliseconds that the pool waits (when there are no available connections) for a connection to be returned before throwing an exception. You can use a non-positive number to wait indefinitely. |
+| `block-when-exhausted` | `true` | Whether to wait until a new object is available. If `max-wait` is positive, a `NoSuchElementException` is thrown if no new object is available after the `max-wait` time expires. |
+| `test-on-create` | `false` | Whether objects are validated when they are created. If an object fails to validate, the borrow attempt that triggered its creation fails. |
+| `test-on-borrow` | `false` | Whether objects are validated before being borrowed from the pool. If the object fails to validate, it is dropped from the pool, and an attempt to borrow another is made. |
+| `test-on-return` | `false` | Whether objects are validated before being returned to the pool. |
+| `test-while-idle` | `false` | Whether objects are validated by the idle object evictor (if any). If an object fails to validate, it is dropped from the pool. |
+| `eviction-run-interval-millis` | `-1` | The number of milliseconds to sleep between runs of the idle object evictor thread. When non-positive, no idle object evictor thread is run. |
+| `tests-per-eviction-run` | `3` | The number of objects to examine during each run of the idle object evictor thread (if any). |
+| `min-evictable-time-millis` | `1000 * 60 * 30` (30 minutes) | The minimum amount of time an object may sit idle in the pool before it is eligible for eviction by the idle object evictor (if any). |
+| `soft-min-evictable-time-millis` | `-1` | The minimum amount of time an object may sit idle in the pool before it is eligible for eviction by the idle object evictor, with the extra condition that at least the minimum number of object instances per key remain in the pool. This setting is overridden by `min-evictable-time-millis` if that is set to a positive value. |
+| `eviction-policy-class` | `org.apache.commons.pool2.impl.DefaultEvictionPolicy` | The eviction policy implementation that is used by this pool. The pool tries to load the class by using the thread context class loader. If that fails, the pool tries to load the class by using the class loader that loaded this class. |
+| `fairness` | `false` | Whether the pool serves threads that are waiting to borrow connections fairly. `true` means that waiting threads are served as if waiting in a FIFO queue. |
+| `jmx-enable` | `true` | Whether the pool is registered for JMX with the platform MBean server. |
+| `jmx-name-base` | `null` | The JMX name base that is used as part of the name assigned to JMX-enabled pools. |
+| `jmx-name-prefix` | `pool` | The JMX name prefix that is used as part of the name assigned to JMX-enabled pools. |
+| `lifo` | `true` | Whether the pool returns idle objects in LIFO (last in, first out) or FIFO (first in, first out) order. LIFO always returns the most recently used object from the pool, while FIFO always returns the oldest object in the idle object pool. |
+| `validation-query-base` | `LdapUtils.emptyPath()` | The base DN to use for validation searches. |
+| `validation-query-filter` | `objectclass=*` | The filter to use for validation queries. |
+|`validation-query-search-controls-ref`|`null`; default search control settings are described above.| The ID of a `SearchControls` instance to be used when validating connections. Used only if `test-on-borrow`, `test-on-return`, or `test-while-idle` is specified. |
+| `non-transient-exceptions` | `javax.naming.CommunicationException` | Comma-separated list of `Exception` classes. The listed exceptions are considered non-transient with regard to eager invalidation. Should any of the listed exceptions (or subclasses of them) be thrown by a call to a pooled `DirContext` instance, that object is automatically invalidated without any additional `test-on-return` operation. |
+
+\=== Configuration
+
+Configuring pooling requires adding a `pooling` element nested in the `context-source` element, as follows (the connection details in this sketch are placeholders):
+
+```
+<beans>
+   ...
+   <ldap:context-source
+       username="cn=Manager" password="secret" url="ldap://localhost:389" >
+       <ldap:pooling />
+   </ldap:context-source>
+   ...
+</beans>
+```
+
+In a real-world situation, you would probably configure the pool options and enable connection validation. The preceding example demonstrates the general idea.
+
+\==== Validation Configuration
+
+The following example tests each `DirContext` before it is passed to the client application and tests `DirContext` objects that have been sitting idle in the pool:
+
+```
+<beans>
+   ...
+   <ldap:context-source
+       username="cn=Manager" password="secret" url="ldap://localhost:389" >
+       <ldap:pooling
+           test-on-borrow="true"
+           test-while-idle="true" />
+   </ldap:context-source>
+   ...
+</beans>
+```
+
+\=== Known Issues
+
+This section describes issues that sometimes arise when people use Spring LDAP. At present, it covers the following issues:
+
+* [[spring-ldap-known-issues-custom-authentication]](#spring-ldap-known-issues-custom-authentication)
+
+\==== Custom Authentication
+
+The `PoolingContextSource` assumes that all `DirContext` objects retrieved from `ContextSource.getReadOnlyContext()` have the same environment and, likewise, that all `DirContext` objects retrieved from `ContextSource.getReadWriteContext()` have the same environment. This means that wrapping an `LdapContextSource` configured with an `AuthenticationSource` in a `PoolingContextSource` does not function as expected. The pool would be populated by using the credentials of the first user, and, unless new connections were needed, subsequent context requests would not be served with a context authenticated as the user specified by the `AuthenticationSource` for the requesting thread.
+
+\== Adding Missing Overloaded API Methods
+
+This section covers how to add your own overloaded API methods to implement new functionality.
+
+\=== Implementing Custom Search Methods
+
+`LdapTemplate` contains several overloaded versions of the most common operations in `DirContext`. However, we have not provided an alternative for each and every method signature, mostly because there are so many of them. We have, however, provided a means to call whichever `DirContext` method you want and still get the benefits that `LdapTemplate` provides.
+
+Suppose you want to call the following `DirContext` method:
+
+```
+NamingEnumeration search(Name name, String filterExpr, Object[] filterArgs, SearchControls ctls)
+```
+
+There is no corresponding overloaded method in `LdapTemplate`. The way to solve this is to use a custom `SearchExecutor` implementation, as follows:
+
+```
+public interface SearchExecutor {
+    public NamingEnumeration executeSearch(DirContext ctx) throws NamingException;
+}
+```
+
+In your custom executor, you have access to a `DirContext` object, which you can use to call the method you want. You can then provide a handler that is responsible for mapping attributes and collecting the results. You can, for example, use one of the available implementations of `CollectingNameClassPairCallbackHandler`, which collects the mapped results in an internal list.
In order to actually perform the search, you need to call the `search` method in `LdapTemplate` that takes an executor and a handler as arguments. Finally, you need to return whatever your handler has collected. The following example shows how to do all of that: + +``` +package com.example.repo; + +public class PersonRepoImpl implements PersonRepo { + ... + public List search(final Name base, final String filter, final String[] params, + final SearchControls ctls) { + SearchExecutor executor = new SearchExecutor() { + public NamingEnumeration executeSearch(DirContext ctx) { + return ctx.search(base, filter, params, ctls); + } + }; + + CollectingNameClassPairCallbackHandler handler = + new AttributesMapperCallbackHandler(new PersonAttributesMapper()); + + ldapTemplate.search(executor, handler); + return handler.getList(); + } +} +``` + +If you prefer the `ContextMapper` to the `AttributesMapper`, the following example shows what it would look like: + +``` +package com.example.repo; + +public class PersonRepoImpl implements PersonRepo { + ... + public List search(final Name base, final String filter, final String[] params, + final SearchControls ctls) { + SearchExecutor executor = new SearchExecutor() { + public NamingEnumeration executeSearch(DirContext ctx) { + return ctx.search(base, filter, params, ctls); + } + }; + + CollectingNameClassPairCallbackHandler handler = + new ContextMapperCallbackHandler(new PersonContextMapper()); + + ldapTemplate.search(executor, handler); + return handler.getList(); + } +} +``` + +| |When you use the `ContextMapperCallbackHandler`, you must make sure that you have called `setReturningObjFlag(true)` on your `SearchControls` instance.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------| + +\=== Implementing Other Custom Context Methods + +In the same manner as for custom `search` methods, you can actually call any method in `DirContext` by using a `ContextExecutor`, as follows: + +``` +public interface ContextExecutor { + public Object executeWithContext(DirContext ctx) throws NamingException; +} +``` + +When implementing a custom `ContextExecutor`, you can choose between using the `executeReadOnly()` or the `executeReadWrite()` method. Suppose you want to call the following method: + +``` +Object lookupLink(Name name) +``` + +The method is available in `DirContext`, but there is no matching method in `LdapTemplate`. It is a lookup method, so it should be read-only. We can implement it as follows: + +``` +package com.example.repo; + +public class PersonRepoImpl implements PersonRepo { + ... + public Object lookupLink(final Name name) { + ContextExecutor executor = new ContextExecutor() { + public Object executeWithContext(DirContext ctx) { + return ctx.lookupLink(name); + } + }; + + return ldapTemplate.executeReadOnly(executor); + } +} +``` + +In the same manner, you can perform a read-write operation by using the `executeReadWrite()` method. + +\== Processing the `DirContext` + +This section covers how to process the `DirContext`, including pre- and post-processing. + +\=== Custom `DirContext` Pre- and Post-processing + +In some situations, you might like to perform operations on the `DirContext` before and after the search operation. The interface that is used for this is called `DirContextProcessor`. 
The following listing shows the `DirContextProcessor` interface: + +``` +public interface DirContextProcessor { + public void preProcess(DirContext ctx) throws NamingException; + public void postProcess(DirContext ctx) throws NamingException; +} +``` + +The `LdapTemplate` class has a search method that takes a `DirContextProcessor`, as follows: + +``` +public void search(SearchExecutor se, NameClassPairCallbackHandler handler, + DirContextProcessor processor) throws DataAccessException; +``` + +Before the search operation, the `preProcess` method is called on the given `DirContextProcessor` instance. After the search has run and the resulting `NamingEnumeration` has been processed, the `postProcess` method is called. This lets you perform operations on the `DirContext` to be used in the search and to check the `DirContext` when the search has been performed. This can be very useful (for example, when handling request and response controls). + +You can also use the following convenience methods when you do not need a custom `SearchExecutor`: + +``` +public void search(Name base, String filter, + SearchControls controls, NameClassPairCallbackHandler handler, DirContextProcessor processor) + +public void search(String base, String filter, + SearchControls controls, NameClassPairCallbackHandler handler, DirContextProcessor processor) + +public void search(Name base, String filter, + SearchControls controls, AttributesMapper mapper, DirContextProcessor processor) + +public void search(String base, String filter, + SearchControls controls, AttributesMapper mapper, DirContextProcessor processor) + +public void search(Name base, String filter, + SearchControls controls, ContextMapper mapper, DirContextProcessor processor) + +public void search(String base, String filter, + SearchControls controls, ContextMapper mapper, DirContextProcessor processor) +``` + +\=== Implementing a Request Control `DirContextProcessor` + +The LDAPv3 protocol uses “Controls” to send and receive additional data to affect the behavior of predefined operations. To simplify the implementation of a request control `DirContextProcessor`, Spring LDAP provides the `AbstractRequestControlDirContextProcessor` base class. This class handles the retrieval of the current request controls from the `LdapContext`, calls a template method for creating a request control, and adds it to the `LdapContext`. All you have to do in the subclass is to implement the template method called `createRequestControl` and the `postProcess` method for performing whatever you need to do after the search. The following listing shows the relevant signatures: + +``` +public abstract class AbstractRequestControlDirContextProcessor implements + DirContextProcessor { + + public void preProcess(DirContext ctx) throws NamingException { + ... + } + + public abstract Control createRequestControl(); +} +``` + +A typical `DirContextProcessor` is similar to the following example: + +``` +package com.example.control; + +public class MyCoolRequestControl extends AbstractRequestControlDirContextProcessor { + private static final boolean CRITICAL_CONTROL = true; + private MyCoolCookie cookie; + ... 
+    public MyCoolCookie getCookie() {
+        return cookie;
+    }
+
+    public Control createRequestControl() {
+        return new SomeCoolControl(cookie.getCookie(), CRITICAL_CONTROL);
+    }
+
+    public void postProcess(DirContext ctx) throws NamingException {
+        LdapContext ldapContext = (LdapContext) ctx;
+        Control[] responseControls = ldapContext.getResponseControls();
+
+        for (int i = 0; i < responseControls.length; i++) {
+            if (responseControls[i] instanceof SomeCoolResponseControl) {
+                SomeCoolResponseControl control = (SomeCoolResponseControl) responseControls[i];
+                this.cookie = new MyCoolCookie(control.getCookie());
+            }
+        }
+    }
+}
+```
+
+| |Make sure you use `LdapContextSource` when you use controls. The [`Control`](https://download.oracle.com/javase/1.5.0/docs/api/javax/naming/ldap/Control.html) interface is specific to LDAPv3 and requires that `LdapContext` is used instead of `DirContext`. If an `AbstractRequestControlDirContextProcessor` subclass is called with an argument that is not an `LdapContext`, it throws an `IllegalArgumentException`.|
+|---|---|
+
+\=== Paged Search Results
+
+Some searches may return large numbers of results. When there is no easy way to filter out a smaller amount, it is convenient to have the server return only a certain number of results each time it is called. This is known as “paged search results”. Each “page” of the result could then be displayed, with links to the next and previous page. Without this functionality, the client must either manually limit the search result into pages or retrieve the whole result and then chop it into pages of suitable size. The former would be rather complicated, and the latter would consume unnecessary amounts of memory.
+
+Some LDAP servers support `PagedResultsControl`, which requests that the results of a search operation are returned by the LDAP server in pages of a specified size. The user controls the rate at which the pages are returned by controlling the rate at which the searches are called. However, you must keep track of a cookie between the calls. The server uses this cookie to keep track of where it left off the previous time it was called with a paged results request.
+
+Spring LDAP provides support for paged results by using the concept of pre- and post-processing of an `LdapContext`, as discussed in the previous sections. It does so by using the `PagedResultsDirContextProcessor` class. The `PagedResultsDirContextProcessor` class creates a `PagedResultsControl` with the requested page size and adds it to the `LdapContext`. After the search, it gets the `PagedResultsResponseControl` and retrieves the paged results cookie, which is needed to keep the context between consecutive paged results requests.
+
+The following example shows how to use the paged search results functionality:
+
+```
+public List<String> getAllPersonNames() {
+    final SearchControls searchControls = new SearchControls();
+    searchControls.setSearchScope(SearchControls.SUBTREE_SCOPE);
+
+    final PagedResultsDirContextProcessor processor =
+            new PagedResultsDirContextProcessor(PAGE_SIZE);
+
+    return SingleContextSource.doWithSingleContext(
+            contextSource, new LdapOperationsCallback<List<String>>() {
+
+        @Override
+        public List<String> doWithLdapOperations(LdapOperations operations) {
+            List<String> result = new LinkedList<String>();
+
+            do {
+                List<String> oneResult = operations.search(
+                    "ou=People",
+                    "(&(objectclass=person))",
+                    searchControls,
+                    CN_ATTRIBUTES_MAPPER,
+                    processor);
+                result.addAll(oneResult);
+            } while (processor.hasMore());
+
+            return result;
+        }
+    });
+}
+```
+
+| |For a paged results cookie to continue being valid, you must use the same underlying connection for each paged results call. You can do so by using the `SingleContextSource`, as demonstrated in the preceding example.|
+|---|---|
+
+\== Transaction Support
+
+Programmers used to working with relational databases who come to the LDAP world often express surprise at the fact that there is no notion of transactions.
+Transactions are not specified in the LDAP protocol, and no LDAP servers support them.
+Recognizing that this may be a major problem, Spring LDAP provides support for client-side, compensating transactions on LDAP resources.
+
+LDAP transaction support is provided by `ContextSourceTransactionManager`, a `PlatformTransactionManager` implementation that manages Spring transaction support for LDAP operations. Along with its collaborators, it keeps track of the LDAP operations performed in a transaction, making a record of the state before each operation and taking steps to restore the initial state should the transaction need to be rolled back.
+
+In addition to the actual transaction management, Spring LDAP transaction support also makes sure that the same `DirContext` instance is used throughout the same transaction. That is, the `DirContext` is not actually closed until the transaction is finished, allowing for more efficient resource usage.
+
+| |While the approach used by Spring LDAP to provide transaction support is sufficient for many cases, it does not provide “real” transactions in the traditional sense.
+The server is completely unaware of the transactions, so, for example, if the connection is broken, there is no way to roll back the transaction.
While this should be carefully considered, it should also be noted that the alternative is to operate without any transaction support whatsoever. Spring LDAP’s transaction support is pretty much as good as it gets.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |The client-side transaction support adds some overhead in addition to the work required by the original operations.
While this overhead should not be something to worry about in most cases,
if your application does not perform several LDAP operations within the same transaction (for example, `modifyAttributes` followed by `rebind`),
+or if transaction synchronization with a JDBC data source is not required (see [[spring-ldap-jdbc-transaction-integration]](#spring-ldap-jdbc-transaction-integration)), you gain little by using the LDAP transaction support.|
+|---|---|
+
+\=== Configuration
+
+Configuring Spring LDAP transactions should look very familiar if you are used to configuring Spring transactions. You can annotate your transacted classes with `@Transactional`, create a `TransactionManager` instance, and include a `tx:annotation-driven` element in your bean configuration. The following example shows how to do so (the connection details are placeholders):
+
+```
+<ldap:context-source
+       username="cn=Manager"
+       password="secret"
+       url="ldap://localhost:389" />
+
+<ldap:ldap-template id="ldapTemplate" />
+
+<ldap:transaction-manager id="transactionManager" />
+
+<tx:annotation-driven transaction-manager="transactionManager" />
+...
+```
+
+| |While this setup works fine for most simple use cases, some more complex scenarios require additional configuration.
+Specifically, if you need to create or delete subtrees within transactions, you need to use an alternative `TempEntryRenamingStrategy`, as described in [[renaming-strategies]](#renaming-strategies).|
+|---|---|
+
+In a real-world situation, you would probably apply the transactions on the service-object level rather than the repository level. The preceding example demonstrates the general idea.
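+
+For example, a transacted service method could look like the following sketch. This is a minimal illustration: the `PersonService` and `PersonRepo` types and their methods are hypothetical.
+
+```
+@Service
+public class PersonService {
+
+    private final PersonRepo personRepo;
+
+    public PersonService(PersonRepo personRepo) {
+        this.personRepo = personRepo;
+    }
+
+    // If either LDAP operation fails, the compensating transaction
+    // support rolls back (compensates) the one that already succeeded.
+    @Transactional
+    public void updatePerson(Person person) {
+        personRepo.updateAttributes(person);
+        personRepo.rebind(person);
+    }
+}
+```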
+
+\=== JDBC Transaction Integration
+
+A common use case when working against LDAP is that some of the data is stored in the LDAP tree but other data is stored in a relational database. In this case, transaction support becomes even more important, since the update of the different resources should be synchronized.
+
+While actual XA transactions are not supported, support is provided to conceptually wrap JDBC and LDAP access within the same transaction by supplying a `data-source-ref` attribute to the `ldap:transaction-manager` element. This creates a `ContextSourceAndDataSourceTransactionManager`, which then manages the two transactions virtually as if they were one. When performing a commit, the LDAP part of the operation is always performed first, letting both transactions be rolled back should the LDAP commit fail. The JDBC part of the transaction is managed exactly as in `DataSourceTransactionManager`, except that nested transactions are not supported. The following example shows an `ldap:transaction-manager` element with a `data-source-ref` attribute (`dataSource` refers to your `DataSource` bean):
+
+```
+<ldap:transaction-manager id="transactionManager"
+       data-source-ref="dataSource" />
+```
+
+| |The provided support is all client-side.
+The wrapped transaction is not an XA transaction. No two-phase commit is performed, as the LDAP server cannot vote on its outcome.|
+|---|---|
+
+You can accomplish the same thing for Hibernate integration by supplying a `session-factory-ref` attribute to the `ldap:transaction-manager` element, as follows:
+
+```
+<ldap:transaction-manager id="transactionManager"
+       session-factory-ref="sessionFactory" />
+```
+
+\=== LDAP Compensating Transactions Explained
+
+Spring LDAP manages compensating transactions by making a record of the state in the LDAP tree before each modifying operation (`bind`, `unbind`, `rebind`, `modifyAttributes`, and `rename`).
+This lets the system perform compensating operations should the transaction need to be rolled back.
+
+In many cases, the compensating operation is pretty straightforward. For example, the compensating rollback operation for a `bind` operation is to unbind the entry.
+Other operations, however, require a different, more complicated approach because of some particular characteristics of LDAP databases.
+Specifically, it is not always possible to get the values of all `Attributes` of an entry, making the aforementioned strategy insufficient for (for example) an `unbind` operation.
+
+This is why each modifying operation performed within a Spring LDAP managed transaction is internally split up into four distinct operations: a recording operation, a preparation operation, a commit operation, and a rollback operation. The following table describes each LDAP operation:
+
+| LDAP Operation | Recording | Preparation | Commit | Rollback |
+|---|---|---|---|---|
+| `bind` | Make a record of the DN of the entry to bind. | Bind the entry. | No operation. | Unbind the entry by using the recorded DN. |
+| `rename` | Make a record of the original and target DN. | Rename the entry. | No operation. | Rename the entry back to its original DN. |
+| `unbind` | Make a record of the original DN and calculate a temporary DN. | Rename the entry to the temporary location. | Unbind the temporary entry. | Rename the entry from the temporary location back to its original DN. |
+| `rebind` | Make a record of the original DN and the new `Attributes` and calculate a temporary DN. | Rename the entry to a temporary location. | Bind the new `Attributes` at the original DN and unbind the original entry from its temporary location. | Rename the entry from the temporary location back to its original DN. |
+| `modifyAttributes` | Make a record of the DN of the entry to modify and calculate compensating `ModificationItem` instances for the modifications to be done. | Perform the `modifyAttributes` operation. | No operation. | Perform a `modifyAttributes` operation by using the calculated compensating `ModificationItem` instances. |
+
+A more detailed description of the internal workings of the Spring LDAP transaction support is available in the [Javadoc](https://docs.spring.io/spring-ldap/docs/current/apidocs/).
+
+\==== Renaming Strategies
+
+As described in the table in the preceding section, the transaction management of some operations requires the original entry affected by the operation to be temporarily renamed before the actual modification can be made in the commit. The manner in which the temporary DN of the entry is calculated is managed by a `TempEntryRenamingStrategy` that is specified in a child element of the `ldap:transaction-manager` declaration in the configuration. Spring LDAP includes two implementations:
+
+* `DefaultTempEntryRenamingStrategy` (the default): Specified by using a `default-renaming-strategy` element. Adds a suffix to the least significant part of the entry DN. For example, for a DN of `cn=john doe, ou=users`, this strategy returns a temporary DN of `cn=john doe_temp, ou=users`. You can configure the suffix by setting the `temp-suffix` attribute.
+
+* `DifferentSubtreeTempEntryRenamingStrategy`: Specified by using a `different-subtree-renaming-strategy` element. It appends a subtree DN to the least significant part of the DN. Doing so makes all temporary entries be placed at a specific location in the LDAP tree. The temporary subtree DN is configured by setting the `subtree-node` attribute. For example, if `subtree-node` is `ou=tempEntries` and the original DN of the entry is `cn=john doe, ou=users`, the temporary DN is `cn=john doe, ou=tempEntries`. Note that the configured subtree node needs to be present in the LDAP tree.
+
+| |The `DefaultTempEntryRenamingStrategy` does not work in some situations. For example, if you plan to do recursive deletes, you need to use `DifferentSubtreeTempEntryRenamingStrategy`. This is because the recursive delete operation actually consists of a depth-first delete of each node in the subtree individually. Since you cannot rename an entry that has any children, and `DefaultTempEntryRenamingStrategy` would leave each node in the same subtree (with a different name) instead of actually removing it, this operation would fail. When in doubt, use `DifferentSubtreeTempEntryRenamingStrategy`.|
+|---|---|
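+
+Assuming the element names above, a transaction manager that uses the subtree strategy could be configured as in the following sketch (`ou=tempEntries` is an example location that must already exist in your tree):
+
+```
+<ldap:transaction-manager id="transactionManager">
+    <ldap:different-subtree-renaming-strategy subtree-node="ou=tempEntries" />
+</ldap:transaction-manager>
+```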
+
+\== User Authentication using Spring LDAP
+
+This section covers user authentication with Spring LDAP. It contains the following topics:
+
+* [[spring-ldap-user-authentication-basic]](#spring-ldap-user-authentication-basic)
+
+* [[operationsOnAuthenticatedContext]](#operationsOnAuthenticatedContext)
+
+* [[spring-ldap-authentication-obsolete]](#spring-ldap-authentication-obsolete)
+
+* [[spring-ldap-using-spring-security]](#spring-ldap-using-spring-security)
+
+\=== Basic Authentication
+
+While the core functionality of the `ContextSource` is to provide `DirContext` instances for use by `LdapTemplate`, you can also use it for authenticating users against an LDAP server. The `getContext(principal, credentials)` method of `ContextSource` does exactly that. It constructs a `DirContext` instance according to the `ContextSource` configuration and authenticates the context by using the supplied principal and credentials.
+A custom authenticate method could look like the following example:
+
+```
+public boolean authenticate(String userDn, String credentials) {
+    DirContext ctx = null;
+    try {
+        ctx = contextSource.getContext(userDn, credentials);
+        return true;
+    } catch (Exception e) {
+        // Context creation failed - authentication did not succeed
+        logger.error("Login failed", e);
+        return false;
+    } finally {
+        // It is imperative that the created DirContext instance is always closed
+        LdapUtils.closeContext(ctx);
+    }
+}
+```
+
+The `userDn` supplied to the `authenticate` method needs to be the full DN of the user to authenticate (regardless of the `base` setting on the `ContextSource`). You typically need to perform an LDAP search based on (for example) the user name to get this DN. The following example shows how to do so:
+
+```
+private String getDnForUser(String uid) {
+    List<String> result = ldapTemplate.search(
+        query().where("uid").is(uid),
+        new AbstractContextMapper<String>() {
+            protected String doMapFromContext(DirContextOperations ctx) {
+                return ctx.getNameInNamespace();
+            }
+        });
+
+    if (result.size() != 1) {
+        throw new RuntimeException("User not found or not unique");
+    }
+
+    return result.get(0);
+}
+```
+
+There are some drawbacks to this approach. You are forced to concern yourself with the DN of the user, you can search only for the user’s uid, and the search always starts at the root of the tree (the empty path). A more flexible method would let you specify the search base, the search filter, and the credentials. Spring LDAP includes an authenticate method in `LdapTemplate` that provides this functionality: `boolean authenticate(LdapQuery query, String password);`.
+
+When you use this method, authentication becomes as simple as follows:
+
+```
+ldapTemplate.authenticate(query().where("uid").is("john.doe"), "secret");
+```
+
+| |As described in the [next section](#operationsOnAuthenticatedContext), some setups may require you to perform additional operations to get actual authentication to occur. See [[operationsOnAuthenticatedContext]](#operationsOnAuthenticatedContext) for details.|
+|---|---|
+
+| |Do not write your own custom authenticate methods. Use the ones provided in Spring LDAP.|
+|---|---|
+
+\=== Performing Operations on the Authenticated Context
+
+Some authentication schemes and LDAP servers require some operation to be performed on the created `DirContext` instance for the actual authentication to occur. You should test how your server setup and authentication schemes behave; failure to do so might result in users being admitted into your system regardless of the supplied DN and credentials. The following example shows a naïve implementation of an authenticate method where a hard-coded `lookup` operation is performed on the authenticated context:
+
+```
+public boolean authenticate(String userDn, String credentials) {
+    DirContext ctx = null;
+    try {
+        ctx = contextSource.getContext(userDn, credentials);
+        // Take care here - if a base was specified on the ContextSource
+        // that needs to be removed from the user DN for the lookup to succeed.
+        ctx.lookup(userDn);
+        return true;
+    } catch (Exception e) {
+        // Context creation failed - authentication did not succeed
+        logger.error("Login failed", e);
+        return false;
+    } finally {
+        // It is imperative that the created DirContext instance is always closed
+        LdapUtils.closeContext(ctx);
+    }
+}
+```
+
+It would be better if the operation could be provided as an implementation of a callback interface, rather than limiting the operation to always be a `lookup`. Spring LDAP includes the `AuthenticatedLdapEntryContextMapper` callback interface and a corresponding `authenticate` method: `<T> T authenticate(LdapQuery query, String password, AuthenticatedLdapEntryContextMapper<T> mapper);`
+
+This method lets any operation be performed on the authenticated context, as follows:
+
+```
+AuthenticatedLdapEntryContextMapper<DirContextOperations> mapper = new AuthenticatedLdapEntryContextMapper<DirContextOperations>() {
+    public DirContextOperations mapWithContext(DirContext ctx, LdapEntryIdentification ldapEntryIdentification) {
+        try {
+            return (DirContextOperations) ctx.lookup(ldapEntryIdentification.getRelativeName());
+        }
+        catch (NamingException e) {
+            throw new RuntimeException("Failed to lookup " + ldapEntryIdentification.getRelativeName(), e);
+        }
+    }
+};
+
+ldapTemplate.authenticate(query().where("uid").is("john.doe"), "secret", mapper);
+```
+
+\=== Obsolete Authentication Methods
+
+In addition to the `authenticate` methods described in the preceding sections, you can use a number of deprecated methods for authentication. While these work fine, we recommend using the `LdapQuery` methods instead.
+
+\=== Using Spring Security
+
+While the approach described in the preceding sections may be sufficient for simple authentication scenarios, requirements in this area commonly expand rapidly. A multitude of aspects apply, including authentication, authorization, web integration, user context management, and others. If you suspect that the requirements might expand beyond just simple authentication, you should definitely consider using [Spring Security](https://spring.io/spring-security) for your security purposes instead. It is a full-featured, mature security framework that addresses the aforementioned aspects as well as several others.
+
+\== LDIF Parsing
+
+LDAP Data Interchange Format (LDIF) files are the standard medium for describing directory data in a flat-file format. The most common uses of this format include information transfer and archival. However, the standard also defines a way to describe modifications to stored data in a flat-file format. LDIFs of this latter type are typically referred to as *changetype* or *modify* LDIFs.
+
+The `org.springframework.ldap.ldif` package provides the classes needed to parse LDIF files and deserialize them into tangible objects. The `LdifParser` is the main class of the `org.springframework.ldap.ldif` package and is capable of parsing files that comply with RFC 2849. This class reads lines from a resource and assembles them into an `LdapAttributes` object.
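+
+As a minimal sketch, driving the parser directly could look like the following. The resource name is illustrative, exception handling is omitted, and the loop assumes the `open()`, `hasMoreRecords()`, `getRecord()`, and `close()` methods of the parser contract:
+
+```
+LdifParser parser = new LdifParser(new ClassPathResource("users.ldif"));
+parser.open();
+while (parser.hasMoreRecords()) {
+    // Each record is returned as an LdapAttributes object that also carries its DN
+    LdapAttributes record = parser.getRecord();
+    System.out.println(record);
+}
+parser.close();
+```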
+
+| |The `LdifParser` currently ignores *changetype* LDIF entries, as their usefulness in the context of an application has yet to be determined.|
+|---|---|
+
+\=== Object Representation
+
+Two classes in the `org.springframework.ldap.core` package provide the means to represent an LDIF in code:
+
+* `LdapAttribute`: Extends `javax.naming.directory.BasicAttribute` to add support for LDIF options as defined in RFC 2849.
+
+* `LdapAttributes`: Extends `javax.naming.directory.BasicAttributes` to add specialized support for DNs.
+
+`LdapAttribute` objects represent options as a `Set`. The DN support added to the `LdapAttributes` object employs the `javax.naming.ldap.LdapName` class.
+
+\=== The Parser
+
+The `Parser` interface provides the foundation for operation and employs three supporting policy definitions:
+
+* `SeparatorPolicy`: Establishes the mechanism by which lines are assembled into attributes.
+
+* `AttributeValidationPolicy`: Ensures that attributes are correctly structured prior to parsing.
+
+* `Specification`: Provides a mechanism by which object structure can be validated after assembly.
+
+The default implementations of these interfaces are as follows:
+
+* `org.springframework.ldap.ldif.parser.LdifParser`
+
+* `org.springframework.ldap.ldif.support.SeparatorPolicy`
+
+* `org.springframework.ldap.ldif.support.DefaultAttributeValidationPolicy`
+
+* `org.springframework.ldap.schema.DefaultSchemaSpecification`
+
+Together, these four classes parse a resource line by line and translate the data into `LdapAttributes` objects.
+
+The `SeparatorPolicy` determines how individual lines read from the source file should be interpreted, as the LDIF specification lets attributes span multiple lines. The default policy assesses lines in the context of the order in which they were read to determine the nature of the line in consideration. *control* attributes and *changetype* records are ignored.
+
+The `DefaultAttributeValidationPolicy` uses regular expressions to ensure that each attribute conforms to a valid attribute format (according to RFC 2849) once parsed. If an attribute fails validation, an `InvalidAttributeFormatException` is logged, and the record is skipped (the parser returns `null`).
+
+\=== Schema Validation
+
+A mechanism for validating parsed objects against a schema is available through the `Specification` interface in the `org.springframework.ldap.schema` package. The `DefaultSchemaSpecification` does not do any validation and is available for instances where records are known to be valid and need not be checked. This option saves the performance penalty that validation imposes. The `BasicSchemaSpecification` applies basic checks, such as ensuring that DN and object class declarations have been provided. Currently, validation against an actual schema requires implementation of the `Specification` interface.
+
+\=== Spring Batch Integration
+
+While the `LdifParser` can be employed by any application that requires parsing of LDIF files, Spring Batch offers many utilities for processing flat files, such as delimited (CSV) files. The `org.springframework.ldap.ldif.batch` package offers the classes needed to use the `LdifParser` as a valid configuration option in the Spring Batch framework.
+There are five classes in this package.
+Together, they offer three basic use cases:
+
+* Reading LDIF records from a file and returning an `LdapAttributes` object.
+
+* Reading LDIF records from a file and mapping records to Java objects (POJOs).
+
+* Writing LDIF records to a file.
+
+The first use case is accomplished with `LdifReader`. This class extends Spring Batch’s `AbstractItemCountingItemStreamItemReader` and implements `ResourceAwareItemReaderItemStream`. It fits naturally into the framework, and you can use it to read `LdapAttributes` objects from a file.
+
+You can use `MappingLdifReader` to map LDIF objects directly to any POJO. This class requires you to provide an implementation of the `RecordMapper` interface, which contains the logic for mapping records to POJOs (see the sketch at the end of this section).
+
+You can implement `RecordCallbackHandler` and provide the implementation to either reader. You can use this handler to operate on skipped records. See the [Spring Batch API documentation](https://docs.spring.io/spring-batch/docs/current/api/org/springframework/batch/item/ldif/RecordCallbackHandler.html) for more information.
+
+The last member of this package, the `LdifAggregator`, can be used to write LDIF records to a file. This class invokes the `toString()` method of the `LdapAttributes` object.
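+
+For the second use case, a `RecordMapper` implementation could look like the following sketch. The `Person` class and its setters are hypothetical, and the attribute names are examples:
+
+```
+public class PersonRecordMapper implements RecordMapper<Person> {
+
+    @Override
+    public Person mapRecord(LdapAttributes attributes) {
+        Person person = new Person();
+        try {
+            // Copy selected attributes from the LDIF record onto the POJO
+            person.setFullName((String) attributes.get("cn").get());
+            person.setLastName((String) attributes.get("sn").get());
+        } catch (NamingException e) {
+            throw new RuntimeException("Failed to map LDIF record", e);
+        }
+        return person;
+    }
+}
+```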
+
+\== Utilities
+
+This section describes additional utilities that you can use with Spring LDAP.
+
+\=== Incremental Retrieval of Multi-Valued Attributes
+
+When there are a very large number of attribute values (\>1500) for a specific attribute, Active Directory typically refuses to return all these values at once. Instead, the attribute values are returned according to the [Incremental Retrieval of Multi-valued Properties](https://tools.ietf.org/html/draft-kashi-incremental-00) method. Doing so requires the calling party to inspect the returned attribute for specific markers and, if necessary, make additional lookup requests until all values are found.
+
+Spring LDAP’s `org.springframework.ldap.core.support.DefaultIncrementalAttributesMapper` helps when working with this kind of attribute, as follows:
+
+```
+Object[] attrNames = new Object[]{"oneAttribute", "anotherAttribute"};
+Attributes attrs = DefaultIncrementalAttributesMapper.lookupAttributes(ldapTemplate, theDn, attrNames);
+```
+
+The preceding example parses any returned attribute range markers and makes repeated requests as necessary until all values for all requested attributes have been retrieved.
+
+\== Testing
+
+This section covers testing with Spring LDAP. It contains the following topics:
+
+* [[spring-ldap-testing-embedded-server]](#spring-ldap-testing-embedded-server)
+
+* [[spring-ldap-testing-apacheds]](#spring-ldap-testing-apacheds)
+
+* [[spring-ldap-testing-unboundid]](#spring-ldap-testing-unboundid)
+
+\=== Using an Embedded Server
+
+`spring-ldap-test` supplies an embedded LDAP server that is based on [ApacheDS](https://directory.apache.org/apacheds/) or [UnboundID](https://www.ldap.com/unboundid-ldap-sdk-for-java).
+
+| |`spring-ldap-test` is compatible with ApacheDS 1.5.5. Newer versions of ApacheDS are not supported.|
+|---|---|
+
+To get started, you need to include the `spring-ldap-test` dependency.
+
+The following listing shows how to include `spring-ldap-test` for Maven:
+
+```
+<dependency>
+    <groupId>org.springframework.ldap</groupId>
+    <artifactId>spring-ldap-test</artifactId>
+    <version>2.3.5.RELEASE</version>
+    <scope>test</scope>
+</dependency>
+```
+
+The following listing shows how to include `spring-ldap-test` for Gradle:
+
+```
+testCompile "org.springframework.ldap:spring-ldap-test:2.3.5.RELEASE"
+```
+
+\=== ApacheDS
+
+To use ApacheDS, you need to include a number of ApacheDS dependencies.
+
+The following example shows how to include the ApacheDS dependencies for Maven:
+
+```
+<dependency>
+    <groupId>org.apache.directory.server</groupId>
+    <artifactId>apacheds-core</artifactId>
+    <version>1.5.5</version>
+    <scope>test</scope>
+</dependency>
+<dependency>
+    <groupId>org.apache.directory.server</groupId>
+    <artifactId>apacheds-core-entry</artifactId>
+    <version>1.5.5</version>
+    <scope>test</scope>
+</dependency>
+<dependency>
+    <groupId>org.apache.directory.server</groupId>
+    <artifactId>apacheds-protocol-shared</artifactId>
+    <version>1.5.5</version>
+    <scope>test</scope>
+</dependency>
+<dependency>
+    <groupId>org.apache.directory.server</groupId>
+    <artifactId>apacheds-protocol-ldap</artifactId>
+    <version>1.5.5</version>
+    <scope>test</scope>
+</dependency>
+<dependency>
+    <groupId>org.apache.directory.server</groupId>
+    <artifactId>apacheds-server-jndi</artifactId>
+    <version>1.5.5</version>
+    <scope>test</scope>
+</dependency>
+<dependency>
+    <groupId>org.apache.directory.shared</groupId>
+    <artifactId>shared-ldap</artifactId>
+    <version>0.9.15</version>
+    <scope>test</scope>
+</dependency>
+```
+
+The following example shows how to include the ApacheDS dependencies for Gradle:
+
+```
+testCompile "org.apache.directory.server:apacheds-core:1.5.5",
+    "org.apache.directory.server:apacheds-core-entry:1.5.5",
+    "org.apache.directory.server:apacheds-protocol-shared:1.5.5",
+    "org.apache.directory.server:apacheds-protocol-ldap:1.5.5",
+    "org.apache.directory.server:apacheds-server-jndi:1.5.5",
+    "org.apache.directory.shared:shared-ldap:0.9.15"
+```
+
+The following bean definition creates an embedded LDAP server (the property names and values shown are illustrative):
+
+```
+<bean id="embeddedLdapServer" class="org.springframework.ldap.test.EmbeddedLdapServerFactoryBean">
+    <property name="partitionName" value="example"/>
+    <property name="partitionSuffix" value="dc=example,dc=com"/>
+    <property name="port" value="18880"/>
+</bean>
+```
+
+`spring-ldap-test` provides a mechanism to populate the LDAP server by using `org.springframework.ldap.test.LdifPopulator`. To use it, create a bean similar to the following (the property names and values shown are illustrative):
+
+```
+<bean class="org.springframework.ldap.test.LdifPopulator" depends-on="embeddedLdapServer">
+    <property name="contextSource" ref="contextSource"/>
+    <property name="resource" value="classpath:/setup_data.ldif"/>
+    <property name="base" value="dc=example,dc=com"/>
+    <property name="clean" value="true"/>
+    <property name="defaultBase" value="dc=example,dc=com"/>
+</bean>
+```
+
+Another way to work against an embedded LDAP server is by using `org.springframework.ldap.test.TestContextSourceFactoryBean`, as follows (the property names and values shown are illustrative):
+
+```
+<bean id="contextSource" class="org.springframework.ldap.test.TestContextSourceFactoryBean">
+    <property name="defaultPartitionName" value="example"/>
+    <property name="defaultPartitionSuffix" value="dc=example,dc=com"/>
+    <property name="principal" value="uid=admin,ou=system"/>
+    <property name="password" value="secret"/>
+    <property name="ldifFile" value="classpath:/setup_data.ldif"/>
+    <property name="port" value="18880"/>
+</bean>
+```
+
+Also, `org.springframework.ldap.test.LdapTestUtils` provides methods to programmatically work with an embedded LDAP server.
+
+\=== UnboundID
+
+To use UnboundID, you need to include an UnboundID dependency.
+
+The following example shows how to include the UnboundID dependency for Maven:
+
+```
+<dependency>
+    <groupId>com.unboundid</groupId>
+    <artifactId>unboundid-ldapsdk</artifactId>
+    <version>3.1.1</version>
+    <scope>test</scope>
+</dependency>
+```
+
+The following example shows how to include the UnboundID dependency for Gradle:
+
+```
+testCompile "com.unboundid:unboundid-ldapsdk:3.1.1"
+```
+
+The following bean definition creates an embedded LDAP server (the property names and values shown are illustrative):
+
+```
+<bean id="embeddedLdapServer" class="org.springframework.ldap.test.unboundid.EmbeddedLdapServerFactoryBean">
+    <property name="partitionName" value="example"/>
+    <property name="partitionSuffix" value="dc=example,dc=com"/>
+    <property name="port" value="18880"/>
+</bean>
+```
+
+`spring-ldap-test` provides a way to populate the LDAP server by using `org.springframework.ldap.test.unboundid.LdifPopulator`. To use it, create a bean similar to the following (the property names and values shown are illustrative):
+
+```
+<bean class="org.springframework.ldap.test.unboundid.LdifPopulator" depends-on="embeddedLdapServer">
+    <property name="contextSource" ref="contextSource"/>
+    <property name="resource" value="classpath:/setup_data.ldif"/>
+    <property name="base" value="dc=example,dc=com"/>
+    <property name="clean" value="true"/>
+    <property name="defaultBase" value="dc=example,dc=com"/>
+</bean>
+```
+
+Another way to work against an embedded LDAP server is by using `org.springframework.ldap.test.unboundid.TestContextSourceFactoryBean`.
+To use it, create a bean similar to the following (the property names and values shown are illustrative):
+
+```
+<bean id="contextSource" class="org.springframework.ldap.test.unboundid.TestContextSourceFactoryBean">
+    <property name="defaultPartitionName" value="example"/>
+    <property name="defaultPartitionSuffix" value="dc=example,dc=com"/>
+    <property name="principal" value="uid=admin,ou=system"/>
+    <property name="password" value="secret"/>
+    <property name="ldifFile" value="classpath:/setup_data.ldif"/>
+    <property name="port" value="18880"/>
+</bean>
+```
\ No newline at end of file
diff --git a/docs/en/spring-rest-docs/README.md b/docs/en/spring-rest-docs/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/docs/en/spring-rest-docs/spring-restdocs.md b/docs/en/spring-rest-docs/spring-restdocs.md
new file mode 100644
index 0000000000000000000000000000000000000000..1caab8e9e4ba64e60680ffd8b394ecba0296d168
--- /dev/null
+++ b/docs/en/spring-rest-docs/spring-restdocs.md
@@ -0,0 +1,2636 @@
+# Spring REST Docs
+
+Document RESTful services by combining hand-written documentation with auto-generated snippets produced with Spring MVC Test, WebTestClient, or REST Assured.
+
+## [Introduction](#introduction)
+
+The aim of Spring REST Docs is to help you produce accurate and readable documentation for your RESTful services.
+
+Writing high-quality documentation is difficult.
+One way to ease that difficulty is to use tools that are well-suited to the job.
+To this end, Spring REST Docs uses [Asciidoctor](https://asciidoctor.org) by default.
+Asciidoctor processes plain text and produces HTML, styled and laid out to suit your needs.
+If you prefer, you can also configure Spring REST Docs to use Markdown.
+
+Spring REST Docs uses snippets produced by tests written with Spring MVC’s [test framework](https://docs.spring.io/spring-framework/docs/5.0.x/spring-framework-reference/testing.html#spring-mvc-test-framework), Spring WebFlux’s [`WebTestClient`](https://docs.spring.io/spring-framework/docs/5.0.x/spring-framework-reference/testing.html#webtestclient), or [REST Assured 3](http://rest-assured.io).
+This test-driven approach helps to guarantee the accuracy of your service’s documentation.
+If a snippet is incorrect, the test that produces it fails.
+
+Documenting a RESTful service is largely about describing its resources.
+Two key parts of each resource’s description are the details of the HTTP requests that it consumes and the HTTP responses that it produces.
+Spring REST Docs lets you work with these resources and the HTTP requests and responses, shielding your documentation from the inner details of your service’s implementation.
+This separation helps you document your service’s API rather than its implementation.
+It also frees you to evolve the implementation without having to rework the documentation.
+
+## [Getting started](#getting-started)
+
+This section describes how to get started with Spring REST Docs.
+
+### [Sample Applications](#getting-started-sample-applications)
+
+If you want to jump straight in, a number of sample applications are available:
+
+| Sample |Build system| Description |
+|---|---|---|
+|[Spring Data REST](https://github.com/spring-projects/spring-restdocs/tree/v2.0.6.RELEASE/samples/rest-notes-spring-data-rest)| Maven |Demonstrates the creation of a getting started guide and an API guide for a service implemented by using [Spring Data REST](https://projects.spring.io/spring-data-rest/).|
+|[Spring HATEOAS](https://github.com/spring-projects/spring-restdocs/tree/v2.0.6.RELEASE/samples/rest-notes-spring-hateoas)| Gradle |Demonstrates the creation of a getting started guide and an API guide for a service implemented by using [Spring HATEOAS](https://projects.spring.io/spring-hateoas/).|
+|[WebTestClient](https://github.com/spring-projects/spring-restdocs/tree/v2.0.6.RELEASE/samples/web-test-client)| Gradle |Demonstrates the use of Spring REST Docs with Spring WebFlux’s WebTestClient.|
+|[REST Assured](https://github.com/spring-projects/spring-restdocs/tree/v2.0.6.RELEASE/samples/rest-assured)| Gradle |Demonstrates the use of Spring REST Docs with [REST Assured](http://rest-assured.io).|
+|[Slate](https://github.com/spring-projects/spring-restdocs/tree/v2.0.6.RELEASE/samples/rest-notes-slate)| Gradle |Demonstrates the use of Spring REST Docs with Markdown and [Slate](https://github.com/tripit/slate).|
+|[TestNG](https://github.com/spring-projects/spring-restdocs/tree/v2.0.6.RELEASE/samples/testng)| Gradle |Demonstrates the use of Spring REST Docs with [TestNG](http://testng.org).|
+|[JUnit 5](https://github.com/spring-projects/spring-restdocs/tree/v2.0.6.RELEASE/samples/junit5)| Gradle |Demonstrates the use of Spring REST Docs with [JUnit 5](https://junit.org/junit5/).|
+
+### [Requirements](#getting-started-requirements)
+
+Spring REST Docs has the following minimum requirements:
+
+* Java 8
+
+* Spring Framework 5 (5.0.2 or later)
+
+Additionally, the `spring-restdocs-restassured` module requires REST Assured 3.0.
+
+### [Build configuration](#getting-started-build-configuration)
+
+The first step in using Spring REST Docs is to configure your project’s build.
+The [Spring HATEOAS](https://github.com/spring-projects/spring-restdocs/tree/v2.0.6.RELEASE/samples/rest-notes-spring-hateoas) and [Spring Data REST](https://github.com/spring-projects/spring-restdocs/tree/v2.0.6.RELEASE/samples/rest-notes-spring-data-rest) samples contain a `build.gradle` and `pom.xml`, respectively, that you may wish to use as a reference.
+The key parts of the configuration are described in the following listings:
+
+Maven
+
+```
+<dependency> (1)
+    <groupId>org.springframework.restdocs</groupId>
+    <artifactId>spring-restdocs-mockmvc</artifactId>
+    <version>{project-version}</version>
+    <scope>test</scope>
+</dependency>
+
+<build>
+    <plugins>
+        <plugin> (2)
+            <groupId>org.asciidoctor</groupId>
+            <artifactId>asciidoctor-maven-plugin</artifactId>
+            <version>1.5.8</version>
+            <executions>
+                <execution>
+                    <id>generate-docs</id>
+                    <phase>prepare-package</phase> (3)
+                    <goals>
+                        <goal>process-asciidoc</goal>
+                    </goals>
+                    <configuration>
+                        <backend>html</backend>
+                        <doctype>book</doctype>
+                    </configuration>
+                </execution>
+            </executions>
+            <dependencies>
+                <dependency> (4)
+                    <groupId>org.springframework.restdocs</groupId>
+                    <artifactId>spring-restdocs-asciidoctor</artifactId>
+                    <version>{project-version}</version>
+                </dependency>
+            </dependencies>
+        </plugin>
+    </plugins>
+</build>
+```
+
+|**1**| Add a dependency on `spring-restdocs-mockmvc` in the `test` scope.
If you want to use `WebTestClient` or REST Assured rather than MockMvc, add a dependency on `spring-restdocs-webtestclient` or `spring-restdocs-restassured` respectively instead. | +|-----|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Add the Asciidoctor plugin. | +|**3**| Using `prepare-package` allows the documentation to be [included in the package](#getting-started-build-configuration-packaging-the-documentation). | +|**4**|Add `spring-restdocs-asciidoctor` as a dependency of the Asciidoctor plugin.
This will automatically configure the `snippets` attribute for use in your `.adoc` files to point to `target/generated-snippets`.
It will also allow you to use the `operation` block macro.| + +Gradle + +``` +plugins { (1) + id "org.asciidoctor.jvm.convert" version "3.3.2" +} + +configurations { + asciidoctorExt (2) +} + +dependencies { + asciidoctorExt 'org.springframework.restdocs:spring-restdocs-asciidoctor:{project-version}' (3) + testImplementation 'org.springframework.restdocs:spring-restdocs-mockmvc:{project-version}' (4) +} + +ext { (5) + snippetsDir = file('build/generated-snippets') +} + +test { (6) + outputs.dir snippetsDir +} + +asciidoctor { (7) + inputs.dir snippetsDir (8) + configurations 'asciidoctorExt' (9) + dependsOn test (10) +} +``` + +|**1** | Apply the Asciidoctor plugin. | +|------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2** | Declare the `asciidoctorExt` configuration for dependencies that extend Asciidoctor. | +|**3** |Add a dependency on `spring-restdocs-asciidoctor` in the `asciidoctorExt` configuration.
This will automatically configure the `snippets` attribute for use in your `.adoc` files to point to `build/generated-snippets`.
It will also allow you to use the `operation` block macro.| +|**4** | Add a dependency on `spring-restdocs-mockmvc` in the `testImplementation` configuration.
+If you want to use `WebTestClient` or REST Assured rather than MockMvc, add a dependency on `spring-restdocs-webtestclient` or `spring-restdocs-restassured` respectively instead. |
+|**5**| Configure a property to define the output location for generated snippets. |
+|**6**| Configure the `test` task to add the snippets directory as an output. |
+|**7**| Configure the `asciidoctor` task. |
+|**8**| Configure the snippets directory as an input. |
+|**9**| Configure the use of the `asciidoctorExt` configuration for extensions. |
+|**10**| Make the task depend on the test task so that the tests are run before the documentation is created. |
+
+#### [Packaging the Documentation](#getting-started-build-configuration-packaging-the-documentation)
+
+You may want to package the generated documentation in your project’s jar file (for example, to have it [served as static content](https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/#boot-features-spring-mvc-static-content) by Spring Boot).
+To do so, configure your project’s build so that:
+
+1. The documentation is generated before the jar is built
+
+2. The generated documentation is included in the jar
+
+The following listings show how to do so in both Maven and Gradle:
+
+Maven
+
+```
+<plugin> (1)
+    <groupId>org.asciidoctor</groupId>
+    <artifactId>asciidoctor-maven-plugin</artifactId>
+</plugin>
+<plugin> (2)
+    <artifactId>maven-resources-plugin</artifactId>
+    <version>2.7</version>
+    <executions>
+        <execution>
+            <id>copy-resources</id>
+            <phase>prepare-package</phase>
+            <goals>
+                <goal>copy-resources</goal>
+            </goals>
+            <configuration> (3)
+                <outputDirectory>${project.build.outputDirectory}/static/docs</outputDirectory>
+                <resources>
+                    <resource>
+                        <directory>${project.build.directory}/generated-docs</directory>
+                    </resource>
+                </resources>
+            </configuration>
+        </execution>
+    </executions>
+</plugin>
+```
+
+|**1**| The existing declaration for the Asciidoctor plugin. |
+|---|---|
+|**2**|The resource plugin must be declared after the Asciidoctor plugin as they are bound to the same phase (`prepare-package`) and the resource plugin must run after the Asciidoctor plugin to ensure that the documentation is generated before it’s copied.|
+|**3**| Copy the generated documentation into the build output’s `static/docs` directory, from where it will be included in the jar file. |
+
+Gradle
+
+```
+bootJar {
+    dependsOn asciidoctor (1)
+    from ("${asciidoctor.outputDir}/html5") { (2)
+        into 'static/docs'
+    }
+}
+```
+
+|**1**|Ensure that the documentation has been generated before the jar is built.|
+|---|---|
+|**2**|Copy the generated documentation into the jar’s `static/docs` directory. |
+
+### [Generating Documentation Snippets](#getting-started-documentation-snippets)
+
+Spring REST Docs uses Spring MVC’s [test framework](https://docs.spring.io/spring-framework/docs/5.0.x/spring-framework-reference/testing.html#spring-mvc-test-framework), Spring WebFlux’s [`WebTestClient`](https://docs.spring.io/spring-framework/docs/5.0.x/spring-framework-reference/testing.html#webtestclient), or [REST Assured](http://rest-assured.io/) to make requests to the service that you are documenting.
+It then produces documentation snippets for the request and the resulting response.
+
+#### [Setting up Your Tests](#getting-started-documentation-snippets-setup)
+
+Exactly how you set up your tests depends on the test framework that you use.
+Spring REST Docs provides first-class support for JUnit 4 and JUnit 5.
+Other frameworks, such as TestNG, are also supported, although slightly more setup is required. + +##### [Setting up Your JUnit 4 Tests](#getting-started-documentation-snippets-setup-junit) + +When using JUnit 4, the first step in generating documentation snippets is to declare a `public` `JUnitRestDocumentation` field that is annotated as a JUnit `@Rule`. +The following example shows how to do so: + +``` +@Rule +public JUnitRestDocumentation restDocumentation = new JUnitRestDocumentation(); +``` + +By default, the `JUnitRestDocumentation` rule is automatically configured with an output directory based on your project’s build tool: + +|Build tool| Output directory | +|----------|---------------------------| +| Maven |`target/generated-snippets`| +| Gradle |`build/generated-snippets` | + +You can override the default by providing an output directory when you create the `JUnitRestDocumentation` instance. +The following example shows how to do so: + +``` +@Rule +public JUnitRestDocumentation restDocumentation = new JUnitRestDocumentation("custom"); +``` + +Next, you must provide an `@Before` method to configure MockMvc, WebTestClient or REST Assured. +The following examples show how to do so: + +MockMvc + +``` +private MockMvc mockMvc; + +@Autowired +private WebApplicationContext context; + +@Before +public void setUp() { + this.mockMvc = MockMvcBuilders.webAppContextSetup(this.context) + .apply(documentationConfiguration(this.restDocumentation)) (1) + .build(); +} +``` + +|**1**|The `MockMvc` instance is configured by using a `MockMvcRestDocumentationConfigurer`.
You can obtain an instance of this class from the static `documentationConfiguration()` method on `org.springframework.restdocs.mockmvc.MockMvcRestDocumentation`.|
+|-----|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+WebTestClient
+
+```
+private WebTestClient webTestClient;
+
+@Autowired
+private ApplicationContext context;
+
+@Before
+public void setUp() {
+    this.webTestClient = WebTestClient.bindToApplicationContext(this.context).configureClient()
+        .filter(documentationConfiguration(this.restDocumentation)) (1)
+        .build();
+}
+```
+
+|**1**|The `WebTestClient` instance is configured by adding a `WebTestClientRestDocumentationConfigurer` as an `ExchangeFilterFunction`.
You can obtain an instance of this class from the static `documentationConfiguration()` method on `org.springframework.restdocs.webtestclient.WebTestClientRestDocumentation`.| +|-----|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +REST Assured + +``` +private RequestSpecification spec; + +@Before +public void setUp() { + this.spec = new RequestSpecBuilder().addFilter(documentationConfiguration(this.restDocumentation)) (1) + .build(); +} +``` + +|**1**|REST Assured is configured by adding a `RestAssuredRestDocumentationConfigurer` as a `Filter`.
You can obtain an instance of this class from the static `documentationConfiguration()` method on `RestAssuredRestDocumentation` in the `org.springframework.restdocs.restassured3` package.|
+|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+The configurer applies sensible defaults and also provides an API for customizing the configuration.
+See the [configuration section](#configuration) for more information.
+
+##### [Setting up Your JUnit 5 Tests](#getting-started-documentation-snippets-setup-junit-5)
+
+When using JUnit 5, the first step in generating documentation snippets is to apply the `RestDocumentationExtension` to your test class.
+The following example shows how to do so:
+
+```
+@ExtendWith(RestDocumentationExtension.class)
+public class JUnit5ExampleTests {
+```
+
+When testing a typical Spring application, you should also apply the `SpringExtension`:
+
+```
+@ExtendWith({RestDocumentationExtension.class, SpringExtension.class})
+public class JUnit5ExampleTests {
+```
+
+The `RestDocumentationExtension` is automatically configured with an output directory based on your project’s build tool:
+
+|Build tool| Output directory |
+|----------|---------------------------|
+| Maven |`target/generated-snippets`|
+| Gradle |`build/generated-snippets` |
+
+If you are using JUnit 5.1, you can override the default by registering the extension as a field in your test class and providing an output directory when creating it.
+The following example shows how to do so:
+
+```
+public class JUnit5ExampleTests {
+
+    @RegisterExtension
+    final RestDocumentationExtension restDocumentation = new RestDocumentationExtension("custom");
+
+}
+```
+
+Next, you must provide a `@BeforeEach` method to configure MockMvc, WebTestClient, or REST Assured.
+The following listings show how to do so:
+
+MockMvc
+
+```
+private MockMvc mockMvc;
+
+@BeforeEach
+public void setUp(WebApplicationContext webApplicationContext, RestDocumentationContextProvider restDocumentation) {
+    this.mockMvc = MockMvcBuilders.webAppContextSetup(webApplicationContext)
+        .apply(documentationConfiguration(restDocumentation)) (1)
+        .build();
+}
+```
+
+|**1**|The `MockMvc` instance is configured by using a `MockMvcRestDocumentationConfigurer`.
You can obtain an instance of this class from the static `documentationConfiguration()` method on `org.springframework.restdocs.mockmvc.MockMvcRestDocumentation`.| +|-----|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +WebTestClient + +``` +private WebTestClient webTestClient; + +@BeforeEach +public void setUp(ApplicationContext applicationContext, RestDocumentationContextProvider restDocumentation) { + this.webTestClient = WebTestClient.bindToApplicationContext(applicationContext).configureClient() + .filter(documentationConfiguration(restDocumentation)) (1) + .build(); +} +``` + +|**1**|The `WebTestClient` instance is configured by adding a `WebTestClientRestDocumentationConfigurer` as an `ExchangeFilterFunction`.
You can obtain an instance of this class from the static `documentationConfiguration()` method on `org.springframework.restdocs.webtestclient.WebTestClientRestDocumentation`.| +|-----|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +REST Assured + +``` +private RequestSpecification spec; + +@BeforeEach +public void setUp(RestDocumentationContextProvider restDocumentation) { + this.spec = new RequestSpecBuilder().addFilter(documentationConfiguration(restDocumentation)) (1) + .build(); +} +``` + +|**1**|REST Assured is configured by adding a `RestAssuredRestDocumentationConfigurer` as a `Filter`.
You can obtain an instance of this class from the static `documentationConfiguration()` method on `RestAssuredRestDocumentation` in the `org.springframework.restdocs.restassured3` package.|
+|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+The configurer applies sensible defaults and also provides an API for customizing the configuration.
+See the [configuration section](#configuration) for more information.
+
+##### [Setting up your tests without JUnit](#getting-started-documentation-snippets-setup-manual)
+
+The configuration when JUnit is not being used is largely similar to when it is being used.
+This section describes the key differences.
+The [TestNG sample](https://github.com/spring-projects/spring-restdocs/tree/v2.0.6.RELEASE/samples/testng) also illustrates the approach.
+
+The first difference is that you should use `ManualRestDocumentation` in place of `JUnitRestDocumentation`.
+Also, you do not need the `@Rule` annotation.
+The following example shows how to use `ManualRestDocumentation`:
+
+```
+private ManualRestDocumentation restDocumentation = new ManualRestDocumentation();
+```
+
+Secondly, you must call `ManualRestDocumentation.beforeTest(Class, String)` before each test.
+You can do so as part of the method that configures MockMvc, WebTestClient, or REST Assured.
+The following examples show how to do so:
+
+MockMvc
+
+```
+private MockMvc mockMvc;
+
+@Autowired
+private WebApplicationContext context;
+
+@BeforeMethod
+public void setUp(Method method) {
+    this.mockMvc = MockMvcBuilders.webAppContextSetup(this.context)
+        .apply(documentationConfiguration(this.restDocumentation)).build();
+    this.restDocumentation.beforeTest(getClass(), method.getName());
+}
+```
+
+WebTestClient
+
+```
+private WebTestClient webTestClient;
+
+@Autowired
+private ApplicationContext context;
+
+@BeforeMethod
+public void setUp(Method method) {
+    this.webTestClient = WebTestClient.bindToApplicationContext(this.context).configureClient()
+        .filter(documentationConfiguration(this.restDocumentation))
+        .build();
+    this.restDocumentation.beforeTest(getClass(), method.getName());
+}
+```
+
+REST Assured
+
+```
+private RequestSpecification spec;
+
+@BeforeMethod
+public void setUp(Method method) {
+    this.spec = new RequestSpecBuilder().addFilter(documentationConfiguration(this.restDocumentation)).build();
+    this.restDocumentation.beforeTest(getClass(), method.getName());
+}
+```
+
+Finally, you must call `ManualRestDocumentation.afterTest` after each test.
+The following example shows how to do so with TestNG:
+
+```
+@AfterMethod
+public void tearDown() {
+    this.restDocumentation.afterTest();
+}
+```
+
+#### [Invoking the RESTful Service](#getting-started-documentation-snippets-invoking-the-service)
+
+Now that you have configured the testing framework, you can use it to invoke the RESTful service and document the request and response.
+The following examples show how to do so:
+
+MockMvc
+
+```
+this.mockMvc.perform(get("/").accept(MediaType.APPLICATION_JSON)) (1)
+    .andExpect(status().isOk()) (2)
+    .andDo(document("index")); (3)
+```
+
+|**1**| Invoke the root (`/`) of the service and indicate that an `application/json` response is required. 
| +|-----|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Assert that the service produced the expected response. | +|**3**|Document the call to the service, writing the snippets into a directory named `index` (which is located beneath the configured output directory).
The snippets are written by a `RestDocumentationResultHandler`.
You can obtain an instance of this class from the static `document` method on `org.springframework.restdocs.mockmvc.MockMvcRestDocumentation`.| + +WebTestClient + +``` +this.webTestClient.get().uri("/").accept(MediaType.APPLICATION_JSON) (1) + .exchange().expectStatus().isOk() (2) + .expectBody().consumeWith(document("index")); (3) +``` + +|**1**| Invoke the root (`/`) of the service and indicate that an `application/json` response is required. | +|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Assert that the service produced the expected response. | +|**3**|Document the call to the service, writing the snippets into a directory named `index` (which is located beneath the configured output directory).
The snippets are written by a `Consumer` of the `ExchangeResult`.
You can obtain such a consumer from the static `document` method on `org.springframework.restdocs.webtestclient.WebTestClientRestDocumentation`.| + +REST Assured + +``` +RestAssured.given(this.spec) (1) + .accept("application/json") (2) + .filter(document("index")) (3) + .when().get("/") (4) + .then().assertThat().statusCode(is(200)); (5) +``` + +|**1**| Apply the specification that was initialized in the `@Before` method. | +|-----|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Indicate that an `application/json` response is required. | +|**3**|Document the call to the service, writing the snippets into a directory named `index` (which is located beneath the configured output directory).
The snippets are written by a `RestDocumentationFilter`.
You can obtain an instance of this class from the static `document` method on `RestAssuredRestDocumentation` in the `org.springframework.restdocs.restassured3` package.|
+|**4**| Invoke the root (`/`) of the service. |
+|**5**| Assert that the service produced the expected response. |
+
+By default, six snippets are written:
+
+* `/index/curl-request.adoc`
+
+* `/index/http-request.adoc`
+
+* `/index/http-response.adoc`
+
+* `/index/httpie-request.adoc`
+
+* `/index/request-body.adoc`
+
+* `/index/response-body.adoc`
+
+See [Documenting your API](#documenting-your-api) for more information about these and other snippets that can be produced by Spring REST Docs.
+
+### [Using the Snippets](#getting-started-using-the-snippets)
+
+Before using the generated snippets, you must create an `.adoc` source file.
+You can name the file whatever you like as long as it has a `.adoc` suffix.
+The resulting HTML file has the same name but with a `.html` suffix.
+The default location of the source files and the resulting HTML files depends on whether you use Maven or Gradle:
+
+|Build tool| Source files | Generated files |
+|----------|--------------------------|------------------------------|
+| Maven |`src/main/asciidoc/*.adoc`|`target/generated-docs/*.html`|
+| Gradle |`src/docs/asciidoc/*.adoc`|`build/asciidoc/html5/*.html` |
+
+You can then include the generated snippets in the manually created Asciidoc file (described earlier in this section) by using the [include macro](https://asciidoctor.org/docs/asciidoc-syntax-quick-reference/#include-files).
+You can use the `snippets` attribute that is automatically set by `spring-restdocs-asciidoctor` configured in the [build configuration](#getting-started-build-configuration) to reference the snippets output directory.
+The following example shows how to do so:
+
+```
+include::{snippets}/index/curl-request.adoc[]
+```
+
+## [Documenting your API](#documenting-your-api)
+
+This section provides more details about using Spring REST Docs to document your API.
+
+### [Hypermedia](#documenting-your-api-hypermedia)
+
+Spring REST Docs provides support for documenting the links in a [hypermedia-based](https://en.wikipedia.org/wiki/HATEOAS) API.
+The following examples show how to use it:
+
+MockMvc
+
+```
+this.mockMvc.perform(get("/").accept(MediaType.APPLICATION_JSON)).andExpect(status().isOk())
+    .andDo(document("index", links((1)
+        linkWithRel("alpha").description("Link to the alpha resource"), (2)
+        linkWithRel("bravo").description("Link to the bravo resource")))); (3)
+```
+
+|**1**|Configure Spring REST docs to produce a snippet describing the response’s links.
Uses the static `links` method on `org.springframework.restdocs.hypermedia.HypermediaDocumentation`.| +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Expect a link whose `rel` is `alpha`.
Uses the static `linkWithRel` method on `org.springframework.restdocs.hypermedia.HypermediaDocumentation`. | +|**3**| Expect a link whose `rel` is `bravo`. | + +WebTestClient + +``` +this.webTestClient.get().uri("/").accept(MediaType.APPLICATION_JSON).exchange().expectStatus().isOk() + .expectBody().consumeWith(document("index", links((1) + linkWithRel("alpha").description("Link to the alpha resource"), (2) + linkWithRel("bravo").description("Link to the bravo resource")))); (3) +``` + +|**1**|Configure Spring REST docs to produce a snippet describing the response’s links.
Uses the static `links` method on `org.springframework.restdocs.hypermedia.HypermediaDocumentation`.| +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Expect a link whose `rel` is `alpha`.
Uses the static `linkWithRel` method on `org.springframework.restdocs.hypermedia.HypermediaDocumentation`. | +|**3**| Expect a link whose `rel` is `bravo`. | + +REST Assured + +``` +RestAssured.given(this.spec).accept("application/json").filter(document("index", links((1) + linkWithRel("alpha").description("Link to the alpha resource"), (2) + linkWithRel("bravo").description("Link to the bravo resource")))) (3) + .get("/").then().assertThat().statusCode(is(200)); +``` + +|**1**|Configure Spring REST docs to produce a snippet describing the response’s links.
Uses the static `links` method on `org.springframework.restdocs.hypermedia.HypermediaDocumentation`.| +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Expect a link whose `rel` is `alpha`.
Uses the static `linkWithRel` method on `org.springframework.restdocs.hypermedia.HypermediaDocumentation`. | +|**3**| Expect a link whose `rel` is `bravo`. | + +The result is a snippet named `links.adoc` that contains a table describing the resource’s links. + +| |If a link in the response has a `title`, you can omit the description from its descriptor and the `title` is used.
If you omit the description and the link does not have a `title`, a failure occurs.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +When documenting links, the test fails if an undocumented link is found in the response. +Similarly, the test also fails if a documented link is not found in the response and the link has not been marked as optional. + +If you do not want to document a link, you can mark it as ignored. +Doing so prevents it from appearing in the generated snippet while avoiding the failure described above. + +You can also document links in a relaxed mode, where any undocumented links do not cause a test failure. +To do so, use the `relaxedLinks` method on `org.springframework.restdocs.hypermedia.HypermediaDocumentation`. +This can be useful when documenting a particular scenario where you only want to focus on a subset of the links. + +#### [Hypermedia Link Formats](#documenting-your-api-hypermedia-link-formats) + +Two link formats are understood by default: + +* Atom: Links are expected to be in an array named `links`. + This is used by default when the content type of the response is compatible with `application/json`. + +* HAL: Links are expected to be in a map named `_links`. + This is used by default when the content type of the response is compatible with `application/hal+json`. + +If you use Atom- or HAL-format links but with a different content type, you can provide one of the built-in `LinkExtractor` implementations to `links`. +The following examples show how to do so: + +MockMvc + +``` +.andDo(document("index", links(halLinks(), (1) + linkWithRel("alpha").description("Link to the alpha resource"), + linkWithRel("bravo").description("Link to the bravo resource")))); +``` + +|**1**|Indicate that the links are in HAL format.
Uses the static `halLinks` method on `org.springframework.restdocs.hypermedia.HypermediaDocumentation`.| +|-----|------------------------------------------------------------------------------------------------------------------------------------------------------| + +WebTestClient + +``` +.consumeWith(document("index", links(halLinks(), (1) + linkWithRel("alpha").description("Link to the alpha resource"), + linkWithRel("bravo").description("Link to the bravo resource")))); +``` + +|**1**|Indicate that the links are in HAL format.
Uses the static `halLinks` method on `org.springframework.restdocs.hypermedia.HypermediaDocumentation`.| +|-----|------------------------------------------------------------------------------------------------------------------------------------------------------| + +REST Assured + +``` +.filter(document("index", links(halLinks(), (1) + linkWithRel("alpha").description("Link to the alpha resource"), + linkWithRel("bravo").description("Link to the bravo resource")))) +``` + +|**1**|Indicate that the links are in HAL format.
Uses the static `halLinks` method on `org.springframework.restdocs.hypermedia.HypermediaDocumentation`.| +|-----|------------------------------------------------------------------------------------------------------------------------------------------------------| + +If your API represents its links in a format other than Atom or HAL, you can provide your own implementation of the `LinkExtractor` interface to extract the links from the response. + +#### [Ignoring Common Links](#documenting-your-api-hypermedia-ignoring-common-links) + +Rather than documenting links that are common to every response, such as `self` and `curies` when using HAL, you may want to document them once in an overview section and then ignore them in the rest of your API’s documentation. +To do so, you can build on the [support for reusing snippets](#documenting-your-api-reusing-snippets) to add link descriptors to a snippet that is preconfigured to ignore certain links. +The following example shows how to do so: + +``` +public static LinksSnippet links(LinkDescriptor... descriptors) { + return HypermediaDocumentation.links(linkWithRel("self").ignored().optional(), linkWithRel("curies").ignored()) + .and(descriptors); +} +``` + +### [Request and Response Payloads](#documenting-your-api-request-response-payloads) + +In addition to the hypermedia-specific support [described earlier](#documenting-your-api-hypermedia), support for general documentation of request and response payloads is also provided. + +By default, Spring REST Docs automatically generates snippets for the body of the request and the body of the response. +These snippets are named `request-body.adoc` and `response-body.adoc` respectively. + +#### [Request and Response Fields](#documenting-your-api-request-response-payloads-fields) + +To provide more detailed documentation of a request or response payload, support for documenting the payload’s fields is provided. + +Consider the following payload: + +``` +{ + "contact": { + "name": "Jane Doe", + "email": "[email protected]" + } +} +``` + +You can document the previous example’s fields as follows: + +MockMvc + +``` +this.mockMvc.perform(get("/user/5").accept(MediaType.APPLICATION_JSON)).andExpect(status().isOk()) + .andDo(document("index", responseFields((1) + fieldWithPath("contact.email").description("The user's email address"), (2) + fieldWithPath("contact.name").description("The user's name")))); (3) +``` + +|**1**|Configure Spring REST docs to produce a snippet describing the fields in the response payload.
To document a request, you can use `requestFields`.
Both are static methods on `org.springframework.restdocs.payload.PayloadDocumentation`.| +|-----|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Expect a field with the path `contact.email`.
Uses the static `fieldWithPath` method on `org.springframework.restdocs.payload.PayloadDocumentation`. | +|**3**| Expect a field with the path `contact.name`. | + +WebTestClient + +``` +this.webTestClient.get().uri("user/5").accept(MediaType.APPLICATION_JSON) + .exchange().expectStatus().isOk().expectBody() + .consumeWith(document("user", + responseFields((1) + fieldWithPath("contact.email").description("The user's email address"), (2) + fieldWithPath("contact.name").description("The user's name")))); (3) +``` + +|**1**|Configure Spring REST docs to produce a snippet describing the fields in the response payload.
To document a request, you can use `requestFields`.
Both are static methods on `org.springframework.restdocs.payload.PayloadDocumentation`.| +|-----|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Expect a field with the path `contact.email`.
Uses the static `fieldWithPath` method on `org.springframework.restdocs.payload.PayloadDocumentation`. |
+|**3**| Expect a field with the path `contact.name`. |
+
+REST Assured
+
+```
+RestAssured.given(this.spec).accept("application/json").filter(document("user", responseFields((1)
+    fieldWithPath("contact.email").description("The user's email address"), (2)
+    fieldWithPath("contact.name").description("The user's name")))) (3)
+    .when().get("/user/5").then().assertThat().statusCode(is(200));
+```
+
+|**1**|Configure Spring REST docs to produce a snippet describing the fields in the response payload.
To document a request, you can use `requestFields`.
Both are static methods on `org.springframework.restdocs.payload.PayloadDocumentation`.| +|-----|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Expect a field with the path `contact.email`.
Uses the static `fieldWithPath` method on `org.springframework.restdocs.payload.PayloadDocumentation`. |
+|**3**| Expect a field with the path `contact.name`. |
+
+The result is a snippet that contains a table describing the fields.
+For requests, this snippet is named `request-fields.adoc`.
+For responses, this snippet is named `response-fields.adoc`.
+
+When documenting fields, the test fails if an undocumented field is found in the payload.
+Similarly, the test also fails if a documented field is not found in the payload and the field has not been marked as optional.
+
+If you do not want to provide detailed documentation for all of the fields, an entire subsection of a payload can be documented.
+The following examples show how to do so:
+
+MockMvc
+
+```
+this.mockMvc.perform(get("/user/5").accept(MediaType.APPLICATION_JSON)).andExpect(status().isOk())
+    .andDo(document("index", responseFields(
+        subsectionWithPath("contact").description("The user's contact details")))); (1)
+```
+
+|**1**|Document the subsection with the path `contact`. `contact.email` and `contact.name` are now seen as having also been documented.
Uses the static `subsectionWithPath` method on `org.springframework.restdocs.payload.PayloadDocumentation`.|
+|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+WebTestClient
+
+```
+this.webTestClient.get().uri("user/5").accept(MediaType.APPLICATION_JSON)
+    .exchange().expectStatus().isOk().expectBody()
+    .consumeWith(document("user",
+        responseFields(
+            subsectionWithPath("contact").description("The user's contact details")))); (1)
+```
+
+|**1**|Document the subsection with the path `contact`. `contact.email` and `contact.name` are now seen as having also been documented.
Uses the static `subsectionWithPath` method on `org.springframework.restdocs.payload.PayloadDocumentation`.|
+|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+REST Assured
+
+```
+RestAssured.given(this.spec).accept("application/json")
+    .filter(document("user",
+        responseFields(subsectionWithPath("contact").description("The user's contact details")))) (1)
+    .when().get("/user/5").then().assertThat().statusCode(is(200));
+```
+
+|**1**|Document the subsection with the path `contact`. `contact.email` and `contact.name` are now seen as having also been documented.
Uses the static `subsectionWithPath` method on `org.springframework.restdocs.payload.PayloadDocumentation`.| +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +`subsectionWithPath` can be useful for providing a high-level overview of a particular section of a payload. +You can then produce separate, more detailed documentation for a subsection. +See [Documenting a Subsection of a Request or Response Payload](#documenting-your-api-request-response-payloads-subsections). + +If you do not want to document a field or subsection at all, you can mark it as ignored. +This prevents it from appearing in the generated snippet while avoiding the failure described earlier. + +You can also document fields in a relaxed mode, where any undocumented fields do not cause a test failure. +To do so, use the `relaxedRequestFields` and `relaxedResponseFields` methods on `org.springframework.restdocs.payload.PayloadDocumentation`. +This can be useful when documenting a particular scenario where you want to focus only on a subset of the payload. + +| |By default, Spring REST Docs assumes that the payload you are documenting is JSON.
If you want to document an XML payload, the content type of the request or response must be compatible with `application/xml`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +##### [Fields in JSON Payloads](#documenting-your-api-request-response-payloads-fields-json) + +This section describes how to work with fields in JSON payloads. + +###### [JSON Field Paths](#documenting-your-api-request-response-payloads-fields-json-field-paths) + +JSON field paths use either dot notation or bracket notation. +Dot notation uses '.' to separate each key in the path (for example, `a.b`). +Bracket notation wraps each key in square brackets and single quotation marks (for example, `['a']['b']`). +In either case, `[]` is used to identify an array. +Dot notation is more concise, but using bracket notation enables the use of `.` within a key name (for example, `['a.b']`). +The two different notations can be used in the same path (for example, `a['b']`). + +Consider the following JSON payload: + +``` +{ + "a":{ + "b":[ + { + "c":"one" + }, + { + "c":"two" + }, + { + "d":"three" + } + ], + "e.dot" : "four" + } +} +``` + +In the preceding JSON payload, the following paths are all present: + +| Path | Value | +|----------------|-----------------------------------------------| +| `a` | An object containing `b` | +| `a.b` | An array containing three objects | +| `['a']['b']` | An array containing three objects | +| `a['b']` | An array containing three objects | +| `['a'].b` | An array containing three objects | +| `a.b[]` | An array containing three objects | +| `a.b[].c` |An array containing the strings `one` and `two`| +| `a.b[].d` | The string `three` | +| `a['e.dot']` | The string `four` | +|`['a']['e.dot']`| The string `four` | + +You can also document a payload that uses an array at its root. +The path `[]` refers to the entire array. +You can then use bracket or dot notation to identify fields within the array’s entries. +For example, `[].id` corresponds to the `id` field of every object found in the following array: + +``` +[ + { + "id":1 + }, + { + "id":2 + } +] +``` + +You can use `*` as a wildcard to match fields with different names. +For example, `users.*.role` could be used to document the role of every user in the following JSON: + +``` +{ + "users":{ + "ab12cd34":{ + "role": "Administrator" + }, + "12ab34cd":{ + "role": "Guest" + } + } +} +``` + +###### [JSON Field Types](#documenting-your-api-request-response-payloads-fields-json-field-types) + +When a field is documented, Spring REST Docs tries to determine its type by examining the payload. +Seven different types are supported: + +| Type | Description | +|---------|---------------------------------------------------------------------------------| +| `array` | The value of each occurrence of the field is an array. | +|`boolean`| The value of each occurrence of the field is a boolean (`true` or `false`). | +|`object` | The value of each occurrence of the field is an object. | +|`number` | The value of each occurrence of the field is a number. | +| `null` | The value of each occurrence of the field is `null`. | +|`string` | The value of each occurrence of the field is a string. | +|`varies` |The field occurs multiple times in the payload with a variety of different types.| + +You can also explicitly set the type by using the `type(Object)` method on `FieldDescriptor`. 
+The result of the `toString` method of the supplied `Object` is used in the documentation.
+Typically, one of the values enumerated by `JsonFieldType` is used.
+The following examples show how to do so:
+
+MockMvc
+
+```
+.andDo(document("index", responseFields(fieldWithPath("contact.email").type(JsonFieldType.STRING) (1)
+    .description("The user's email address"))));
+```
+
+|**1**|Set the field’s type to `String`.|
+|-----|---------------------------------|
+
+WebTestClient
+
+```
+.consumeWith(document("user",
+    responseFields(
+        fieldWithPath("contact.email")
+            .type(JsonFieldType.STRING) (1)
+            .description("The user's email address"))));
+```
+
+|**1**|Set the field’s type to `String`.|
+|-----|---------------------------------|
+
+REST Assured
+
+```
+.filter(document("user", responseFields(fieldWithPath("contact.email").type(JsonFieldType.STRING) (1)
+    .description("The user's email address"))))
+```
+
+|**1**|Set the field’s type to `String`.|
+|-----|---------------------------------|
+
+##### [XML payloads](#documenting-your-api-request-response-payloads-fields-xml)
+
+This section describes how to work with XML payloads.
+
+###### [XML Field Paths](#documenting-your-api-request-response-payloads-fields-xml-field-paths)
+
+XML field paths are described using XPath. `/` is used to descend into a child node.
+
+###### [XML Field Types](#documenting-your-api-request-response-payloads-fields-xml-field-types)
+
+When documenting an XML payload, you must provide a type for the field by using the `type(Object)` method on `FieldDescriptor`.
+The result of the supplied type’s `toString` method is used in the documentation.
+
+##### [Reusing Field Descriptors](#documenting-your-api-request-response-payloads-fields-reusing-field-descriptors)
+
+In addition to the general support for [reusing snippets](#documenting-your-api-reusing-snippets), the request and response snippets let additional descriptors be configured with a path prefix.
+This lets the descriptors for a repeated portion of a request or response payload be created once and then reused.
+
+Consider an endpoint that returns a book:
+
+```
+{
+    "title": "Pride and Prejudice",
+    "author": "Jane Austen"
+}
+```
+
+The paths for `title` and `author` are `title` and `author`, respectively.
+
+Now consider an endpoint that returns an array of books:
+
+```
+[{
+    "title": "Pride and Prejudice",
+    "author": "Jane Austen"
+},
+{
+    "title": "To Kill a Mockingbird",
+    "author": "Harper Lee"
+}]
+```
+
+The paths for `title` and `author` are `[].title` and `[].author`, respectively.
+The only difference between the single book and the array of books is that the fields' paths now have a `[].` prefix.
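+
+For illustration only, here is a minimal MockMvc sketch that documents the array response by writing the prefixed paths out by hand (assuming the usual static imports for `document`, `responseFields`, and `fieldWithPath`); the descriptor-reuse support shown next avoids this duplication:
+
+```
+this.mockMvc.perform(get("/books").accept(MediaType.APPLICATION_JSON)).andExpect(status().isOk())
+    .andDo(document("books", responseFields(
+        // Each descriptor repeats the []. prefix by hand
+        fieldWithPath("[].title").description("Title of the book"),
+        fieldWithPath("[].author").description("Author of the book"))));
+```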
+
+You can create the descriptors that document a book as follows:
+
+```
+FieldDescriptor[] book = new FieldDescriptor[] { fieldWithPath("title").description("Title of the book"),
+    fieldWithPath("author").description("Author of the book") };
+```
+
+You can then use them to document a single book, as follows:
+
+MockMvc
+
+```
+this.mockMvc.perform(get("/books/1").accept(MediaType.APPLICATION_JSON)).andExpect(status().isOk())
+    .andDo(document("book", responseFields(book))); (1)
+```
+
+|**1**|Document `title` and `author` by using the existing descriptors.|
+|-----|-----------------------------------------------------------------|
+
+WebTestClient
+
+```
+this.webTestClient.get().uri("/books/1").accept(MediaType.APPLICATION_JSON)
+    .exchange().expectStatus().isOk().expectBody()
+    .consumeWith(document("book",
+        responseFields(book))); (1)
+```
+
+|**1**|Document `title` and `author` by using the existing descriptors.|
+|-----|-----------------------------------------------------------------|
+
+REST Assured
+
+```
+RestAssured.given(this.spec).accept("application/json").filter(document("book", responseFields(book))) (1)
+    .when().get("/books/1").then().assertThat().statusCode(is(200));
+```
+
+|**1**|Document `title` and `author` by using the existing descriptors.|
+|-----|-----------------------------------------------------------------|
+
+You can also use the descriptors to document an array of books, as follows:
+
+MockMvc
+
+```
+this.mockMvc.perform(get("/books").accept(MediaType.APPLICATION_JSON)).andExpect(status().isOk())
+    .andDo(document("book", responseFields(fieldWithPath("[]").description("An array of books")) (1)
+        .andWithPrefix("[].", book))); (2)
+```
+
+|**1**| Document the array. |
+|-----|-----------------------------------------------------------------------------------------|
+|**2**|Document `[].title` and `[].author` by using the existing descriptors prefixed with `[].`|
+
+WebTestClient
+
+```
+this.webTestClient.get().uri("/books").accept(MediaType.APPLICATION_JSON)
+    .exchange().expectStatus().isOk().expectBody()
+    .consumeWith(document("books",
+        responseFields(
+            fieldWithPath("[]")
+                .description("An array of books")) (1)
+            .andWithPrefix("[].", book))); (2)
+```
+
+|**1**| Document the array. |
+|-----|-----------------------------------------------------------------------------------------|
+|**2**|Document `[].title` and `[].author` by using the existing descriptors prefixed with `[].`|
+
+REST Assured
+
+```
+RestAssured.given(this.spec).accept("application/json")
+    .filter(document("books", responseFields(fieldWithPath("[]").description("An array of books")) (1)
+        .andWithPrefix("[].", book))) (2)
+    .when().get("/books").then().assertThat().statusCode(is(200));
+```
+
+|**1**| Document the array. |
+|-----|-----------------------------------------------------------------------------------------|
+|**2**|Document `[].title` and `[].author` by using the existing descriptors prefixed with `[].`|
+
+#### [Documenting a Subsection of a Request or Response Payload](#documenting-your-api-request-response-payloads-subsections)
+
+If a payload is large or structurally complex, it can be useful to document individual sections of the payload.
+Spring REST Docs lets you do so by extracting a subsection of the payload and then documenting it.
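+
+As a rough preview of how the two mechanisms described in the following subsections can combine (a hypothetical MockMvc test against the `weather` payload shown in the next listing; both snippets come from a single `document` call):
+
+```
+this.mockMvc.perform(get("/locations/1").accept(MediaType.APPLICATION_JSON))
+    .andExpect(status().isOk())
+    .andDo(document("location",
+        // Extract the raw weather.temperature subsection of the response body...
+        responseBody(beneathPath("weather.temperature")),
+        // ...and describe the fields of that same subsection
+        responseFields(beneathPath("weather.temperature"),
+            fieldWithPath("high").description("The forecast high in degrees Celsius"),
+            fieldWithPath("low").description("The forecast low in degrees Celsius"))));
+```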
+ +##### [Documenting a Subsection of a Request or Response Body](#documenting-your-api-request-response-payloads-subsections-body) + +Consider the following JSON response body: + +``` +{ + "weather": { + "wind": { + "speed": 15.3, + "direction": 287.0 + }, + "temperature": { + "high": 21.2, + "low": 14.8 + } + } +} +``` + +You can produce a snippet that documents the `temperature` object as follows: + +MockMvc + +``` +this.mockMvc.perform(get("/locations/1").accept(MediaType.APPLICATION_JSON)).andExpect(status().isOk()) + .andDo(document("location", responseBody(beneathPath("weather.temperature")))); (1) +``` + +|**1**|Produce a snippet containing a subsection of the response body.
Uses the static `responseBody` and `beneathPath` methods on `org.springframework.restdocs.payload.PayloadDocumentation`.
To produce a snippet for the request body, you can use `requestBody` in place of `responseBody`.| +|-----|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +WebTestClient + +``` +this.webTestClient.get().uri("/locations/1").accept(MediaType.APPLICATION_JSON) + .exchange().expectStatus().isOk().expectBody() + .consumeWith(document("temperature", + responseBody(beneathPath("weather.temperature")))); (1) +``` + +|**1**|Produce a snippet containing a subsection of the response body.
Uses the static `responseBody` and `beneathPath` methods on `org.springframework.restdocs.payload.PayloadDocumentation`.
To produce a snippet for the request body, you can use `requestBody` in place of `responseBody`.| +|-----|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +REST Assured + +``` +RestAssured.given(this.spec).accept("application/json") + .filter(document("location", responseBody(beneathPath("weather.temperature")))) (1) + .when().get("/locations/1").then().assertThat().statusCode(is(200)); +``` + +|**1**|Produce a snippet containing a subsection of the response body.
Uses the static `responseBody` and `beneathPath` methods on `org.springframework.restdocs.payload.PayloadDocumentation`.
To produce a snippet for the request body, you can use `requestBody` in place of `responseBody`.|
+|-----|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+The result is a snippet with the following contents:
+
+```
+{
+    "temperature": {
+        "high": 21.2,
+        "low": 14.8
+    }
+}
+```
+
+To make the snippet’s name distinct, an identifier for the subsection is included.
+By default, this identifier is `beneath-${path}`.
+For example, the preceding code results in a snippet named `response-body-beneath-weather.temperature.adoc`.
+You can customize the identifier by using the `withSubsectionId(String)` method, as follows:
+
+```
+responseBody(beneathPath("weather.temperature").withSubsectionId("temp"));
+```
+
+The result is a snippet named `response-body-temp.adoc`.
+
+##### [Documenting the Fields of a Subsection of a Request or Response](#documenting-your-api-request-response-payloads-subsections-fields)
+
+As well as documenting a subsection of a request or response body, you can also document the fields in a particular subsection.
+You can produce a snippet that documents the fields of the `temperature` object (`high` and `low`) as follows:
+
+MockMvc
+
+```
+this.mockMvc.perform(get("/locations/1").accept(MediaType.APPLICATION_JSON)).andExpect(status().isOk())
+    .andDo(document("location", responseFields(beneathPath("weather.temperature"), (1)
+        fieldWithPath("high").description("The forecast high in degrees Celsius"), (2)
+        fieldWithPath("low").description("The forecast low in degrees Celsius"))));
+```
+
+|**1**|Produce a snippet describing the fields in the subsection of the response payload beneath the path `weather.temperature`.
Uses the static `beneathPath` method on `org.springframework.restdocs.payload.PayloadDocumentation`.|
+|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|**2**| Document the `high` and `low` fields. |
+
+WebTestClient
+
+```
+this.webTestClient.get().uri("/locations/1").accept(MediaType.APPLICATION_JSON)
+    .exchange().expectStatus().isOk().expectBody()
+    .consumeWith(document("temperature",
+        responseFields(beneathPath("weather.temperature"), (1)
+            fieldWithPath("high").description("The forecast high in degrees Celsius"), (2)
+            fieldWithPath("low").description("The forecast low in degrees Celsius"))));
+```
+
+|**1**|Produce a snippet describing the fields in the subsection of the response payload beneath the path `weather.temperature`.
Uses the static `beneathPath` method on `org.springframework.restdocs.payload.PayloadDocumentation`.|
+|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|**2**| Document the `high` and `low` fields. |
+
+REST Assured
+
+```
+RestAssured.given(this.spec).accept("application/json")
+    .filter(document("location", responseFields(beneathPath("weather.temperature"), (1)
+        fieldWithPath("high").description("The forecast high in degrees Celsius"), (2)
+        fieldWithPath("low").description("The forecast low in degrees Celsius"))))
+    .when().get("/locations/1").then().assertThat().statusCode(is(200));
+```
+
+|**1**|Produce a snippet describing the fields in the subsection of the response payload beneath the path `weather.temperature`.
Uses the static `beneathPath` method on `org.springframework.restdocs.payload.PayloadDocumentation`.| +|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Document the `high` and `low` fields. | + +The result is a snippet that contains a table describing the `high` and `low` fields of `weather.temperature`. +To make the snippet’s name distinct, an identifier for the subsection is included. +By default, this identifier is `beneath-${path}`. +For example, the preceding code results in a snippet named `response-fields-beneath-weather.temperature.adoc`. + +### [Request Parameters](#documenting-your-api-request-parameters) + +You can document a request’s parameters by using `requestParameters`. +You can include request parameters in a `GET` request’s query string. +The following examples show how to do so: + +MockMvc + +``` +this.mockMvc.perform(get("/users?page=2&per_page=100")) (1) + .andExpect(status().isOk()).andDo(document("users", requestParameters((2) + parameterWithName("page").description("The page to retrieve"), (3) + parameterWithName("per_page").description("Entries per page") (4) + ))); +``` + +|**1**| Perform a `GET` request with two parameters, `page` and `per_page`, in the query string. | +|-----|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|Configure Spring REST Docs to produce a snippet describing the request’s parameters.
Uses the static `requestParameters` method on `org.springframework.restdocs.request.RequestDocumentation`.| +|**3**| Document the `page` parameter.
Uses the static `parameterWithName` method on `org.springframework.restdocs.request.RequestDocumentation`. | +|**4**| Document the `per_page` parameter. | + +WebTestClient + +``` +this.webTestClient.get().uri("/users?page=2&per_page=100") (1) + .exchange().expectStatus().isOk().expectBody() + .consumeWith(document("users", requestParameters((2) + parameterWithName("page").description("The page to retrieve"), (3) + parameterWithName("per_page").description("Entries per page") (4) + ))); +``` + +|**1**| Perform a `GET` request with two parameters, `page` and `per_page`, in the query string. | +|-----|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|Configure Spring REST Docs to produce a snippet describing the request’s parameters.
Uses the static `requestParameters` method on `org.springframework.restdocs.request.RequestDocumentation`.| +|**3**| Document the `page` parameter.
Uses the static `parameterWithName` method on `org.springframework.restdocs.request.RequestDocumentation`. | +|**4**| Document the `per_page` parameter. | + +REST Assured + +``` +RestAssured.given(this.spec).filter(document("users", requestParameters((1) + parameterWithName("page").description("The page to retrieve"), (2) + parameterWithName("per_page").description("Entries per page")))) (3) + .when().get("/users?page=2&per_page=100") (4) + .then().assertThat().statusCode(is(200)); +``` + +|**1**|Configure Spring REST Docs to produce a snippet describing the request’s parameters.
Uses the static `requestParameters` method on `org.springframework.restdocs.request.RequestDocumentation`.| +|-----|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Document the `page` parameter.
Uses the static `parameterWithName` method on `org.springframework.restdocs.request.RequestDocumentation`. |
+|**3**| Document the `per_page` parameter. |
+|**4**| Perform a `GET` request with two parameters, `page` and `per_page`, in the query string. |
+
+You can also include request parameters as form data in the body of a POST request.
+The following examples show how to do so:
+
+MockMvc
+
+```
+this.mockMvc.perform(post("/users").param("username", "Tester")) (1)
+    .andExpect(status().isCreated()).andDo(document("create-user",
+        requestParameters(parameterWithName("username").description("The user's username"))));
+```
+
+|**1**|Perform a `POST` request with a single parameter, `username`.|
+|-----|-------------------------------------------------------------|
+
+WebTestClient
+
+```
+MultiValueMap<String, String> formData = new LinkedMultiValueMap<>();
+formData.add("username", "Tester");
+this.webTestClient.post().uri("/users").body(BodyInserters.fromFormData(formData)) (1)
+    .exchange().expectStatus().isCreated().expectBody()
+    .consumeWith(document("create-user", requestParameters(
+        parameterWithName("username").description("The user's username")
+)));
+```
+
+|**1**|Perform a `POST` request with a single parameter, `username`.|
+|-----|-------------------------------------------------------------|
+
+REST Assured
+
+```
+RestAssured.given(this.spec)
+    .filter(document("create-user",
+        requestParameters(parameterWithName("username").description("The user's username"))))
+    .formParam("username", "Tester") (1)
+    .when().post("/users") (2)
+    .then().assertThat().statusCode(is(200));
+```
+
+|**1**|Configure the `username` parameter.|
+|-----|-----------------------------------|
+|**2**| Perform the `POST` request. |
+
+In all cases, the result is a snippet named `request-parameters.adoc` that contains a table describing the parameters that are supported by the resource.
+
+When documenting request parameters, the test fails if an undocumented request parameter is used in the request.
+Similarly, the test also fails if a documented request parameter is not found in the request and the request parameter has not been marked as optional.
+
+If you do not want to document a request parameter, you can mark it as ignored.
+This prevents it from appearing in the generated snippet while avoiding the failure described above.
+
+You can also document request parameters in a relaxed mode where any undocumented parameters do not cause a test failure.
+To do so, use the `relaxedRequestParameters` method on `org.springframework.restdocs.request.RequestDocumentation`.
+This can be useful when documenting a particular scenario where you only want to focus on a subset of the request parameters.
+
+### [Path Parameters](#documenting-your-api-path-parameters)
+
+You can document a request’s path parameters by using `pathParameters`.
+The following examples show how to do so:
+
+MockMvc
+
+```
+this.mockMvc.perform(get("/locations/{latitude}/{longitude}", 51.5072, 0.1275)) (1)
+    .andExpect(status().isOk()).andDo(document("locations", pathParameters((2)
+        parameterWithName("latitude").description("The location's latitude"), (3)
+        parameterWithName("longitude").description("The location's longitude") (4)
+    )));
+```
+
+|**1**| Perform a `GET` request with two path parameters, `latitude` and `longitude`. 
| +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|Configure Spring REST Docs to produce a snippet describing the request’s path parameters.
Uses the static `pathParameters` method on `org.springframework.restdocs.request.RequestDocumentation`.| +|**3**| Document the parameter named `latitude`.
Uses the static `parameterWithName` method on `org.springframework.restdocs.request.RequestDocumentation`. | +|**4**| Document the parameter named `longitude`. | + +WebTestClient + +``` +this.webTestClient.get().uri("/locations/{latitude}/{longitude}", 51.5072, 0.1275) (1) + .exchange().expectStatus().isOk().expectBody() + .consumeWith(document("locations", + pathParameters((2) + parameterWithName("latitude").description("The location's latitude"), (3) + parameterWithName("longitude").description("The location's longitude")))); (4) +``` + +|**1**| Perform a `GET` request with two path parameters, `latitude` and `longitude`. | +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|Configure Spring REST Docs to produce a snippet describing the request’s path parameters.
Uses the static `pathParameters` method on `org.springframework.restdocs.request.RequestDocumentation`.| +|**3**| Document the parameter named `latitude`.
Uses the static `parameterWithName` method on `org.springframework.restdocs.request.RequestDocumentation`. | +|**4**| Document the parameter named `longitude`. | + +REST Assured + +``` +RestAssured.given(this.spec).filter(document("locations", pathParameters((1) + parameterWithName("latitude").description("The location's latitude"), (2) + parameterWithName("longitude").description("The location's longitude")))) (3) + .when().get("/locations/{latitude}/{longitude}", 51.5072, 0.1275) (4) + .then().assertThat().statusCode(is(200)); +``` + +|**1**|Configure Spring REST Docs to produce a snippet describing the request’s path parameters.
Uses the static `pathParameters` method on `org.springframework.restdocs.request.RequestDocumentation`.| +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Document the parameter named `latitude`.
Uses the static `parameterWithName` method on `org.springframework.restdocs.request.RequestDocumentation`. | +|**3**| Document the parameter named `longitude`. | +|**4**| Perform a `GET` request with two path parameters, `latitude` and `longitude`. | + +The result is a snippet named `path-parameters.adoc` that contains a table describing the path parameters that are supported by the resource. + +| |If you use MockMvc, to make the path parameters available for documentation, you must build the request by using one of the methods on `RestDocumentationRequestBuilders` rather than `MockMvcRequestBuilders`.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +When documenting path parameters, the test fails if an undocumented path parameter is used in the request. +Similarly, the test also fails if a documented path parameter is not found in the request and the path parameter has not been marked as optional. + +You can also document path parameters in a relaxed mode, where any undocumented parameters do not cause a test failure. +To do so, use the `relaxedPathParameters` method on `org.springframework.restdocs.request.RequestDocumentation`. +This can be useful when documenting a particular scenario where you only want to focus on a subset of the path parameters. + +If you do not want to document a path parameter, you can mark it as ignored. +Doing so prevents it from appearing in the generated snippet while avoiding the failure described earlier. + +### [Request Parts](#documenting-your-api-request-parts) + +You can use `requestParts` to document the parts of a multipart request. +The following example shows how to do so: + +MockMvc + +``` +this.mockMvc.perform(multipart("/upload").file("file", "example".getBytes())) (1) + .andExpect(status().isOk()).andDo(document("upload", requestParts((2) + partWithName("file").description("The file to upload")) (3) + )); +``` + +|**1**| Perform a `POST` request with a single part named `file`. | +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|Configure Spring REST Docs to produce a snippet describing the request’s parts.
Uses the static `requestParts` method on `org.springframework.restdocs.request.RequestDocumentation`.| +|**3**| Document the part named `file`.
Uses the static `partWithName` method on `org.springframework.restdocs.request.RequestDocumentation`. | + +WebTestClient + +``` +MultiValueMap<String, Object> multipartData = new LinkedMultiValueMap<>(); +multipartData.add("file", "example".getBytes()); +this.webTestClient.post().uri("/upload").body(BodyInserters.fromMultipartData(multipartData)) (1) + .exchange().expectStatus().isOk().expectBody() + .consumeWith(document("upload", requestParts((2) + partWithName("file").description("The file to upload")) (3) +)); +``` + +|**1**| Perform a `POST` request with a single part named `file`. | +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|Configure Spring REST Docs to produce a snippet describing the request’s parts.&#13;
Uses the static `requestParts` method on `org.springframework.restdocs.request.RequestDocumentation`.| +|**3**| Document the part named `file`.
Uses the static `partWithName` method on `org.springframework.restdocs.request.RequestDocumentation`. | + +REST Assured + +``` +RestAssured.given(this.spec).filter(document("users", requestParts((1) + partWithName("file").description("The file to upload")))) (2) + .multiPart("file", "example") (3) + .when().post("/upload") (4) + .then().statusCode(is(200)); +``` + +|**1**|Configure Spring REST Docs to produce a snippet describing the request’s parts.
Uses the static `requestParts` method on `org.springframework.restdocs.request.RequestDocumentation`.| +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Document the part named `file`.
Uses the static `partWithName` method on `org.springframework.restdocs.request.RequestDocumentation`. | + +The result is a snippet named `request-parts.adoc` that contains a table describing the request parts that are supported by the resource. + +When documenting request parts, the test fails if an undocumented part is used in the request. +Similarly, the test also fails if a documented part is not found in the request and the part has not been marked as optional. + +You can also document request parts in a relaxed mode where any undocumented parts do not cause a test failure. +To do so, use the `relaxedRequestParts` method on `org.springframework.restdocs.request.RequestDocumentation`. +This can be useful when documenting a particular scenario where you only want to focus on a subset of the request parts. + +If you do not want to document a request part, you can mark it as ignored. +This prevents it from appearing in the generated snippet while avoiding the failure described earlier. + +### [Request Part Payloads](#documenting-your-api-request-parts-payloads) + +You can document the payload of a request part in much the same way as the [payload of a request](#documenting-your-api-request-response-payloads), with support for documenting a request part’s body and its fields. + +#### [Documenting a Request Part’s Body](#documenting-your-api-request-parts-payloads-body) + +You can generate a snippet containing the body of a request part as follows: + +MockMvc + +``` +MockMultipartFile image = new MockMultipartFile("image", "image.png", "image/png", "<<png data>>".getBytes()); +MockMultipartFile metadata = new MockMultipartFile("metadata", "", "application/json", + "{ \"version\": \"1.0\"}".getBytes()); + +this.mockMvc.perform(multipart("/images").file(image).file(metadata).accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()).andDo(document("image-upload", requestPartBody("metadata"))); (1) +``` + +|**1**|Configure Spring REST Docs to produce a snippet containing the body of the request part named `metadata`.&#13;
Uses the static `requestPartBody` method on `PayloadDocumentation`.| +|-----|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +WebTestClient + +``` +MultiValueMap<String, Object> multipartData = new LinkedMultiValueMap<>(); +Resource imageResource = new ByteArrayResource("<<png data>>".getBytes()) { + + @Override + public String getFilename() { + return "image.png"; + } + +}; +multipartData.add("image", imageResource); +multipartData.add("metadata", Collections.singletonMap("version", "1.0")); + +this.webTestClient.post().uri("/images").body(BodyInserters.fromMultipartData(multipartData)) + .accept(MediaType.APPLICATION_JSON).exchange() + .expectStatus().isOk().expectBody() + .consumeWith(document("image-upload", + requestPartBody("metadata"))); (1) +``` + +|**1**|Configure Spring REST Docs to produce a snippet containing the body of the request part named `metadata`.&#13;
Uses the static `requestPartBody` method on `PayloadDocumentation`.| +|-----|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +REST Assured + +``` +Map<String, String> metadata = new HashMap<>(); +metadata.put("version", "1.0"); +RestAssured.given(this.spec).accept("application/json") + .filter(document("image-upload", requestPartBody("metadata"))) (1) + .when().multiPart("image", new File("image.png"), "image/png").multiPart("metadata", metadata) + .post("images").then().assertThat().statusCode(is(200)); +``` + +|**1**|Configure Spring REST Docs to produce a snippet containing the body of the request part named `metadata`.&#13;
Uses the static `requestPartBody` method on `PayloadDocumentation`.| +|-----|---------------------------------------------------------------------------------------------------------------------------------------------------| + +The result is a snippet named `request-part-${part-name}-body.adoc` that contains the part’s body. +For example, documenting a part named `metadata` produces a snippet named `request-part-metadata-body.adoc`. + +#### [Documenting a Request Part’s Fields](#documenting-your-api-request-parts-payloads-fields) + +You can document a request part’s fields in much the same way as the fields of a request or response, as follows: + +MockMvc + +``` +MockMultipartFile image = new MockMultipartFile("image", "image.png", "image/png", "<<png data>>".getBytes()); +MockMultipartFile metadata = new MockMultipartFile("metadata", "", "application/json", + "{ \"version\": \"1.0\"}".getBytes()); + +this.mockMvc.perform(multipart("/images").file(image).file(metadata).accept(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()).andDo(document("image-upload", requestPartFields("metadata", (1) + fieldWithPath("version").description("The version of the image")))); (2) +``` + +|**1**|Configure Spring REST Docs to produce a snippet describing the fields in the payload of the request part named `metadata`.&#13;
Uses the static `requestPartFields` method on `PayloadDocumentation`.| +|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Expect a field with the path `version`.
Uses the static `fieldWithPath` method on `org.springframework.restdocs.payload.PayloadDocumentation`. | + +WebTestClient + +``` +MultiValueMap<String, Object> multipartData = new LinkedMultiValueMap<>(); +Resource imageResource = new ByteArrayResource("<<png data>>".getBytes()) { + + @Override + public String getFilename() { + return "image.png"; + } + +}; +multipartData.add("image", imageResource); +multipartData.add("metadata", Collections.singletonMap("version", "1.0")); +this.webTestClient.post().uri("/images").body(BodyInserters.fromMultipartData(multipartData)) + .accept(MediaType.APPLICATION_JSON).exchange() + .expectStatus().isOk().expectBody() + .consumeWith(document("image-upload", + requestPartFields("metadata", (1) + fieldWithPath("version").description("The version of the image")))); (2) +``` + +|**1**|Configure Spring REST Docs to produce a snippet describing the fields in the payload of the request part named `metadata`.&#13;
Uses the static `requestPartFields` method on `PayloadDocumentation`.| +|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Expect a field with the path `version`.
Uses the static `fieldWithPath` method on `org.springframework.restdocs.payload.PayloadDocumentation`. | + +REST Assured + +``` +Map<String, String> metadata = new HashMap<>(); +metadata.put("version", "1.0"); +RestAssured.given(this.spec).accept("application/json") + .filter(document("image-upload", requestPartFields("metadata", (1) + fieldWithPath("version").description("The version of the image")))) (2) + .when().multiPart("image", new File("image.png"), "image/png").multiPart("metadata", metadata) + .post("images").then().assertThat().statusCode(is(200)); +``` + +|**1**|Configure Spring REST Docs to produce a snippet describing the fields in the payload of the request part named `metadata`.&#13;
Uses the static `requestPartFields` method on `PayloadDocumentation`.| +|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Expect a field with the path `version`.
Uses the static `fieldWithPath` method on `org.springframework.restdocs.payload.PayloadDocumentation`. | + +The result is a snippet that contains a table describing the part’s fields. +This snippet is named `request-part-${part-name}-fields.adoc`. +For example, documenting a part named `metadata` produces a snippet named `request-part-metadata-fields.adoc`. + +When documenting fields, the test fails if an undocumented field is found in the payload of the part. +Similarly, the test also fails if a documented field is not found in the payload of the part and the field has not been marked as optional. +For payloads with a hierarchical structure, documenting a field is sufficient for all of its descendants to also be treated as having been documented. + +If you do not want to document a field, you can mark it as ignored. +Doing so prevents it from appearing in the generated snippet while avoiding the failure described above. + +You can also document fields in a relaxed mode, where any undocumented fields do not cause a test failure. +To do so, use the `relaxedRequestPartFields` method on `org.springframework.restdocs.payload.PayloadDocumentation`. +This can be useful when documenting a particular scenario where you only want to focus on a subset of the payload of the part. + +For further information on describing fields, documenting payloads that use XML, and more, see the [section on documenting request and response payloads](#documenting-your-api-request-response-payloads). + +### [HTTP Headers](#documenting-your-api-http-headers) + +You can document the headers in a request or response by using `requestHeaders` and `responseHeaders`, respectively. +The following examples show how to do so: + +MockMvc + +``` +this.mockMvc.perform(get("/people").header("Authorization", "Basic dXNlcjpzZWNyZXQ=")) (1) + .andExpect(status().isOk()).andDo(document("headers", requestHeaders((2) + headerWithName("Authorization").description("Basic auth credentials")), (3) + responseHeaders((4) + headerWithName("X-RateLimit-Limit") + .description("The total number of requests permitted per period"), + headerWithName("X-RateLimit-Remaining") + .description("Remaining requests permitted in current period"), + headerWithName("X-RateLimit-Reset") + .description("Time at which the rate limit period will reset")))); +``` + +|**1**| Perform a `GET` request with an `Authorization` header that uses basic authentication. | +|-----|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|Configure Spring REST Docs to produce a snippet describing the request’s headers.
Uses the static `requestHeaders` method on `org.springframework.restdocs.headers.HeaderDocumentation`.| +|**3**| Document the `Authorization` header.
Uses the static `headerWithName` method on `org.springframework.restdocs.headers.HeaderDocumentation`. | +|**4**| Produce a snippet describing the response’s headers.
Uses the static `responseHeaders` method on `org.springframework.restdocs.headers.HeaderDocumentation`. | + +WebTestClient + +``` +this.webTestClient + .get().uri("/people").header("Authorization", "Basic dXNlcjpzZWNyZXQ=") (1) + .exchange().expectStatus().isOk().expectBody() + .consumeWith(document("headers", + requestHeaders((2) + headerWithName("Authorization").description("Basic auth credentials")), (3) + responseHeaders((4) + headerWithName("X-RateLimit-Limit") + .description("The total number of requests permitted per period"), + headerWithName("X-RateLimit-Remaining") + .description("Remaining requests permitted in current period"), + headerWithName("X-RateLimit-Reset") + .description("Time at which the rate limit period will reset")))); +``` + +|**1**| Perform a `GET` request with an `Authorization` header that uses basic authentication. | +|-----|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|Configure Spring REST Docs to produce a snippet describing the request’s headers.
Uses the static `requestHeaders` method on `org.springframework.restdocs.headers.HeaderDocumentation`.| +|**3**| Document the `Authorization` header.
Uses the static `headerWithName` method on `org.springframework.restdocs.headers.HeaderDocumentation`. | +|**4**| Produce a snippet describing the response’s headers.
Uses the static `responseHeaders` method on `org.springframework.restdocs.headers.HeaderDocumentation`. | + +REST Assured + +``` +RestAssured.given(this.spec).filter(document("headers", requestHeaders((1) + headerWithName("Authorization").description("Basic auth credentials")), (2) + responseHeaders((3) + headerWithName("X-RateLimit-Limit") + .description("The total number of requests permitted per period"), + headerWithName("X-RateLimit-Remaining") + .description("Remaining requests permitted in current period"), + headerWithName("X-RateLimit-Reset") + .description("Time at which the rate limit period will reset")))) + .header("Authorization", "Basic dXNlcjpzZWNyZXQ=") (4) + .when().get("/people").then().assertThat().statusCode(is(200)); +``` + +|**1**|Configure Spring REST Docs to produce a snippet describing the request’s headers.
Uses the static `requestHeaders` method on `org.springframework.restdocs.headers.HeaderDocumentation`.| +|-----|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Document the `Authorization` header.
Uses the static `headerWithName` method on `org.springframework.restdocs.headers.HeaderDocumentation`. | +|**3**| Produce a snippet describing the response’s headers.&#13;
Uses the static `responseHeaders` method on `org.springframework.restdocs.headers.HeaderDocumentation`. | +|**4**| Configure the request with an `Authorization` header that uses basic authentication. | + +The result is a snippet named `request-headers.adoc` and a snippet named `response-headers.adoc`. +Each contains a table describing the headers. + +When documenting HTTP headers, the test fails if a documented header is not found in the request or response. + +### [Reusing Snippets](#documenting-your-api-reusing-snippets) + +It is common for an API that is being documented to have some features that are common across several of its resources. +To avoid repetition when documenting such resources, you can reuse a `Snippet` configured with the common elements. + +First, create the `Snippet` that describes the common elements. +The following example shows how to do so: + +``` +protected final LinksSnippet pagingLinks = links( + linkWithRel("first").optional().description("The first page of results"), + linkWithRel("last").optional().description("The last page of results"), + linkWithRel("next").optional().description("The next page of results"), + linkWithRel("prev").optional().description("The previous page of results")); +``` + +Second, use this snippet and add further descriptors that are resource-specific. +The following examples show how to do so: + +MockMvc + +``` +this.mockMvc.perform(get("/").accept(MediaType.APPLICATION_JSON)).andExpect(status().isOk()) + .andDo(document("example", this.pagingLinks.and((1) + linkWithRel("alpha").description("Link to the alpha resource"), + linkWithRel("bravo").description("Link to the bravo resource")))); +``` + +|**1**|Reuse the `pagingLinks` `Snippet`, calling `and` to add descriptors that are specific to the resource that is being documented.| +|-----|-------------------------------------------------------------------------------------------------------------------------------| + +WebTestClient + +``` +this.webTestClient.get().uri("/").accept(MediaType.APPLICATION_JSON).exchange() + .expectStatus().isOk().expectBody() + .consumeWith(document("example", this.pagingLinks.and((1) + linkWithRel("alpha").description("Link to the alpha resource"), + linkWithRel("bravo").description("Link to the bravo resource")))); +``` + +|**1**|Reuse the `pagingLinks` `Snippet`, calling `and` to add descriptors that are specific to the resource that is being documented.| +|-----|-------------------------------------------------------------------------------------------------------------------------------| + +REST Assured + +``` +RestAssured.given(this.spec).accept("application/json").filter(document("example", this.pagingLinks.and((1) + linkWithRel("alpha").description("Link to the alpha resource"), + linkWithRel("bravo").description("Link to the bravo resource")))).get("/").then().assertThat() + .statusCode(is(200)); +``` + +|**1**|Reuse the `pagingLinks` `Snippet`, calling `and` to add descriptors that are specific to the resource that is being documented.| +|-----|-------------------------------------------------------------------------------------------------------------------------------| + +The result of the example is that links with `rel` values of `first`, `last`, `next`, `prev`, `alpha`, and `bravo` are all documented. + +### [Documenting Constraints](#documenting-your-api-constraints) + +Spring REST Docs provides a number of classes that can help you to document constraints. &#13;
+You can use an instance of `ConstraintDescriptions` to access descriptions of a class’s constraints. +The following example shows how to do so: + +``` +public void example() { + ConstraintDescriptions userConstraints = new ConstraintDescriptions(UserInput.class); (1) + List<String> descriptions = userConstraints.descriptionsForProperty("name"); (2) +} + +static class UserInput { + + @NotNull + @Size(min = 1) + String name; + + @NotNull + @Size(min = 8) + String password; + +} +``` + +|**1**| Create an instance of `ConstraintDescriptions` for the `UserInput` class. | +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|Get the descriptions of the `name` property’s constraints.&#13;
This list contains two descriptions: one for the `NotNull` constraint and one for the `Size` constraint.| +|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The [`ApiDocumentation`](https://github.com/spring-projects/spring-restdocs/tree/v2.0.6.RELEASE/samples/rest-notes-spring-hateoas/src/test/java/com/example/notes/ApiDocumentation.java) class in the Spring HATEOAS sample shows this functionality in action. + +#### [Finding Constraints](#documenting-your-api-constraints-finding) + +By default, constraints are found by using a Bean Validation `Validator`. +Currently, only property constraints are supported. +You can customize the `Validator` that is used by creating `ConstraintDescriptions` with a custom `ValidatorConstraintResolver` instance. +To take complete control of constraint resolution, you can use your own implementation of `ConstraintResolver`. + +#### [Describing Constraints](#documenting-your-api-constraints-describing) + +Default descriptions are provided for all of Bean Validation 2.0’s constraints: + +* `AssertFalse` + +* `AssertTrue` + +* `DecimalMax` + +* `DecimalMin` + +* `Digits` + +* `Email` + +* `Future` + +* `FutureOrPresent` + +* `Max` + +* `Min` + +* `Negative` + +* `NegativeOrZero` + +* `NotBlank` + +* `NotEmpty` + +* `NotNull` + +* `Null` + +* `Past` + +* `PastOrPresent` + +* `Pattern` + +* `Positive` + +* `PositiveOrZero` + +* `Size` + +Default descriptions are also provided for the following constraints from Hibernate +Validator: + +* `CodePointLength` + +* `CreditCardNumber` + +* `Currency` + +* `EAN` + +* `Email` + +* `Length` + +* `LuhnCheck` + +* `Mod10Check` + +* `Mod11Check` + +* `NotBlank` + +* `NotEmpty` + +* `Range` + +* `SafeHtml` + +* `URL` + +To override the default descriptions or to provide a new description, you can create a resource bundle with a base name of `org.springframework.restdocs.constraints.ConstraintDescriptions`. +The Spring HATEOAS-based sample contains [an example of such a resource bundle](https://github.com/spring-projects/spring-restdocs/tree/v2.0.6.RELEASE/samples/rest-notes-spring-hateoas/src/test/resources/org/springframework/restdocs/constraints/ConstraintDescriptions.properties). + +Each key in the resource bundle is the fully-qualified name of a constraint plus `.description`. +For example, the key for the standard `@NotNull` constraint is `javax.validation.constraints.NotNull.description`. + +You can use a property placeholder referring to a constraint’s attributes in its description. +For example, the default description of the `@Min` constraint, `Must be at least ${value}`, refers to the constraint’s `value` attribute. + +To take more control of constraint description resolution, you can create `ConstraintDescriptions` with a custom `ResourceBundleConstraintDescriptionResolver`. +To take complete control, you can create `ConstraintDescriptions` with a custom `ConstraintDescriptionResolver` implementation. + +#### [Using Constraint Descriptions in Generated Snippets](#_using_constraint_descriptions_in_generated_snippets) + +Once you have a constraint’s descriptions, you are free to use them however you like in the generated snippets. +For example, you may want to include the constraint descriptions as part of a field’s description. +Alternatively, you could include the constraints as [extra information](#documenting-your-api-customizing-including-extra-information) in the request fields snippet. &#13;
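+One way to do the latter is with a small helper that folds each property’s constraint descriptions into a `constraints` snippet attribute. The following is a minimal sketch (the class name and the `constraints` attribute key are illustrative choices, not part of the Spring REST Docs API): + +``` +import static org.springframework.restdocs.payload.PayloadDocumentation.fieldWithPath; +import static org.springframework.restdocs.snippet.Attributes.key; + +import org.springframework.restdocs.constraints.ConstraintDescriptions; +import org.springframework.restdocs.payload.FieldDescriptor; +import org.springframework.util.StringUtils; + +class ConstrainedFields { + + private final ConstraintDescriptions constraintDescriptions; + + ConstrainedFields(Class<?> input) { + this.constraintDescriptions = new ConstraintDescriptions(input); + } + + // Create a field descriptor whose "constraints" attribute holds the property's + // constraint descriptions, joined into a single string. + FieldDescriptor withPath(String path) { + return fieldWithPath(path).attributes(key("constraints").value(StringUtils + .collectionToDelimitedString(this.constraintDescriptions.descriptionsForProperty(path), ". "))); + } + +} +``` + +You can then document a field with `new ConstrainedFields(UserInput.class).withPath("name").description("The user's name")` and render the `constraints` attribute from a custom snippet template.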
The [`ApiDocumentation`](https://github.com/spring-projects/spring-restdocs/tree/v2.0.6.RELEASE/samples/rest-notes-spring-hateoas/src/test/java/com/example/notes/ApiDocumentation.java) class in the Spring HATEOAS-based sample illustrates the latter approach. + +### [Default Snippets](#documenting-your-api-default-snippets) + +A number of snippets are produced automatically when you document a request and response. + +| Snippet | Description | +|---------------------|---------------------------------------------------------------------------------------------------------------------| +| `curl-request.adoc` |Contains the [`curl`](https://curl.haxx.se) command that is equivalent to the `MockMvc` call that is being documented.| +|`httpie-request.adoc`|Contains the [`HTTPie`](https://httpie.org) command that is equivalent to the `MockMvc` call that is being documented.| +| `http-request.adoc` | Contains the HTTP request that is equivalent to the `MockMvc` call that is being documented. | +|`http-response.adoc` | Contains the HTTP response that was returned. | +| `request-body.adoc` | Contains the body of the request that was sent. | +|`response-body.adoc` | Contains the body of the response that was returned. | + +You can configure which snippets are produced by default. +See the [configuration section](#configuration) for more information. + +### [Using Parameterized Output Directories](#documentating-your-api-parameterized-output-directories) + +When using MockMvc, REST Assured, or `WebTestClient`, you can parameterize the output directory used by `document`. +Parameterizing output with `WebTestClient` requires Spring Framework 5.3.5 or later. + +The following parameters are supported: + +| Parameter | Description | +|--------------|---------------------------------------------------------------| +| {methodName} | The unmodified name of the test method. | +|{method-name} | The name of the test method, formatted using kebab-case. | +|{method\_name}| The name of the test method, formatted using snake\_case. | +| {ClassName} | The unmodified simple name of the test class. | +| {class-name} |The simple name of the test class, formatted using kebab-case. | +|{class\_name} |The simple name of the test class, formatted using snake\_case.| +| {step} | The count of calls made to the service in the current test. | + +For example, `document("{class-name}/{method-name}")` in a test method named `creatingANote` on the test class `GettingStartedDocumentation` writes snippets into a directory named `getting-started-documentation/creating-a-note`. + +A parameterized output directory is particularly useful in combination with a `@Before` method. +It lets documentation be configured once in a setup method and then reused in every test in the class. &#13;
+The following examples show how to do so: + +MockMvc + +``` +@Before +public void setUp() { + this.mockMvc = MockMvcBuilders.webAppContextSetup(this.context) + .apply(documentationConfiguration(this.restDocumentation)).alwaysDo(document("{method-name}/{step}/")) + .build(); +} +``` + +REST Assured + +``` +@Before +public void setUp() { + this.spec = new RequestSpecBuilder().addFilter(documentationConfiguration(this.restDocumentation)) + .addFilter(document("{method-name}/{step}")).build(); +} +``` + +WebTestClient + +``` +@Before +public void setUp() { + this.webTestClient = WebTestClient.bindToApplicationContext(this.context).configureClient() + .filter(documentationConfiguration(this.restDocumentation)) + .entityExchangeResultConsumer(document("{method-name}/{step}")).build(); +} +``` + +With this configuration in place, every call to the service you are testing produces the [default snippets](#documenting-your-api-default-snippets) without any further configuration. +Take a look at the `GettingStartedDocumentation` classes in each of the sample applications to see this functionality in action. + +### [Customizing the Output](#documenting-your-api-customizing) + +This section describes how to customize the output of Spring REST Docs. + +#### [Customizing the Generated Snippets](#documenting-your-api-customizing-snippets) + +Spring REST Docs uses [Mustache](https://mustache.github.io) templates to produce the generated snippets. [Default templates](https://github.com/spring-projects/spring-restdocs/tree/v2.0.6.RELEASE/spring-restdocs-core/src/main/resources/org/springframework/restdocs/templates) are provided for each of the snippets that Spring REST Docs can produce. +To customize a snippet’s content, you can provide your own template. + +Templates are loaded from the classpath from an `org.springframework.restdocs.templates` subpackage. +The name of the subpackage is determined by the ID of the template format that is in use. +The default template format, Asciidoctor, has an ID of `asciidoctor`, so snippets are loaded from `org.springframework.restdocs.templates.asciidoctor`. +Each template is named after the snippet that it produces. +For example, to override the template for the `curl-request.adoc` snippet, create a template named `curl-request.snippet` in `src/test/resources/org/springframework/restdocs/templates/asciidoctor`. + +#### [Including Extra Information](#documenting-your-api-customizing-including-extra-information) + +There are two ways to provide extra information for inclusion in a generated snippet: + +* Use the `attributes` method on a descriptor to add one or more attributes to it. + +* Pass in some attributes when calling `curlRequest`, `httpRequest`, `httpResponse`, and so on. + Such attributes are associated with the snippet as a whole. + +Any additional attributes are made available during the template rendering process. +Coupled with a custom snippet template, this makes it possible to include extra information in a generated snippet. + +A concrete example is the addition of a constraints column and a title when documenting request fields. +The first step is to provide a `constraints` attribute for each field that you document and to provide a `title` attribute. +The following examples show how to do so: + +MockMvc + +``` +.andDo(document("create-user", requestFields(attributes(key("title").value("Fields for user creation")), (1) + fieldWithPath("name").description("The user's name") + .attributes(key("constraints").value("Must not be null. &#13;
Must not be empty")), (2) + fieldWithPath("email").description("The user's email address") + .attributes(key("constraints").value("Must be a valid email address"))))); (3) +``` + +|**1**|Configure the `title` attribute for the request fields snippet.| +|-----|---------------------------------------------------------------| +|**2**| Set the `constraints` attribute for the `name` field. | +|**3**| Set the `constraints` attribute for the `email` field. | + +WebTestClient + +``` +.consumeWith(document("create-user", + requestFields( + attributes(key("title").value("Fields for user creation")), (1) + fieldWithPath("name") + .description("The user's name") + .attributes(key("constraints").value("Must not be null. Must not be empty")), (2) + fieldWithPath("email") + .description("The user's email address") + .attributes(key("constraints").value("Must be a valid email address"))))); (3) +``` + +|**1**|Configure the `title` attribute for the request fields snippet.| +|-----|---------------------------------------------------------------| +|**2**| Set the `constraints` attribute for the `name` field. | +|**3**| Set the `constraints` attribute for the `email` field. | + +REST Assured + +``` +.filter(document("create-user", + requestFields(attributes(key("title").value("Fields for user creation")), (1) + fieldWithPath("name").description("The user's name") + .attributes(key("constraints").value("Must not be null. Must not be empty")), (2) + fieldWithPath("email").description("The user's email address") + .attributes(key("constraints").value("Must be a valid email address"))))) (3) +``` + +|**1**|Configure the `title` attribute for the request fields snippet.| +|-----|---------------------------------------------------------------| +|**2**| Set the `constraints` attribute for the `name` field. | +|**3**| Set the `constraints` attribute for the `email` field. | + +The second step is to provide a custom template named `request-fields.snippet` that includes the information about the fields' constraints in the generated snippet’s table and adds a title. +The following example shows how to do so: + +``` +.{{title}} (1) +|=== +|Path|Type|Description|Constraints (2) + +{{#fields}} +|{{path}} +|{{type}} +|{{description}} +|{{constraints}} (3) + +{{/fields}} +|=== +``` + +|**1**| Add a title to the table. | +|-----|--------------------------------------------------------------------------| +|**2**| Add a new column named "Constraints". | +|**3**|Include the descriptors' `constraints` attribute in each row of the table.| + +## [Customizing requests and responses](#customizing-requests-and-responses) + +There may be situations where you do not want to document a request exactly as it was sent or a response exactly as it was received. +Spring REST Docs provides a number of preprocessors that can be used to modify a request or response before it is documented. + +Preprocessing is configured by calling `document` with an `OperationRequestPreprocessor` or an `OperationResponsePreprocessor`. +You can obtain instances by using the static `preprocessRequest` and `preprocessResponse` methods on `Preprocessors`. 
+The following examples show how to do so: + +MockMvc + +``` +this.mockMvc.perform(get("/")).andExpect(status().isOk()) + .andDo(document("index", preprocessRequest(removeHeaders("Foo")), (1) + preprocessResponse(prettyPrint()))); (2) +``` + +|**1**|Apply a request preprocessor that removes the header named `Foo`.| +|-----|-----------------------------------------------------------------| +|**2**| Apply a response preprocessor that pretty prints its content. | + +WebTestClient + +``` +this.webTestClient.get().uri("/").exchange().expectStatus().isOk().expectBody() + .consumeWith(document("index", + preprocessRequest(removeHeaders("Foo")), (1) + preprocessResponse(prettyPrint()))); (2) +``` + +|**1**|Apply a request preprocessor that removes the header named `Foo`.| +|-----|-----------------------------------------------------------------| +|**2**| Apply a response preprocessor that pretty prints its content. | + +REST Assured + +``` +RestAssured.given(this.spec).filter(document("index", preprocessRequest(removeHeaders("Foo")), (1) + preprocessResponse(prettyPrint()))) (2) + .when().get("/").then().assertThat().statusCode(is(200)); +``` + +|**1**|Apply a request preprocessor that removes the header named `Foo`.| +|-----|-----------------------------------------------------------------| +|**2**| Apply a response preprocessor that pretty prints its content. | + +Alternatively, you may want to apply the same preprocessors to every test. +You can do so by using the `RestDocumentationConfigurer` API in your `@Before` method to configure the preprocessors. +For example, to remove the `Foo` header from all requests and pretty print all responses, you could do one of the following (depending on your testing environment): + +MockMvc + +``` +private MockMvc mockMvc; + +@Before +public void setup() { + this.mockMvc = MockMvcBuilders.webAppContextSetup(this.context) + .apply(documentationConfiguration(this.restDocumentation).operationPreprocessors() + .withRequestDefaults(removeHeaders("Foo")) (1) + .withResponseDefaults(prettyPrint())) (2) + .build(); +} +``` + +|**1**|Apply a request preprocessor that removes the header named `Foo`.| +|-----|-----------------------------------------------------------------| +|**2**| Apply a response preprocessor that pretty prints its content. | + +WebTestClient + +``` +private WebTestClient webTestClient; + +@Before +public void setup() { + this.webTestClient = WebTestClient.bindToApplicationContext(this.context) + .configureClient() + .filter(documentationConfiguration(this.restDocumentation) + .operationPreprocessors() + .withRequestDefaults(removeHeaders("Foo")) (1) + .withResponseDefaults(prettyPrint())) (2) + .build(); +} +``` + +|**1**|Apply a request preprocessor that removes the header named `Foo`.| +|-----|-----------------------------------------------------------------| +|**2**| Apply a response preprocessor that pretty prints its content. | + +REST Assured + +``` +private RequestSpecification spec; + +@Before +public void setup() { + this.spec = new RequestSpecBuilder() + .addFilter(documentationConfiguration(this.restDocumentation).operationPreprocessors() + .withRequestDefaults(removeHeaders("Foo")) (1) + .withResponseDefaults(prettyPrint())) (2) + .build(); +} +``` + +|**1**|Apply a request preprocessor that removes the header named `Foo`.| +|-----|-----------------------------------------------------------------| +|**2**| Apply a response preprocessor that pretty prints its content. 
| + +Then, in each test, you can perform any configuration specific to that test. +The following examples show how to do so: + +MockMvc + +``` +this.mockMvc.perform(get("/")).andExpect(status().isOk()) + .andDo(document("index", links(linkWithRel("self").description("Canonical self link")))); +``` + +WebTestClient + +``` +this.webTestClient.get().uri("/").exchange().expectStatus().isOk() + .expectBody().consumeWith(document("index", + links(linkWithRel("self").description("Canonical self link")))); +``` + +REST Assured + +``` +RestAssured.given(this.spec) + .filter(document("index", links(linkWithRel("self").description("Canonical self link")))).when() + .get("/").then().assertThat().statusCode(is(200)); +``` + +Various built-in preprocessors, including those illustrated above, are available through the static methods on `Preprocessors`. +See [below](#customizing-requests-and-responses-preprocessors) for further details. + +### [Preprocessors](#customizing-requests-and-responses-preprocessors) + +#### [Pretty Printing](#customizing-requests-and-responses-preprocessors-pretty-print) + +`prettyPrint` on `Preprocessors` formats the content of the request or response to make it easier to read. + +#### [Masking Links](#customizing-requests-and-responses-preprocessors-mask-links) + +If you are documenting a hypermedia-based API, you may want to encourage clients to navigate the API by using links rather than through the use of hard-coded URIs. +One way to do so is to limit the use of URIs in the documentation. `maskLinks` on `Preprocessors` replaces the `href` of any links in the response with `…`. +You can also specify a different replacement if you wish. + +#### [Removing Headers](#customizing-requests-and-responses-preprocessors-remove-headers) + +`removeHeaders` on `Preprocessors` removes any headers from the request or response where the name is equal to any of the given header names. + +`removeMatchingHeaders` on `Preprocessors` removes any headers from the request or response where the name matches any of the given regular expression patterns. + +#### [Replacing Patterns](#customizing-requests-and-responses-preprocessors-replace-patterns) + +`replacePattern` on `Preprocessors` provides a general-purpose mechanism for replacing content in a request or response. +Any occurrences that match a regular expression are replaced. + +#### [Modifying Request Parameters](#customizing-requests-and-responses-preprocessors-modify-request-parameters) + +You can use `modifyParameters` on `Preprocessors` to add, set, and remove request parameters. + +#### [Modifying URIs](#customizing-requests-and-responses-preprocessors-modify-uris) + +| |If you use MockMvc or a WebTestClient that is not bound to a server, you should customize URIs by [changing the configuration](#configuration-uris).| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------| + +You can use `modifyUris` on `Preprocessors` to modify any URIs in a request or a response. +When using REST Assured or WebTestClient bound to a server, this lets you customize the URIs that appear in the documentation while testing a local instance of the service. + +#### [Writing Your Own Preprocessor](#customizing-requests-and-responses-preprocessors-writing-your-own) + +If one of the built-in preprocessors does not meet your needs, you can write your own by implementing the `OperationPreprocessor` interface. &#13;
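+As a starting point, the following is a minimal pass-through sketch (the class name is illustrative; the comments mark where you would return modified copies of the request and response): + +``` +import org.springframework.restdocs.operation.OperationRequest; +import org.springframework.restdocs.operation.OperationResponse; +import org.springframework.restdocs.operation.preprocess.OperationPreprocessor; + +class PassThroughPreprocessor implements OperationPreprocessor { + + @Override + public OperationRequest preprocess(OperationRequest request) { + // Inspect the request and return a modified copy here; returning it + // unchanged makes this preprocessor a no-op. + return request; + } + + @Override + public OperationResponse preprocess(OperationResponse response) { + // Likewise for the response. + return response; + } + +} +```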
You can then use your custom preprocessor in exactly the same way as any of the built-in preprocessors. + +If you want to modify only the content (body) of a request or response, consider implementing the `ContentModifier` interface and using it with the built-in `ContentModifyingOperationPreprocessor`. + +## [Configuration](#configuration) + +This section covers how to configure Spring REST Docs. + +### [Documented URIs](#configuration-uris) + +This section covers configuring documented URIs. + +#### [MockMvc URI Customization](#configuration-uris-mockmvc) + +When using MockMvc, the default configuration for URIs documented by Spring REST Docs is as follows: + +|Setting| Default | +|-------|-----------| +|Scheme | `http` | +| Host |`localhost`| +| Port | `8080` | + +This configuration is applied by `MockMvcRestDocumentationConfigurer`. +You can use its API to change one or more of the defaults to suit your needs. +The following example shows how to do so: + +``` +this.mockMvc = MockMvcBuilders.webAppContextSetup(this.context) + .apply(documentationConfiguration(this.restDocumentation).uris().withScheme("https") + .withHost("example.com").withPort(443)) + .build(); +``` + +| |If the port is set to the default for the configured scheme (port 80 for HTTP or port 443 for HTTPS), it is omitted from any URIs in the generated snippets.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |To configure a request’s context path, use the `contextPath` method on `MockHttpServletRequestBuilder`.| +|---|-------------------------------------------------------------------------------------------------------| + +#### [REST Assured URI Customization](#configuration-uris-rest-assured) + +REST Assured tests a service by making actual HTTP requests. +As a result, URIs must be customized once the operation on the service has been performed but before it is documented. +A [REST-Assured-specific preprocessor](#customizing-requests-and-responses-preprocessors-modify-uris) is provided for this purpose. + +#### [WebTestClient URI Customization](#configuration-uris-webtestclient) + +When using WebTestClient, the default base for URIs documented by Spring REST Docs is `http://localhost:8080`. +You can customize this base by using the [`baseUrl(String)` method on `WebTestClient.Builder`](https://docs.spring.io/spring-framework/docs/5.0.x/javadoc-api/org/springframework/test/web/reactive/server/WebTestClient.Builder.html#baseUrl-java.lang.String-). +The following example shows how to do so: + +``` +@Before +public void setUp() { + this.webTestClient = WebTestClient.bindToApplicationContext(this.context).configureClient() + .baseUrl("https://api.example.com") (1) + .filter(documentationConfiguration(this.restDocumentation)).build(); +} +``` + +|**1**|Configure the base of documented URIs to be `https://api.example.com`.| +|-----|-----------------------------------------------------------------------| + +### [Snippet Encoding](#configuration-snippet-encoding) + +The default snippet encoding is `UTF-8`. +You can change the default snippet encoding by using the `RestDocumentationConfigurer` API. &#13;
The following examples use `ISO-8859-1`: + +MockMvc + +``` +this.mockMvc = MockMvcBuilders.webAppContextSetup(this.context) + .apply(documentationConfiguration(this.restDocumentation).snippets().withEncoding("ISO-8859-1")) + .build(); +``` + +WebTestClient + +``` +this.webTestClient = WebTestClient.bindToApplicationContext(this.context).configureClient() + .filter(documentationConfiguration(this.restDocumentation) + .snippets().withEncoding("ISO-8859-1")) + .build(); +``` + +REST Assured + +``` +this.spec = new RequestSpecBuilder() + .addFilter(documentationConfiguration(this.restDocumentation).snippets().withEncoding("ISO-8859-1")) + .build(); +``` + +| |When Spring REST Docs converts the content of a request or a response to a `String`, the `charset` specified in the `Content-Type` header is used if it is available.&#13;
In its absence, the JVM’s default `Charset` is used.
You can configure the JVM’s default `Charset` by using the `file.encoding` system property.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### [Snippet Template Format](#configuration-snippet-template-format) + +The default snippet template format is Asciidoctor. +Markdown is also supported out of the box. +You can change the default format by using the `RestDocumentationConfigurer` API. +The following examples show how to do so: + +MockMvc + +``` +this.mockMvc = MockMvcBuilders.webAppContextSetup(this.context) + .apply(documentationConfiguration(this.restDocumentation).snippets() + .withTemplateFormat(TemplateFormats.markdown())) + .build(); +``` + +WebTestClient + +``` +this.webTestClient = WebTestClient.bindToApplicationContext(this.context).configureClient() + .filter(documentationConfiguration(this.restDocumentation) + .snippets().withTemplateFormat(TemplateFormats.markdown())) + .build(); +``` + +REST Assured + +``` +this.spec = new RequestSpecBuilder().addFilter(documentationConfiguration(this.restDocumentation).snippets() + .withTemplateFormat(TemplateFormats.markdown())).build(); +``` + +### [Default Snippets](#configuration-default-snippets) + +Six snippets are produced by default: + +* `curl-request` + +* `http-request` + +* `http-response` + +* `httpie-request` + +* `request-body` + +* `response-body` + +You can change the default snippet configuration during setup by using the `RestDocumentationConfigurer` API. +The following examples produce only the `curl-request` snippet by default: + +MockMvc + +``` +this.mockMvc = MockMvcBuilders.webAppContextSetup(this.context) + .apply(documentationConfiguration(this.restDocumentation).snippets().withDefaults(curlRequest())) + .build(); +``` + +WebTestClient + +``` +this.webTestClient = WebTestClient.bindToApplicationContext(this.context) + .configureClient().filter( + documentationConfiguration(this.restDocumentation) + .snippets().withDefaults(curlRequest())) + .build(); +``` + +REST Assured + +``` +this.spec = new RequestSpecBuilder() + .addFilter(documentationConfiguration(this.restDocumentation).snippets().withDefaults(curlRequest())) + .build(); +``` + +### [Default Operation Preprocessors](#configuration-default-preprocessors) + +You can configure default request and response preprocessors during setup by using the `RestDocumentationConfigurer` API. +The following examples remove the `Foo` headers from all requests and pretty print all responses: + +MockMvc + +``` +this.mockMvc = MockMvcBuilders.webAppContextSetup(this.context) + .apply(documentationConfiguration(this.restDocumentation).operationPreprocessors() + .withRequestDefaults(removeHeaders("Foo")) (1) + .withResponseDefaults(prettyPrint())) (2) + .build(); +``` + +|**1**|Apply a request preprocessor that removes the header named `Foo`.| +|-----|-----------------------------------------------------------------| +|**2**| Apply a response preprocessor that pretty prints its content. 
| + +WebTestClient + +``` +this.webTestClient = WebTestClient.bindToApplicationContext(this.context) + .configureClient() + .filter(documentationConfiguration(this.restDocumentation) + .operationPreprocessors() + .withRequestDefaults(removeHeaders("Foo")) (1) + .withResponseDefaults(prettyPrint())) (2) + .build(); +``` + +|**1**|Apply a request preprocessor that removes the header named `Foo`.| +|-----|-----------------------------------------------------------------| +|**2**| Apply a response preprocessor that pretty prints its content. | + +REST Assured + +``` +this.spec = new RequestSpecBuilder() + .addFilter(documentationConfiguration(this.restDocumentation).operationPreprocessors() + .withRequestDefaults(removeHeaders("Foo")) (1) + .withResponseDefaults(prettyPrint())) (2) + .build(); +``` + +|**1**|Apply a request preprocessor that removes the header named `Foo`.| +|-----|-----------------------------------------------------------------| +|**2**| Apply a response preprocessor that pretty prints its content. | + +## [Working with Asciidoctor](#working-with-asciidoctor) + +This section describes the aspects of working with Asciidoctor that are particularly relevant to Spring REST Docs. + +| |Asciidoc is the document format.
Asciidoctor is the tool that produces content (usually as HTML) from Asciidoc files (which end with `.adoc`).| +|---|--------------------------------------------------------------------------------------------------------------------------------------------------| + +### [Resources](#working-with-asciidoctor-resources) + +* [Syntax quick reference](https://asciidoctor.org/docs/asciidoc-syntax-quick-reference) + +* [User manual](https://asciidoctor.org/docs/user-manual) + +### [Including Snippets](#working-with-asciidoctor-including-snippets) + +This section covers how to include Asciidoc snippets. + +#### [Including Multiple Snippets for an Operation](#working-with-asciidoctor-including-snippets-operation) + +You can use a macro named `operation` to import all or some of the snippets that have been generated for a specific operation. +It is made available by including `spring-restdocs-asciidoctor` in your project’s [build configuration](#getting-started-build-configuration). + +The target of the macro is the name of the operation. +In its simplest form, you can use the macro to include all of the snippets for an operation, as shown in the following example: + +``` +operation::index[] +``` + +The `operation` macro also supports a `snippets` attribute, which you can use to select the snippets that should be included. +The attribute’s value is a comma-separated list. +Each entry in the list should be the name of a snippet file (minus the `.adoc` suffix) to include. +For example, you can include only the curl, HTTP request, and HTTP response snippets, as shown in the following example: + +``` +operation::index[snippets='curl-request,http-request,http-response'] +``` + +The preceding example is the equivalent of the following: + +``` +[[example_curl_request]] +== Curl request + +include::{snippets}/index/curl-request.adoc[] + +[[example_http_request]] +== HTTP request + +include::{snippets}/index/http-request.adoc[] + +[[example_http_response]] +== HTTP response + +include::{snippets}/index/http-response.adoc[] +``` + +##### [Section Titles](#working-with-asciidoctor-including-snippets-operation-titles) + +For each snippet that is included by using the `operation` macro, a section with a title is created. +Default titles are provided for the following built-in snippets: + +| Snippet | Title | +|-----------------|---------------| +| `curl-request` | Curl request | +| `http-request` | HTTP request | +| `http-response` | HTTP response | +|`httpie-request` |HTTPie request | +| `links` | Links | +| `request-body` | Request body | +|`request-fields` |Request fields | +| `response-body` | Response body | +|`response-fields`|Response fields| + +For snippets not listed in the preceding table, a default title is generated by replacing `-` characters with spaces and capitalizing the first letter. +For example, the title for a snippet named `custom-snippet` will be “Custom snippet”. + +You can customize the default titles by using document attributes. +The name of the attribute should be `operation-{snippet}-title`. +For example, to customize the title of the `curl-request` snippet to be "Example request", you can use the following attribute: + +``` +:operation-curl-request-title: Example request +``` + +#### [Including Individual Snippets](#working-with-asciidoctor-including-snippets-individual) + +The [include macro](https://asciidoctor.org/docs/asciidoc-syntax-quick-reference/#include-files) is used to include individual snippets in your documentation. &#13;
You can use the `snippets` attribute (which is automatically set by `spring-restdocs-asciidoctor` configured in the [build configuration](#getting-started-build-configuration)) to reference the snippets output directory. +The following example shows how to do so: + +``` +include::{snippets}/index/curl-request.adoc[] +``` + +### [Customizing Tables](#working-with-asciidoctor-customizing-tables) + +Many of the snippets contain a table in their default configuration. +The appearance of the table can be customized, either by providing some additional configuration when the snippet is included or by using a custom snippet template. + +#### [Formatting Columns](#working-with-asciidoctor-customizing-tables-formatting-columns) + +Asciidoctor has rich support for [formatting a table’s columns](https://asciidoctor.org/docs/user-manual/#cols-format). +As the following example shows, you can specify the widths of a table’s columns by using the `cols` attribute: + +``` +[cols="1,3"] (1) +include::{snippets}/index/links.adoc[] +``` + +|**1**|The table’s width is split across its two columns, with the second column being three times as wide as the first.| +|-----|-----------------------------------------------------------------------------------------------------------------| + +#### [Configuring the Title](#working-with-asciidoctor-customizing-tables-title) + +You can specify the title of a table by using a line prefixed by a `.`. +The following example shows how to do so: + +``` +.Links (1) +include::{snippets}/index/links.adoc[] +``` + +|**1**|The table’s title will be `Links`.| +|-----|----------------------------------| + +#### [Avoiding Table Formatting Problems](#working-with-asciidoctor-customizing-tables-formatting-problems) + +Asciidoctor uses the `|` character to delimit cells in a table. +This can cause problems if you want a `|` to appear in a cell’s contents. +You can avoid the problem by escaping the `|` with a backslash — in other words, by using `\|` rather than `|`. + +All of the default Asciidoctor snippet templates perform this escaping automatically by using a Mustache lambda named `tableCellContent`. +If you write your own custom templates, you may want to use this lambda. +The following example shows how to escape `|` characters in a cell that contains the value of a `description` attribute: + +``` +| {{#tableCellContent}}{{description}}{{/tableCellContent}} +``` + +### [Further Reading](#working-with-asciidoctor-further-reading) + +See the [Tables section of the Asciidoctor user manual](https://asciidoctor.org/docs/user-manual/#tables) for more information about customizing tables. + +## [Working with Markdown](#working-with-markdown) + +This section describes the aspects of working with Markdown that are particularly relevant to Spring REST Docs. + +### [Limitations](#working-with-markdown-limitations) + +Markdown was originally designed for people writing for the web and, as such, is not as well-suited to writing documentation as Asciidoctor. +Typically, these limitations are overcome by using another tool that builds on top of Markdown. + +Markdown has no official support for tables. +Spring REST Docs' default Markdown snippet templates use [Markdown Extra’s table format](https://michelf.ca/projects/php-markdown/extra/#table). + +### [Including Snippets](#working-with-markdown-including-snippets) + +Markdown has no built-in support for including one Markdown file in another. &#13;
+To include the generated snippets of Markdown in your documentation, you should use an additional tool that supports this functionality. +One example that is particularly well-suited to documenting APIs is [Slate](https://github.com/tripit/slate). + +## [Contributing](#contributing) + +Spring REST Docs is intended to make it easy for you to produce high-quality documentation for your RESTful services. +However, we cannot achieve that goal without your contributions. + +### [Questions](#contributing-questions) + +You can ask questions about Spring REST Docs on [Stack Overflow](https://stackoverflow.com) by using the `spring-restdocs` tag. +Similarly, we encourage you to help your fellow Spring REST Docs users by answering questions. + +### [Bugs](#contributing-bugs) + +If you believe you have found a bug, please take a moment to search the [existing issues](https://github.com/spring-projects/spring-restdocs/issues?q=is%3Aissue). +If no one else has reported the problem, please [open a new issue](https://github.com/spring-projects/spring-restdocs/issues/new) that describes the problem in detail and, ideally, includes a test that reproduces it. + +### [Enhancements](#contributing-enhancements) + +If you would like an enhancement to be made to Spring REST Docs, pull requests are most welcome. +The source code is on [GitHub](https://github.com/spring-projects/spring-restdocs). +You may want to search the [existing issues](https://github.com/spring-projects/spring-restdocs/issues?q=is%3Aissue) and [pull requests](https://github.com/spring-projects/spring-restdocs/pulls?q=is%3Apr) to see if the enhancement has already been proposed. +You may also want to [open a new issue](https://github.com/spring-projects/spring-restdocs/issues/new) to discuss a possible enhancement before work on it begins. \ No newline at end of file diff --git a/docs/en/spring-security/README.md b/docs/en/spring-security/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring-session/README.md b/docs/en/spring-session/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring-shell/README.md b/docs/en/spring-shell/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring-shell/spring-shell.md b/docs/en/spring-shell/spring-shell.md new file mode 100644 index 0000000000000000000000000000000000000000..921c1dc1ff9b8bf9c3be5040659206cccfc18f8d --- /dev/null +++ b/docs/en/spring-shell/spring-shell.md @@ -0,0 +1,857 @@ +# Spring Shell Reference Documentation + +## What is Spring Shell? + +Not all applications need a fancy web user interface! +Sometimes, interacting with an application using an interactive terminal is +the most appropriate way to get things done. + +Spring Shell allows one to easily create such a runnable application, where the +user will enter textual commands that will get executed until the program terminates. +The Spring Shell project provides the infrastructure to create such a REPL (Read, Eval, +Print Loop), allowing the developer to concentrate on the commands implementation, using +the familiar Spring programming model. + +Advanced features such as parsing, TAB completion, colorization of output, fancy ascii-art +table display, input conversion and validation all come for free, with the developer only +having to focus on core command logic. 
+
+## Using Spring Shell
+
+### Getting Started
+
+To see what Spring Shell has to offer, let’s write a trivial shell application that
+has a simple command to add two numbers together.
+
+#### Let’s Write a Simple Boot App
+
+Starting with version 2, Spring Shell has been rewritten from the ground up with various
+enhancements in mind, one of which is easy integration with Spring Boot, although it is
+not a strong requirement.
+For the purpose of this tutorial, let’s create a simple Boot application, for example
+using [start.spring.io](https://start.spring.io). This minimal application only depends on `spring-boot-starter` and configures the `spring-boot-maven-plugin`, generating an executable über-jar:
+
+```
+...
+<dependencies>
+    <dependency>
+        <groupId>org.springframework.boot</groupId>
+        <artifactId>spring-boot-starter</artifactId>
+    </dependency>
+</dependencies>
+...
+```
+
+#### Adding a Dependency on Spring Shell
+
+The easiest way to get going with Spring Shell is to depend on the `spring-shell-starter` artifact.
+This comes with everything one needs to use Spring Shell and plays nicely with Boot,
+configuring only the necessary beans as needed:
+
+```
+...
+<dependency>
+    <groupId>org.springframework.shell</groupId>
+    <artifactId>spring-shell-starter</artifactId>
+    <version>2.0.1.RELEASE</version>
+</dependency>
+...
+```
+
+| |Given that Spring Shell will kick in and start the REPL by virtue of this dependency being present,
you’ll need to either build skipping tests (`-DskipTests`) throughout this tutorial or remove the sample integration test
that was generated by [start.spring.io](https://start.spring.io). If you don’t do so, the integration test will create
the Spring `ApplicationContext` and, depending on your build tool, will stay stuck in the eval loop or crash with an NPE.|
+|---|---|
+
+#### Your First Command
+
+It’s time to add our first command. Create a new class (name it however you want) and
+annotate it with `@ShellComponent` (a variation of `@Component` that is used to restrict
+the set of classes that are scanned for candidate commands).
+
+Then, create an `add` method that takes two ints (`a` and `b`) and returns their sum. Annotate it
+with `@ShellMethod` and provide a description of the command in the annotation (the only piece of
+information that is required):
+
+```
+package com.example.demo;
+
+import org.springframework.shell.standard.ShellMethod;
+import org.springframework.shell.standard.ShellComponent;
+
+@ShellComponent
+public class MyCommands {
+
+    @ShellMethod("Add two integers together.")
+    public int add(int a, int b) {
+        return a + b;
+    }
+}
+```
+
+#### Let’s Give It a Ride!
+
+Build the application and run the generated jar, like so:
+
+```
+./mvnw clean install -DskipTests
+[...]
+
+java -jar target/demo-0.0.1-SNAPSHOT.jar
+```
+
+You’ll be greeted by the following screen (the banner comes from Spring Boot, and can be customized [as usual](https://docs.spring.io/spring-boot/docs/current/reference/htmlsingle/#boot-features-banner)):
+
+```
+  .   ____          _            __ _ _
+ /\\ / ___'_ __ _ _(_)_ __  __ _ \ \ \ \
+( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
+ \\/  ___)| |_)| | | | | || (_| |  ) ) ) )
+  '  |____| .__|_| |_|_| |_\__, | / / / /
+ =========|_|==============|___/=/_/_/_/
+ :: Spring Boot ::        (v1.5.6.RELEASE)
+
+shell:>
+```
+
+Below is a yellow `shell:>` prompt that invites you to type commands. Type `add 1 2` then ENTER and admire the magic!
+
+```
+shell:>add 1 2
+3
+```
+
+Try to play with the shell (hint: there is a `help` command) and, when you’re done, type `exit` then ENTER.
+
+The rest of this document delves deeper into the whole Spring Shell programming model.
+
+### Writing your own Commands
+
+The way Spring Shell decides to turn a method into an actual shell command is entirely pluggable
+(see [Extending Spring Shell](#extending-spring-shell)), but as of Spring Shell 2.x, the recommended way to write commands
+is to use the new API described in this section (the so-called *standard* API).
+
+Using the *standard* API, methods on beans will be turned into executable commands provided that
+
+* the bean class bears the `@ShellComponent` annotation. This is used to restrict the set of beans that
+  are considered.
+
+* the method bears the `@ShellMethod` annotation.
+
+| |The `@ShellComponent` is a stereotype annotation itself meta-annotated with `@Component`. As such, it
can be used in addition to the filtering mechanism to also *declare* beans (*e.g.* using `@ComponentScan`).

The name of the created bean can be customized using the `value` attribute of the annotation.|
+|---|---|
+
+#### It’s all about Documentation!
+
+The only required attribute of the `@ShellMethod` annotation is its `value` attribute, which should be used
+to write a short, one-sentence description of what the command does. This is important so that your users can
+get consistent help about your commands without having to leave the shell (see [Integrated Documentation with the `help` Command](#help-command)).
+
+| |The description of your command should be short, one or two sentences only. For better consistency, it is recommended that it starts with a capital letter and ends with a dot.|
+|---|---|
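+
+For example, a command description following these conventions could look like the following (reusing the earlier `add` command, shown only as an illustration):
+
+```
+    @ShellMethod("Add two integers together.")
+    public int add(int a, int b) {
+        return a + b;
+    }
+```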
+
+#### Customizing the Command Name(s)
+
+By default, there is no need to specify the *key* for your command (*i.e.* the word(s) that should be used
+to invoke it in the shell). The name of the method is used as the command key, turning camelCase names into
+dashed, GNU-style names (that is, `sayHello()` becomes `say-hello`).
+
+It is possible, however, to explicitly set the command key, using the `key` attribute of the annotation, like so:
+
+```
+    @ShellMethod(value = "Add numbers.", key = "sum")
+    public int add(int a, int b) {
+        return a + b;
+    }
+```
+
+| |The `key` attribute accepts multiple values. If you set multiple keys for a single method, then the command will be registered under each of those aliases.|
+|---|---|
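+
+For instance, the following (hypothetical) variation of the earlier `sum` command registers the same method under two aliases, so both `sum 1 2` and `addition 1 2` work:
+
+```
+    @ShellMethod(value = "Add numbers.", key = {"sum", "addition"})
+    public int add(int a, int b) {
+        return a + b;
+    }
+```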
+
+| |The command key can contain pretty much any character, including spaces. When coming up with names though, keep in mind that consistency is often appreciated by users (*i.e.* avoid mixing dashed names with spaced names, *etc.*)|
+|---|---|
+
+### Invoking your Commands
+
+#### By Name *vs.* Positional Parameters
+
+As seen above, decorating a method with `@ShellMethod` is the sole requirement for creating a command.
+When doing so, the user can set the value of all method parameters in two possible ways:
+
+* using a parameter key (*e.g.* `--arg value`). This approach is called "by name" parameters.
+
+* or without a key, simply setting parameter values in the same order they appear in the method signature ("positional" parameters).
+
+These two approaches can be mixed and matched, with named parameters always taking precedence (as they are less
+prone to ambiguity). As such, given the following command
+
+```
+    @ShellMethod("Display stuff.")
+    public String echo(int a, int b, int c) {
+        return String.format("You said a=%d, b=%d, c=%d", a, b, c);
+    }
+```
+
+then the following invocations are all equivalent, as witnessed by the output:
+
+```
+shell:>echo 1 2 3               (1)
+You said a=1, b=2, c=3
+
+shell:>echo --a 1 --b 2 --c 3   (2)
+You said a=1, b=2, c=3
+
+shell:>echo --b 2 --c 3 --a 1   (3)
+You said a=1, b=2, c=3
+
+shell:>echo --a 1 2 3           (4)
+You said a=1, b=2, c=3
+
+shell:>echo 1 --c 3 2           (5)
+You said a=1, b=2, c=3
+```
+
+|**1**| This uses positional parameters |
+|-----|---------------------------------|
+|**2**| This is an example of full by-name parameters |
+|**3**| By-name parameters can be reordered as desired |
+|**4**| You can use a mix of the two approaches |
+|**5**| The non-by-name parameters are resolved in the order they appear |
+
+##### Customizing the Named Parameter Key(s)
+
+As seen above, the default strategy for deriving the key of a named parameter is to use the Java
+name of the parameter in the method signature, prefixed with two dashes (`--`). This can be customized in two ways:
+
+1. to change the default prefix for the whole method, use the `prefix()` attribute of the `@ShellMethod` annotation
+
+2. to override the *whole* key on a per-parameter basis, annotate the parameter with the `@ShellOption` annotation.
+
+Have a look at the following example:
+
+```
+    @ShellMethod(value = "Display stuff.", prefix="-")
+    public String echo(int a, int b, @ShellOption("--third") int c) {
+        return String.format("You said a=%d, b=%d, c=%d", a, b, c);
+    }
+```
+
+For such a setup, the possible parameter keys will be `-a`, `-b` and `--third`.
+
+| |It is possible to specify several keys for a single parameter. If so, these will be mutually exclusive ways
to specify the same parameter (so only one of them can be used). As an example, here is the signature of the
built-in [`help`](#help-command) command:

```
@ShellMethod("Describe a command.")
public String help(@ShellOption({"-C", "--command"}) String command) {
...
}
```|
+|---|---|
+
+#### Optional Parameters and Default Values
+
+Spring Shell provides the ability to give parameters default values, which allows the user to omit
+those parameters:
+
+```
+    @ShellMethod("Say hello.")
+    public String greet(@ShellOption(defaultValue="World") String who) {
+        return "Hello " + who;
+    }
+```
+
+Now, the `greet` command can still be invoked as `greet Mother` (or `greet --who Mother`), but the following
+is also possible:
+
+```
+shell:>greet
+Hello World
+```
+
+#### Parameter Arity
+
+Up to now, it has always been assumed that each parameter mapped to a single word entered by the user.
+Situations may arise, though, when a parameter value should be *multi-valued*. This is driven by the `arity()` attribute of the `@ShellOption` annotation. Simply use a collection or array for the parameter type, and specify how
+many values are expected:
+
+```
+    @ShellMethod("Add Numbers.")
+    public float add(@ShellOption(arity=3) float[] numbers) {
+        return numbers[0] + numbers[1] + numbers[2];
+    }
+```
+
+The command may then be invoked using either of the following syntaxes:
+
+```
+shell:>add 1 2 3.3
+6.3
+shell:>add --numbers 1 2 3.3
+6.3
+```
+
+| |When using the *by-name* parameter approach, the key should **not** be repeated. The following does **not** work:

```
shell:>add --numbers 1 --numbers 2 --numbers 3.3
```|
+|---|---|
+
+##### Infinite Arity
+
+TO BE IMPLEMENTED
+
+##### Special Handling of Boolean Parameters
+
+When it comes to parameter arity, there is one kind of parameter that receives special treatment by default, as
+is often the case in command-line utilities.
+Boolean (that is, `boolean` as well as `java.lang.Boolean`) parameters behave as if they have an `arity()` of `0` by default, allowing users to set their values using a "flag" approach.
+Take a look at the following:
+
+```
+    @ShellMethod("Terminate the system.")
+    public String shutdown(boolean force) {
+        return "You said " + force;
+    }
+```
+
+This allows the following invocations:
+
+```
+shell:>shutdown
+You said false
+shell:>shutdown --force
+You said true
+```
+
+| |This special treatment plays well with the [default value](#optional-parameters-default-values) specification. Although the default
for boolean parameters is to have their default value be `false`, you can specify otherwise (*i.e.* `@ShellOption(defaultValue="true")`), and the behavior will be inverted (that is, not specifying the parameter
will result in the value being `true`, and specifying the flag will result in the value being `false`).|
+|---|---|
+
+| |Having this behavior of implicit `arity()=0` prevents the user from specifying a value (*e.g.* `shutdown --force true`).
If you would like to allow this behavior (and forego the flag approach), then force an arity of `1` using the annotation:

```
@ShellMethod("Terminate the system.")
public String shutdown(@ShellOption(arity=1, defaultValue="false") boolean force) {
return "You said " + force;
}
```|
+|---|---|
+
+#### Quotes Handling
+
+Spring Shell takes user input and tokenizes it into *words*, splitting on space characters.
+If the user wants to provide a parameter value that contains spaces, that value needs to be quoted.
+Both single (`'`) and double (`"`) quotes are supported, and those quotes will not be part of the value:
+
+```
+    @ShellMethod("Prints what has been entered.")
+    public String echo(String what) {
+        return "You said " + what;
+    }
+```
+
+```
+shell:>echo Hello
+You said Hello
+shell:>echo 'Hello'
+You said Hello
+shell:>echo 'Hello World'
+You said Hello World
+shell:>echo "Hello World"
+You said Hello World
+```
+
+Supporting both single and double quotes allows the user to easily embed one type of quotes into
+a value:
+
+```
+shell:>echo "I'm here!"
+You said I'm here!
+shell:>echo 'He said "Hi!"'
+You said He said "Hi!"
+```
+
+Should the user need to embed the same kind of quote that was used to quote the whole parameter,
+the escape sequence uses the backslash (`\`) character:
+
+```
+shell:>echo 'I\'m here!'
+You said I'm here!
+shell:>echo "He said \"Hi!\""
+You said He said "Hi!"
+shell:>echo I\'m here!
+You said I'm here!
+```
+
+It is also possible to escape space characters when not using enclosing quotes, as such:
+
+```
+shell:>echo This\ is\ a\ single\ value
+You said This is a single value
+```
+
+#### Interacting with the Shell
+
+The Spring Shell project builds on top of the [JLine](https://github.com/jline/jline3) library, and as such brings
+a lot of nice interactive features, some of which are detailed in this section.
+
+First and foremost, Spring Shell supports TAB completion almost everywhere possible. So if there
+is an `echo` command and the user presses e, c, TAB, then `echo` will appear.
+Should there be several commands that start with `ec`, then the user will be prompted to choose (using TAB or Shift+TAB to navigate, and ENTER for selection).
+
+But completion does not stop at command keys. It also works for parameter keys (`--arg`) and even
+parameter values, if the application developer registered the appropriate beans (see [Providing TAB Completion Proposals](#providing-tab-completion)).
+
+Another nice feature of Spring Shell apps is support for line continuation. If a command and its parameters
+are too long and do not fit nicely on screen, a user may chunk the command and terminate a line with a backslash (`\`) character,
+then hit ENTER and continue on the next line. Upon submission of the whole command, this will
+be parsed as if the user entered a single space at each line break.
+
+```
+shell:>register module --type source --name foo \ (1)
+> --uri file:///tmp/bar
+Successfully registered module 'source:foo'
+```
+
+|**1**|command continues on next line|
+|-----|------------------------------|
+
+Line continuation also automatically triggers if the user has opened a quote (see [Quotes Handling](#quotes-handling))
+and hits ENTER while still in the quotes:
+
+```
+shell:>echo "Hello (1)
+dquote> World"
+You said Hello World
+```
+
+|**1**|user presses ENTER here|
+|-----|-----------------------|
+
+Lastly, Spring Shell apps benefit from a lot of keyboard shortcuts you may already be familiar with from
+your regular OS shell, borrowed from Emacs. Notable shortcuts include Ctrl+r to perform
+a reverse search, Ctrl+a and Ctrl+e to move to the beginning and end of the line, respectively, and Esc f and Esc b to move forward (respectively backward) one word at a time.
+
+##### Providing TAB Completion Proposals
+
+TBD
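+
+Although this section is still to be written, here is a minimal sketch of what a completion bean can look like, assuming Spring Shell 2’s `ValueProvider` SPI (the `FruitValueProvider` name and the proposed values are purely illustrative):
+
+```
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import org.springframework.core.MethodParameter;
+import org.springframework.shell.CompletionContext;
+import org.springframework.shell.CompletionProposal;
+import org.springframework.shell.standard.ValueProviderSupport;
+import org.springframework.stereotype.Component;
+
+// A sketch only: proposes a fixed set of values for parameters that opt in to this provider.
+@Component
+public class FruitValueProvider extends ValueProviderSupport {
+
+    private static final List<String> VALUES = Arrays.asList("apple", "banana", "cherry");
+
+    @Override
+    public List<CompletionProposal> complete(MethodParameter parameter,
+            CompletionContext completionContext, String[] hints) {
+        return VALUES.stream().map(CompletionProposal::new).collect(Collectors.toList());
+    }
+}
+```
+
+A command parameter would then reference the provider, presumably through `@ShellOption(valueProvider = FruitValueProvider.class)`.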
+
+### Validating Command Arguments
+
+Spring Shell integrates with the [Bean Validation API](http://beanvalidation.org/) to support
+automatic and self-documenting constraints on command parameters.
+
+Annotations found on command parameters, as well as annotations at the method level, will be
+honored and trigger validation prior to the command executing. Given the following command:
+
+```
+    @ShellMethod("Change password.")
+    public String changePassword(@Size(min = 8, max = 40) String password) {
+        return "Password successfully set to " + password;
+    }
+```
+
+you get this behavior for free:
+
+```
+shell:>change-password hello
+The following constraints were not met:
+	--password string : size must be between 8 and 40 (You passed 'hello')
+```
+
+| |Applies to All Command Implementations

It is important to note that bean validation applies to all command implementations, whether
they use the "standard" API or any other API, through the use of an adapter (see [Supporting Other APIs](#support-for-shell-1-and-jcommander)).|
+|---|---|
+
+### Dynamic Command Availability
+
+There may be times when registered commands do not make sense, due to the internal state of the application.
+For example, maybe there is a `download` command, but it only works once the user has used `connect` to reach a remote
+server. Now, if the user tries to use the `download` command, the shell should gracefully explain that
+the command *does* exist but is not available at the time.
+Spring Shell lets the developer do that, even providing a short explanation of the reason for
+the command not being available.
+
+There are three possible ways for a command to indicate availability.
+They all leverage a no-arg method that returns an instance of `Availability`.
+Let’s start with a simple example:
+
+```
+@ShellComponent
+public class MyCommands {
+
+    private boolean connected;
+
+    @ShellMethod("Connect to the server.")
+    public void connect(String user, String password) {
+        [...]
+        connected = true;
+    }
+
+    @ShellMethod("Download the nuclear codes.")
+    public void download() {
+        [...]
+    }
+
+    public Availability downloadAvailability() {
+        return connected
+            ? Availability.available()
+            : Availability.unavailable("you are not connected");
+    }
+}
+```
+
+Here you can see that the `connect` method is used to connect to the server (details omitted), altering the state
+of the commands through the `connected` boolean when done.
+The `download` command will be marked as *unavailable* until the user has connected, thanks to the presence
+of a method named exactly like the `download` command method, with the `Availability` suffix appended.
+The method returns an instance of `Availability`, constructed with one of its two factory methods.
+In case the command is not available, an explanation has to be provided.
+Now, if the user tries to invoke the command while not being connected, here is what happens:
+
+```
+shell:>download
+Command 'download' exists but is not currently available because you are not connected.
+Details of the error have been omitted. You can use the stacktrace command to print the full stacktrace.
+```
+
+Information about currently unavailable commands is also leveraged in the integrated help. See [Integrated Documentation with the `help` Command](#help-command).
+
+| |The reason provided when the command is not available should read nicely if appended after "Because …"

It’s best not to start the sentence with a capital letter and not to add a final dot.|
+|---|---|
+
+If, for some reason, naming the availability method after the name of the command method does not suit you, you
+can provide an explicit name using `@ShellMethodAvailability`, like so:
+
+```
+    @ShellMethod("Download the nuclear codes.")
+    @ShellMethodAvailability("availabilityCheck") (1)
+    public void download() {
+        [...]
+    }
+
+    public Availability availabilityCheck() { (1)
+        return connected
+            ? Availability.available()
+            : Availability.unavailable("you are not connected");
+    }
+```
+
+|**1**|the names have to match|
+|-----|-----------------------|
+
+Lastly, it is often the case that several commands in the same class share the same internal state and thus
+should all be available or unavailable all at once. Instead of having to stick `@ShellMethodAvailability`
+on all command methods, Spring Shell allows the user to flip things around and put the `@ShellMethodAvailability`
+annotation on the availability method, specifying the names of the commands that it controls:
+
+```
+    @ShellMethod("Download the nuclear codes.")
+    public void download() {
+        [...]
+    }
+
+    @ShellMethod("Disconnect from the server.")
+    public void disconnect() {
+        [...]
+    }
+
+    @ShellMethodAvailability({"download", "disconnect"})
+    public Availability availabilityCheck() {
+        return connected
+            ? Availability.available()
+            : Availability.unavailable("you are not connected");
+    }
+```
+
+| |The default value for the `@ShellMethodAvailability.value()` attribute is `"*"` and this serves as a special
wildcard that matches all command names. It’s thus easy to turn all commands of a single class on or off
with a single availability method. Here is an example:

```
@ShellComponent
public class Toggles {
  @ShellMethodAvailability
  public Availability availabilityOnWeekdays() {
    return Calendar.getInstance().get(DAY_OF_WEEK) == SUNDAY
      ? Availability.available()
      : Availability.unavailable("today is not Sunday");
  }

  @ShellMethod
  public void foo() {}

  @ShellMethod
  public void bar() {}
}
```|
+|---|---|
+
+| |Spring Shell does not impose many constraints on how to write commands and how to organize classes.
But it’s often good practice to put related commands in the same class, and the availability indicators
can benefit from that.|
+|---|---|
+
+### Organizing Commands
+
+When your shell starts to provide a lot of functionality, you may end up
+with a lot of commands, which could be confusing for your users. Typing `help`,
+they would see a daunting list of commands, organized in alphabetical order,
+which may not always make sense.
+
+To alleviate this, Spring Shell provides the ability to group commands together,
+with reasonable defaults. Related commands would then end up in the same *group* (*e.g.* `User Management Commands`)
+and be displayed together in the help screen and other places.
+
+By default, commands are grouped according to the class they are implemented in,
+turning the camel case class name into separate words (so `URLRelatedCommands` becomes `URL Related Commands`).
+This is a very sensible default, as related commands are often already in the same class anyway,
+for they need to use the same collaborating objects.
+
+If, however, this behavior does not suit you, you can override the group for a
+command in the following ways, in order of priority:
+
+* specifying a `group()` in the `@ShellMethod` annotation
+
+* placing a `@ShellCommandGroup` on the class the command is defined in. This will apply
+  the group for all commands defined in that class (unless overridden as above)
+
+* placing a `@ShellCommandGroup` on the package (*via* `package-info.java`)
+  the command is defined in. This will apply to all commands defined in the
+  package (unless overridden at the method or class level, as explained above); see the sketch after the example below
+
+Here is a short example:
+
+```
+public class UserCommands {
+    @ShellMethod(value = "This command ends up in the 'User Commands' group")
+    public void foo() {}
+
+    @ShellMethod(value = "This command ends up in the 'Other Commands' group",
+        group = "Other Commands")
+    public void bar() {}
+}
+
+...
+
+@ShellCommandGroup("Other Commands")
+public class SomeCommands {
+    @ShellMethod(value = "This one is in 'Other Commands'")
+    public void wizz() {}
+
+    @ShellMethod(value = "And this one is in 'Yet Another Group'",
+        group = "Yet Another Group")
+    public void last() {}
+}
+```
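+
+The package-level option from the list above is not shown in the example; a minimal sketch of what it could look like (assuming a hypothetical `com.example.demo.other` package holding the commands) is:
+
+```
+// package-info.java
+@ShellCommandGroup("Other Commands")
+package com.example.demo.other;
+
+import org.springframework.shell.standard.ShellCommandGroup;
+```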
+
+### Built-In Commands
+
+Any application built using the `spring-shell-starter` artifact
+(or, to be more precise, the `spring-shell-standard-commands` dependency) comes with a set of built-in commands.
+These commands can be overridden or disabled individually (see [Overriding or Disabling Built-In Commands](#overriding-or-disabling-built-in-commands)), but if they’re
+not, this section describes their behavior.
+
+#### Integrated Documentation with the `help` Command
+
+Running a shell application often implies that the user is in a graphically limited environment. And although, in the era of mobile
+phones, we’re always connected, accessing a web browser or any other rich UI application such as a PDF viewer may not always
+be possible. This is why it is important that the shell commands are correctly self-documented, and this is where the `help` command comes in.
+
+Typing `help` + ENTER lists all the commands known to the shell (including [unavailable](#dynamic-command-availability) commands)
+and a short description of what they do:
+
+```
+shell:>help
+AVAILABLE COMMANDS
+        add: Add numbers together.
+      * authenticate: Authenticate with the system.
+      * blow-up: Blow Everything up.
+        clear: Clear the shell screen.
+        connect: Connect to the system.
+        disconnect: Disconnect from the system.
+        exit, quit: Exit the shell.
+        help: Display help about available commands.
+        register module: Register a new module.
+        script: Read and execute commands from a file.
+        stacktrace: Display the full stacktrace of the last error.
+
+Commands marked with (*) are currently unavailable.
+Type `help <command>` to learn more.
+```
+
+Typing `help <command>` will display more detailed information about a command, including the available parameters, their
+type, whether they are mandatory or not, *etc.*
+
+Here is the `help` command applied to itself:
+
+```
+shell:>help help
+
+NAME
+	help - Display help about available commands.
+
+SYNOPSYS
+	help [[-C] string]
+
+OPTIONS
+	-C or --command  string
+		The command to obtain help for.  [Optional, default = <none>]
+```
+
+#### Clearing the Screen
+
+The `clear` command does what you would expect and clears the screen, resetting the prompt
+in the top left corner.
+
+#### Exiting the Shell
+
+The `quit` command (also aliased as `exit`) simply requests the shell to quit, gracefully
+closing the Spring application context. If not overridden, a JLine `History` bean will write a history of all
+commands executed to disk, so that they are available again (see [Interacting with the Shell](#interacting-with-the-shell)) on the next launch.
+
+#### Displaying Details about an Error
+
+When an exception occurs inside command code, it is caught by the shell and a simple, one-line message is displayed,
+so as not to overflow the user with too much information.
+There are cases, though, when understanding what exactly happened is important (especially if the exception has a nested cause).
+
+For this purpose, Spring Shell remembers the last exception that occurred, and the user can later use the `stacktrace`
+command to print all the gory details on the console.
+
+#### Running a Batch of Commands
+
+The `script` command accepts a local file as an argument and replays the commands found there, one at a time.
+
+Reading from the file behaves exactly like inside the interactive shell, so lines starting with `//` are considered
+comments and are ignored, while lines ending with `\` trigger line continuation.
+
+### Customizing the Shell
+
+#### Overriding or Disabling Built-In Commands
+
+[Built-in commands](#built-in-commands) are provided with Spring Shell to achieve everyday tasks that many, if not
+all, shell applications need. If you’re not happy with the way they behave though, you can disable or override them, as explained in this section.
+
+| |Disabling all Built-in Commands

If you don’t need built-in commands at all, then there is an easy way to "disable" them: just don’t include them!
Either use a Maven exclusion on `spring-shell-standard-commands` or, if you’re selectively including Spring Shell dependencies,
don’t bring that one in!

```
<dependency>
    <groupId>org.springframework.shell</groupId>
    <artifactId>spring-shell-starter</artifactId>
    <version>2.0.1.RELEASE</version>
    <exclusions>
        <exclusion>
            <groupId>org.springframework.shell</groupId>
            <artifactId>spring-shell-standard-commands</artifactId>
        </exclusion>
    </exclusions>
</dependency>
```|
+|---|---|
+
+##### Disabling Specific Commands
+
+To disable a single built-in command, simply set the `spring.shell.command.<command>.enabled` property to `false` in the application
+`Environment`. One easy way to do this is to pass extra args to the Boot application in your `main()` entry point:
+
+```
+    public static void main(String[] args) throws Exception {
+        String[] disabledCommands = {"--spring.shell.command.help.enabled=false"}; (1)
+        String[] fullArgs = StringUtils.concatenateStringArrays(args, disabledCommands);
+        SpringApplication.run(MyApp.class, fullArgs);
+    }
+```
+
+|**1**|This disables the integrated `help` command|
+|-----|-------------------------------------------|
+
+##### Overriding Specific Commands
+
+If, instead of disabling a command, you’d rather provide your own implementation, then you can either
+
+* disable the command as explained above and have your implementation registered under the same name, or
+
+* have your implementing class implement the corresponding `Command` marker interface (for example, `Clear.Command`). As an example, here is how
+  to override the `clear` command:
+
+  ```
+  public class MyClear implements Clear.Command {
+
+      @ShellMethod("Clear the screen, only better.")
+      public void clear() {
+          // ...
+      }
+  }
+  ```
+
+| |Please Consider Contributing your Changes

If you feel like your implementation of a standard command could be valuable to the community,
please consider opening a pull request at [github.com/spring-projects/spring-shell](https://github.com/spring-projects/spring-shell).

Alternatively, before making any changes on your own, you can open an issue with the project. Feedback is
always welcome!|
+|---|---|
+
+#### ResultHandlers
+
+#### PromptProvider
+
+After each command invocation, the shell waits for new input from the user, displaying
+a *prompt* in yellow:
+
+```
+shell:>
+```
+
+It is possible to customize this behavior by registering a bean of type `PromptProvider`.
+Such a bean may use internal state to decide what to display to the user (it may, for example,
+react to [application events](https://docs.spring.io/spring/docs/current/spring-framework-reference/htmlsingle/#context-functionality-events-annotation))
+and can use JLine’s `AttributedCharSequence` to display fancy ANSI text.
+
+Here is a fictional example:
+
+```
+@Component
+public class CustomPromptProvider implements PromptProvider {
+
+    private ConnectionDetails connection;
+
+    @Override
+    public AttributedString getPrompt() {
+        if (connection != null) {
+            return new AttributedString(connection.getHost() + ":>",
+                AttributedStyle.DEFAULT.foreground(AttributedStyle.YELLOW));
+        }
+        else {
+            return new AttributedString("server-unknown:>",
+                AttributedStyle.DEFAULT.foreground(AttributedStyle.RED));
+        }
+    }
+
+    @EventListener
+    public void handle(ConnectionUpdatedEvent event) {
+        this.connection = event.getConnectionDetails();
+    }
+}
+```
+
+#### Customizing Command Line Options Behavior
+
+Spring Shell comes with two default Spring Boot `ApplicationRunners`:
+
+* `InteractiveShellApplicationRunner` bootstraps the Shell REPL. It sets up the JLine infrastructure and eventually
+  calls `Shell.run()`.
+
+* `ScriptShellApplicationRunner` looks for program arguments that start with `@`, assumes those are local file names, and
+  tries to run the commands contained in those files (with the same semantics as the [script command](#script-command)) and
+  then exits the process (by effectively disabling the `InteractiveShellApplicationRunner`, see below).
+
+If this behavior does not suit you, simply provide one (or more) beans of type `ApplicationRunner` and optionally disable the standard ones. You’ll want to take inspiration from the `ScriptShellApplicationRunner`:
+
+```
+@Order(InteractiveShellApplicationRunner.PRECEDENCE - 100) // Runs before InteractiveShellApplicationRunner
+public class ScriptShellApplicationRunner implements ApplicationRunner {
+
+    @Override
+    public void run(ApplicationArguments args) throws Exception {
+        List<File> scriptsToRun = args.getNonOptionArgs().stream()
+                .filter(s -> s.startsWith("@"))
+                .map(s -> new File(s.substring(1)))
+                .collect(Collectors.toList());
+
+        boolean batchEnabled = environment.getProperty(SPRING_SHELL_SCRIPT_ENABLED, boolean.class, true);
+
+        if (!scriptsToRun.isEmpty() && batchEnabled) {
+            InteractiveShellApplicationRunner.disable(environment);
+            for (File file : scriptsToRun) {
+                try (Reader reader = new FileReader(file);
+                        FileInputProvider inputProvider = new FileInputProvider(reader, parser)) {
+                    shell.run(inputProvider);
+                }
+            }
+        }
+    }
+
+...
+```
+
+#### Customizing Arguments Conversion
+
+Conversion from text input to actual method arguments uses the standard Spring [conversion](https://docs.spring.io/spring/docs/4.3.11.RELEASE/spring-framework-reference/htmlsingle/#core-convert) mechanism.
+Spring Shell installs a new `DefaultConversionService` (with built-in converters enabled)
+and registers with it any bean of type `Converter`, `GenericConverter` or `ConverterFactory` that it finds in the application context.
+
+This means that it’s really easy to customize conversion to your custom objects of type `Foo`:
+just install a `Converter<String, Foo>` bean in the context.
+
+```
+@ShellComponent
+class ConversionCommands {
+
+    @ShellMethod("Shows conversion using Spring converter")
+    public String conversionExample(DomainObject object) {
+        return object.getClass().getName();
+    }
+}
+
+class DomainObject {
+    private final String value;
+
+    DomainObject(String value) {
+        this.value = value;
+    }
+
+    public String toString() {
+        return value;
+    }
+}
+
+@Component
+class CustomDomainConverter implements Converter<String, DomainObject> {
+
+    @Override
+    public DomainObject convert(String source) {
+        return new DomainObject(source);
+    }
+}
+```
+
+| |Mind your String representation

As in the example above, it’s probably a good idea, if you can, to have
your `toString()` implementation return the converse of what was used
to create the object instance. This is because, when a value fails
validation, Spring Shell prints

```
The following constraints were not met:
--arg : (You passed '')
```

See [Validating Command Arguments](#validating-command-arguments) for more information.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |If you want to customize the `ConversionService` further, you can either

* Have the default one injected in your code and act upon it in some way

* Override it altogether with your own (custom converters will need to be registered by hand).
The ConversionService used by Spring Shell needs to be [qualified](https://docs.spring.io/spring/docs/4.3.12.RELEASE/spring-framework-reference/htmlsingle/#beans-autowired-annotation-qualifiers) as `"spring-shell"`.|
+|---|---|
\ No newline at end of file
diff --git a/docs/en/spring-statemachine/README.md b/docs/en/spring-statemachine/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/docs/en/spring-statemachine/spring-statemachine.md b/docs/en/spring-statemachine/spring-statemachine.md
new file mode 100644
index 0000000000000000000000000000000000000000..9b21d47e06dd0bbc76c0195e3328432a4cd180e1
--- /dev/null
+++ b/docs/en/spring-statemachine/spring-statemachine.md
@@ -0,0 +1,9596 @@
+# Spring Statemachine - Reference Documentation
+
+## Preface
+
+The concept of a state machine is most likely older than any reader
+of this reference documentation and definitely older than the Java
+language itself. Descriptions of finite automata date back to 1943,
+when Warren McCulloch and Walter Pitts wrote a paper about
+them. Later, George H. Mealy presented a state machine concept (known as
+a “Mealy Machine”) in 1955. A year later, in 1956, Edward F.
+Moore presented another paper, in which he described what is known as
+a “Moore Machine”. If
+you have ever read anything about state machines, the names Mealy and Moore
+should have popped up at some point.
+
+This reference documentation contains the following parts:
+
+[Introduction](#introduction) contains an introduction to this reference documentation.
+
+[Using Spring Statemachine](#statemachine) describes the usage of Spring Statemachine (SSM).
+
+[State Machine Examples](#statemachine-examples) contains more detailed state machine examples.
+
+[FAQ](#statemachine-faq) contains frequently asked questions.
+
+[Appendices](#appendices) contains generic information about used material and state machines.
+
+# Introduction
+
+Spring Statemachine (SSM) is a framework that lets application developers
+use traditional state machine concepts with Spring applications. SSM
+provides the following features:
+
+* Easy-to-use flat (one-level) state machine for simple use cases.
+
+* Hierarchical state machine structure to ease complex state
+  configuration.
+
+* State machine regions to provide even more complex state
+  configurations.
+
+* Usage of triggers, transitions, guards, and actions.
+
+* Type-safe configuration adapter.
+
+* State machine event listeners.
+
+* Spring IoC integration to associate beans with a state machine.
+
+Before you continue, we recommend going through the appendices [Glossary](#glossary) and [A State Machine Crash Course](#crashcourse) to get a generic idea of what state machines are.
+The rest of the documentation expects you to be
+familiar with state machine concepts.
+
+## Background
+
+State machines are powerful because their behavior is always guaranteed to be
+consistent and relatively easy to debug, because their operational
+rules are written in stone when the machine is started. The idea is that your
+application may exist in a finite number of states. Then something
+happens that takes your application from one state to the next.
+A state machine is driven by triggers, which are based on either
+events or timers.
+
+It is much easier to design high-level logic outside of your
+application and then interact with the state machine in various
+different ways. You can interact with a state machine by
+sending events, listening to what the state machine does, or requesting
+the current state.
+
+Traditionally, state machines are added to an existing project when
+developers realize that the code base is starting to look like a plate
+full of spaghetti. Spaghetti code looks like a never-ending, hierarchical
+structure of IF, ELSE, and BREAK clauses, and compilers should probably
+ask developers to go home when things are starting to look too complex.
+
+## Usage Scenarios
+
+A project is a good candidate to use a state machine when:
+
+* You can represent the application or part of its structure as states.
+
+* You want to split complex logic into smaller manageable tasks.
+
+* The application is already suffering from concurrency issues with (for example)
+  something happening asynchronously.
+
+You are already trying to implement a state machine when you:
+
+* Use boolean flags or enums to model situations.
+
+* Have variables that have meaning only for some part of your
+  application lifecycle.
+
+* Loop through an if-else structure (or, worse, multiple such structures),
+  check whether a particular flag or
+  enum is set, and then make further exceptions about what to do when certain
+  combinations of your flags and enums exist or do not exist.
+
+# Getting started
+
+If you are just getting started with Spring Statemachine,
+this is the section for you! Here, we answer the basic
+“what?”, “how?” and “why?” questions. We start with a gentle
+introduction to Spring Statemachine. We then build our
+first Spring Statemachine application and discuss some
+core principles as we go.
+
+## System Requirement
+
+Spring Statemachine 3.0.1 is built and tested with
+JDK 8 (all artifacts have JDK 7 compatibility) and Spring
+Framework 5.3.8. It does not require any other
+dependencies outside of Spring Framework within its core system.
+
+Other optional parts (such as [Using Distributed States](#sm-distributed)) have dependencies on
+Zookeeper, while [State Machine Examples](#statemachine-examples) has dependencies
+on `spring-shell` and `spring-boot`, which pull other dependencies
+beyond the framework itself. Also, the optional security and data access features have
+dependencies on the Spring Security and Spring Data modules.
+
+## Modules
+
+The following table describes the modules that are available for Spring Statemachine.
+
+| Module | Description |
+|------------------------------------|------------------------------------------------------------------------|
+| `spring-statemachine-core` | The core system of Spring Statemachine. |
+|`spring-statemachine-recipes-common`| Common recipes that do not require dependencies outside of the core
framework. | +| `spring-statemachine-kryo` | `Kryo` serializers for Spring Statemachine. | +| `spring-statemachine-data-common` | Common support module for `Spring Data`. | +| `spring-statemachine-data-jpa` | Support module for `Spring Data JPA`. | +| `spring-statemachine-data-redis` | Support module for `Spring Data Redis`. | +| `spring-statemachine-data-mongodb` | Support module for `Spring Data MongoDB`. | +| `spring-statemachine-zookeeper` | Zookeeper integration for a distributed state machine. | +| `spring-statemachine-test` | Support module for state machine testing. | +| `spring-statemachine-cluster` |Support module for Spring Cloud Cluster.
Note that Spring Cloud Cluster has been superseded by Spring Integration.|
+| `spring-statemachine-uml` | Support module for UI UML modeling with Eclipse Papyrus. |
+|`spring-statemachine-autoconfigure` | Support module for Spring Boot. |
+| `spring-statemachine-bom` | Bill of Materials pom. |
+| `spring-statemachine-starter` | Spring Boot starter. |
+
+## Using Gradle
+
+The following listing shows a typical `build.gradle` file created by choosing various settings at [https://start.spring.io](https://start.spring.io):
+
+```
+buildscript {
+	ext {
+		springBootVersion = '2.4.8'
+	}
+	repositories {
+		mavenCentral()
+		maven { url "https://repo.spring.io/snapshot" }
+		maven { url "https://repo.spring.io/milestone" }
+	}
+	dependencies {
+		classpath("org.springframework.boot:spring-boot-gradle-plugin:${springBootVersion}")
+	}
+}
+
+apply plugin: 'java'
+apply plugin: 'eclipse'
+apply plugin: 'org.springframework.boot'
+apply plugin: 'io.spring.dependency-management'
+
+group = 'com.example'
+version = '0.0.1-SNAPSHOT'
+sourceCompatibility = 1.8
+
+repositories {
+	mavenCentral()
+	maven { url "https://repo.spring.io/snapshot" }
+	maven { url "https://repo.spring.io/milestone" }
+}
+
+ext {
+	springStatemachineVersion = '3.0.1'
+}
+
+dependencies {
+	compile('org.springframework.statemachine:spring-statemachine-starter')
+	testCompile('org.springframework.boot:spring-boot-starter-test')
+}
+
+dependencyManagement {
+	imports {
+		mavenBom "org.springframework.statemachine:spring-statemachine-bom:${springStatemachineVersion}"
+	}
+}
+```
+
+| |Replace `0.0.1-SNAPSHOT` with a version you want to use.|
+|---|---|
+
+With a normal project structure, you can build this project with the following command:
+
+```
+# ./gradlew clean build
+```
+
+The expected Spring Boot-packaged fat jar would be `build/libs/demo-0.0.1-SNAPSHOT.jar`.
+
+| |You do not need the `libs-milestone` and `libs-snapshot` repositories for
production development.|
+|---|---|
+
+## Using Maven
+
+The following example shows a typical `pom.xml` file, which was created by choosing various options at [https://start.spring.io](https://start.spring.io):
+
+```
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>com.example</groupId>
+    <artifactId>demo</artifactId>
+    <version>0.0.1-SNAPSHOT</version>
+    <packaging>jar</packaging>
+
+    <name>gs-statemachine</name>
+    <description>Demo project for Spring Statemachine</description>
+
+    <parent>
+        <groupId>org.springframework.boot</groupId>
+        <artifactId>spring-boot-starter-parent</artifactId>
+        <version>2.4.8</version>
+    </parent>
+
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+        <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
+        <java.version>1.8</java.version>
+        <spring-statemachine.version>3.0.1</spring-statemachine.version>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.springframework.statemachine</groupId>
+            <artifactId>spring-statemachine-starter</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-test</artifactId>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>org.springframework.statemachine</groupId>
+                <artifactId>spring-statemachine-bom</artifactId>
+                <version>${spring-statemachine.version}</version>
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.springframework.boot</groupId>
+                <artifactId>spring-boot-maven-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+
+    <repositories>
+        <repository>
+            <id>spring-snapshots</id>
+            <name>Spring Snapshots</name>
+            <url>https://repo.spring.io/snapshot</url>
+            <snapshots>
+                <enabled>true</enabled>
+            </snapshots>
+        </repository>
+        <repository>
+            <id>spring-milestones</id>
+            <name>Spring Milestones</name>
+            <url>https://repo.spring.io/milestone</url>
+            <snapshots>
+                <enabled>false</enabled>
+            </snapshots>
+        </repository>
+    </repositories>
+
+    <pluginRepositories>
+        <pluginRepository>
+            <id>spring-snapshots</id>
+            <name>Spring Snapshots</name>
+            <url>https://repo.spring.io/snapshot</url>
+            <snapshots>
+                <enabled>true</enabled>
+            </snapshots>
+        </pluginRepository>
+        <pluginRepository>
+            <id>spring-milestones</id>
+            <name>Spring Milestones</name>
+            <url>https://repo.spring.io/milestone</url>
+            <snapshots>
+                <enabled>false</enabled>
+            </snapshots>
+        </pluginRepository>
+    </pluginRepositories>
+
+</project>
+```
+
+| |Replace `0.0.1-SNAPSHOT` with a version you want to use.|
+|---|---|
+
+With a normal project structure, you can build this project with the following command:
+
+```
+# mvn clean package
+```
+
+The expected Spring Boot-packaged fat jar would be `target/demo-0.0.1-SNAPSHOT.jar`.
+
+| |You do not need the `libs-milestone` and `libs-snapshot` repositories for
production development.|
+|---|---|
+
+## Developing Your First Spring Statemachine Application
+
+You can start by creating a simple Spring Boot `Application` class
+that implements `CommandLineRunner`. The following example shows how to do so:
+
+```
+@SpringBootApplication
+public class Application implements CommandLineRunner {
+
+    public static void main(String[] args) {
+        SpringApplication.run(Application.class, args);
+    }
+
+}
+```
+
+Then you need to add states and events, as the following example shows:
+
+```
+public enum States {
+    SI, S1, S2
+}
+
+public enum Events {
+    E1, E2
+}
+```
+
+Then you need to add the state machine configuration, as the following example shows:
+
+```
+@Configuration
+@EnableStateMachine
+public class StateMachineConfig
+        extends EnumStateMachineConfigurerAdapter<States, Events> {
+
+    @Override
+    public void configure(StateMachineConfigurationConfigurer<States, Events> config)
+            throws Exception {
+        config
+            .withConfiguration()
+            .autoStartup(true)
+            .listener(listener());
+    }
+
+    @Override
+    public void configure(StateMachineStateConfigurer<States, Events> states)
+            throws Exception {
+        states
+            .withStates()
+            .initial(States.SI)
+            .states(EnumSet.allOf(States.class));
+    }
+
+    @Override
+    public void configure(StateMachineTransitionConfigurer<States, Events> transitions)
+            throws Exception {
+        transitions
+            .withExternal()
+                .source(States.SI).target(States.S1).event(Events.E1)
+                .and()
+            .withExternal()
+                .source(States.S1).target(States.S2).event(Events.E2);
+    }
+
+    @Bean
+    public StateMachineListener<States, Events> listener() {
+        return new StateMachineListenerAdapter<States, Events>() {
+            @Override
+            public void stateChanged(State<States, Events> from, State<States, Events> to) {
+                System.out.println("State change to " + to.getId());
+            }
+        };
+    }
+}
+```
+
+Then you need to implement `CommandLineRunner` and autowire the `StateMachine`.
+The following example shows how to do so:
+
+```
+@Autowired
+private StateMachine<States, Events> stateMachine;
+
+@Override
+public void run(String... args) throws Exception {
+    stateMachine.sendEvent(Events.E1);
+    stateMachine.sendEvent(Events.E2);
+}
+```
+
+Depending on whether you build your application with `Gradle` or `Maven`,
+you can run it by using `java -jar build/libs/gs-statemachine-0.1.0.jar` or
+`java -jar target/gs-statemachine-0.1.0.jar`, respectively.
+
+The result of this command should be normal Spring Boot output.
+However, you should also find the following lines:
+
+```
+State change to SI
+State change to S1
+State change to S2
+```
+
+These lines indicate that the machine you constructed
+is moving from one state to another, as it should.
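+
+Note that the blocking `sendEvent(E)` variant used above is deprecated as of Spring Statemachine 3.x (see the What’s New section below). A minimal sketch of the equivalent reactive send, assuming Reactor’s `Mono` and Spring Messaging’s `MessageBuilder` are on the classpath, looks like this:
+
+```
+// Wrap the event in a Message and send it reactively; subscribe() triggers the send.
+stateMachine.sendEvent(
+        Mono.just(MessageBuilder.withPayload(Events.E1).build()))
+    .subscribe();
+```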
+
+# What’s New
+
+## In 1.1
+
+Spring Statemachine 1.1 focuses on security and better
+interoperability with web applications. It includes the following:
+
+* Comprehensive support for Spring Security has been added. See [State Machine Security](#sm-security).
+
+* Context integration with `@WithStateMachine` has been greatly
+  enhanced. See [Context Integration](#sm-context).
+
+* `StateContext` is now a first-class citizen, letting you
+  interact with a state machine. See [Using `StateContext`](#sm-statecontext).
+
+* Features around persistence have been enhanced with built-in
+  support for Redis. See [Using Redis](#sm-persist-redis).
+
+* A new feature helps with persist operations. See [Using `StateMachinePersister`](#sm-persist-statemachinepersister).
+
+* Configuration model classes are now in a public API.
+
+* New features in timer-based events.
+
+* New `Junction` pseudostate. See [Junction State](#statemachine-config-states-junction).
+
+* New Exit Point and Entry Point pseudostates. See [Exit and Entry Point States](#statemachine-config-states-exitentry).
+
+* Configuration model verifier.
+
+* New samples. See [Security](#statemachine-examples-security) and [Event Service](#statemachine-examples-eventservice).
+
+* UI modeling support using Eclipse Papyrus. See [Eclipse Modeling Support](#sm-papyrus).
+
+## In 1.2
+
+Spring Statemachine 1.2 focuses on generic enhancements, better
+UML support, and integrations with external config repositories.
+It includes the following:
+
+* Support for UML sub-machines. See [Using a Sub-Machine Reference](#sm-papyrus-submachineref).
+
+* A new repository abstraction that keeps machine configuration in an
+  external repository. See [Repository Support](#sm-repository).
+
+* New support for state actions. See [State Actions](#state-actions).
+
+* New transition error action concepts. See [Transition Action Error Handling](#statemachine-config-transition-actions-errorhandling).
+
+* New action error concepts. See [State Action Error Handling](#statemachine-config-state-actions-errorhandling).
+
+* Initial work for Spring Boot support. See [Spring Boot Support](#sm-boot).
+
+* Support for tracing and monitoring. See [Monitoring a State Machine](#sm-monitoring).
+
+### In 1.2.8
+
+Spring Statemachine 1.2.8 contains a bit more functionality than is normally
+seen in a point release, but these changes did not merit a fork of
+Spring Statemachine 1.3. It includes the following:
+
+* JPA entity classes have changed table names. See [JPA](#sm-repository-config-jpa).
+
+* A new sample. See [Data Persist](#statemachine-examples-datapersist).
+
+* New entity classes for persistence. See [Repository Persistence](#sm-repository-persistence).
+
+* Transition conflict policy. See [Configuring Common Settings](#statemachine-config-commonsettings).
+
+## In 2.0
+
+Spring Statemachine 2.0 focuses on Spring Boot 2.x support.
+
+### In 2.0.0
+
+Spring Statemachine 2.0.0 includes the following:
+
+* The format of monitoring and tracing has been changed. See [Monitoring and Tracing](#sm-boot-monitoring).
+
+* The `spring-statemachine-boot` module has been renamed to `spring-statemachine-autoconfigure`.
+
+## In 3.0
+
+Spring Statemachine 3.0.0 focuses on adding Reactive support. Moving from `2.x` to `3.x`
+introduces some breaking changes, which are detailed in the [Reactor Migration Guide](#appendix-reactormigrationguide).
+
+With `3.0.x`, we have deprecated all blocking methods; they will be removed at some point
+in a future release.
+
+| |Please read the appendix [Reactor Migration Guide](#appendix-reactormigrationguide) carefully, as it will steer you
through the process of migrating to `3.x` for the cases we are not handling internally.|
|---|---|

At this point, most of the documentation has been changed to showcase reactive interfaces,
while we still keep some notes around for users of the old blocking methods.

# Using Spring Statemachine

This part of the reference documentation explains the core functionality
that Spring Statemachine provides to any Spring based application.

It includes the following topics:

* [Statemachine Configuration](#sm-config) describes the generic configuration support.

* [State Machine ID](#sm-machineid) describes the use of machine IDs.

* [State Machine Factories](#sm-factories) describes the generic state machine factory support.

* [Using Deferred Events](#sm-deferevents) describes the deferred event support.

* [Using Scopes](#sm-scopes) describes the scope support.

* [Using Actions](#sm-actions) describes the actions support.

* [Using Guards](#sm-guards) describes the guard support.

* [Using Extended State](#sm-extendedstate) describes the extended state support.

* [Using `StateContext`](#sm-statecontext) describes the state context support.

* [Triggering Transitions](#sm-triggers) describes the use of triggers.

* [Listening to State Machine Events](#sm-listeners) describes the use of state machine listeners.

* [Context Integration](#sm-context) describes the generic Spring application context support.

* [Using `StateMachineAccessor`](#sm-accessor) describes the state machine internal accessor support.

* [Using `StateMachineInterceptor`](#sm-interceptor) describes the state machine interceptor support.

* [State Machine Security](#sm-security) describes the state machine security support.

* [State Machine Error Handling](#sm-error-handling) describes the state machine error handling support.

* [State Machine Services](#sm-service) describes the state machine service support.

* [Persisting a State Machine](#sm-persist) describes the state machine persisting support.

* [Spring Boot Support](#sm-boot) describes the Spring Boot support.

* [Monitoring a State Machine](#sm-monitoring) describes the monitoring and tracing support.

* [Using Distributed States](#sm-distributed) describes the distributed state machine support.

* [Testing Support](#sm-test) describes the state machine testing support.

* [Eclipse Modeling Support](#sm-papyrus) describes the state machine UML modeling support.

* [Repository Support](#sm-repository) describes the state machine repository config support.

## Statemachine Configuration

One of the common tasks when using a state machine is to design its
runtime configuration. This chapter focuses on how Spring
Statemachine is configured and how it leverages Spring’s lightweight
IoC containers to simplify the application internals and make them more
manageable.

| |Configuration examples in this section are not feature complete. That is,
you always need to have definitions of both states and transitions.
Otherwise, state machine configuration would be ill-formed. We have
simply made code snippets less verbose by leaving other needed parts
out.|
|---|---|

### Using `enable` Annotations

We use two familiar Spring *enabler* annotations to ease configuration: `@EnableStateMachine` and `@EnableStateMachineFactory`.
These annotations, when placed in a `@Configuration` class, enable
some basic functionality needed by a state machine.

You can use `@EnableStateMachine` when you need a configuration to create an
instance of `StateMachine`. Usually, a `@Configuration` class extends adapters
(`EnumStateMachineConfigurerAdapter` or `StateMachineConfigurerAdapter`), which
lets you override configuration callback methods. We automatically
detect whether you use these adapter classes and modify the runtime configuration
logic accordingly.

You can use `@EnableStateMachineFactory` when you need a configuration to create an
instance of a `StateMachineFactory`.

| |Usage examples of these are shown in the sections below.|
|---|---|

### Configuring States

We get into more complex configuration examples a bit later in this guide, but
we first start with something simple. For the most simple state
machines, you can use `EnumStateMachineConfigurerAdapter` and define the
possible states, choosing the initial and optional end states.

```
@Configuration
@EnableStateMachine
public class Config1Enums
        extends EnumStateMachineConfigurerAdapter<States, Events> {

    @Override
    public void configure(StateMachineStateConfigurer<States, Events> states)
            throws Exception {
        states
            .withStates()
                .initial(States.S1)
                .end(States.SF)
                .states(EnumSet.allOf(States.class));
    }

}
```

You can also use strings instead of enumerations as states and
events by using `StateMachineConfigurerAdapter`, as shown in the next example. Most
of the configuration examples use enumerations, but, generally speaking,
you can interchange strings and enumerations.

```
@Configuration
@EnableStateMachine
public class Config1Strings
        extends StateMachineConfigurerAdapter<String, String> {

    @Override
    public void configure(StateMachineStateConfigurer<String, String> states)
            throws Exception {
        states
            .withStates()
                .initial("S1")
                .end("SF")
                .states(new HashSet<String>(Arrays.asList("S1","S2","S3","S4")));
    }

}
```

| |Using enumerations brings a safer set of states and event types but
limits possible combinations to compile time. Strings do not have this
limitation and let you use more dynamic ways to build state
machine configurations but do not allow the same level of safety.|
|---|---|

### Configuring Hierarchical States

You can define hierarchical states by using multiple `withStates()` calls,
where you can use `parent()` to indicate that these
particular states are sub-states of some other state.
The following example shows how to do so:

```
@Configuration
@EnableStateMachine
public class Config2
        extends EnumStateMachineConfigurerAdapter<States, Events> {

    @Override
    public void configure(StateMachineStateConfigurer<States, Events> states)
            throws Exception {
        states
            .withStates()
                .initial(States.S1)
                .state(States.S1)
                .and()
                .withStates()
                    .parent(States.S1)
                    .initial(States.S2)
                    .state(States.S2);
    }

}
```

### Configuring Regions

There are no special configuration methods to mark a collection of
states to be part of an orthogonal state. To put it simply, an orthogonal
state is created when the same hierarchical state machine has multiple sets
of states, each of which has an initial state. Because an individual state
machine can only have one initial state, multiple initial states must
mean that a specific state must have multiple independent regions.
The following example shows how to define regions:

```
@Configuration
@EnableStateMachine
public class Config10
        extends EnumStateMachineConfigurerAdapter {

    @Override
    public void configure(StateMachineStateConfigurer states)
            throws Exception {
        states
            .withStates()
                .initial(States2.S1)
                .state(States2.S2)
                .and()
                .withStates()
                    .parent(States2.S2)
                    .initial(States2.S2I)
                    .state(States2.S21)
                    .end(States2.S2F)
                    .and()
                .withStates()
                    .parent(States2.S2)
                    .initial(States2.S3I)
                    .state(States2.S31)
                    .end(States2.S3F);
    }

}
```

When persisting machines with regions, or generally
relying on any functionality to reset a machine, you may need
to have a dedicated ID for a region. By default, this ID
is a generated UUID. As the following example shows, `StateConfigurer` has
a method called `region(String id)` that lets you set the ID for a region:

```
@Configuration
@EnableStateMachine
public class Config10RegionId
        extends EnumStateMachineConfigurerAdapter {

    @Override
    public void configure(StateMachineStateConfigurer states)
            throws Exception {
        states
            .withStates()
                .initial(States2.S1)
                .state(States2.S2)
                .and()
                .withStates()
                    .parent(States2.S2)
                    .region("R1")
                    .initial(States2.S2I)
                    .state(States2.S21)
                    .end(States2.S2F)
                    .and()
                .withStates()
                    .parent(States2.S2)
                    .region("R2")
                    .initial(States2.S3I)
                    .state(States2.S31)
                    .end(States2.S3F);
    }

}
```

### Configuring Transitions

We support three different types of transitions: `external`, `internal`, and `local`.
Transitions are triggered either by a signal
(which is an event sent into a state machine) or by a timer.
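For orientation, the following is roughly what triggering a transition by a signal looks like from the caller’s side. This is a minimal sketch, assuming the reactive `sendEvent` variant shown later in this document and the `States`/`Events` enums used above:

```
// fire Events.E1 as a signal into the machine; the transition runs
// only if the machine is in a state where the trigger is evaluated
stateMachine
    .sendEvent(Mono.just(MessageBuilder
        .withPayload(Events.E1).build()))
    .subscribe();
```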
The following example shows how to define all three kinds of transitions:

```
@Configuration
@EnableStateMachine
public class Config3
        extends EnumStateMachineConfigurerAdapter {

    @Override
    public void configure(StateMachineStateConfigurer states)
            throws Exception {
        states
            .withStates()
                .initial(States.S1)
                .states(EnumSet.allOf(States.class));
    }

    @Override
    public void configure(StateMachineTransitionConfigurer transitions)
            throws Exception {
        transitions
            .withExternal()
                .source(States.S1).target(States.S2)
                .event(Events.E1)
                .and()
            .withInternal()
                .source(States.S2)
                .event(Events.E2)
                .and()
            .withLocal()
                .source(States.S2).target(States.S3)
                .event(Events.E3);
    }

}
```

### Configuring Guards

You can use guards to protect state transitions. You can use the `Guard` interface
to do an evaluation where a method has access to a `StateContext`.
The following example shows how to do so:

```
@Configuration
@EnableStateMachine
public class Config4
        extends EnumStateMachineConfigurerAdapter {

    @Override
    public void configure(StateMachineTransitionConfigurer transitions)
            throws Exception {
        transitions
            .withExternal()
                .source(States.S1).target(States.S2)
                .event(Events.E1)
                .guard(guard())
                .and()
            .withExternal()
                .source(States.S2).target(States.S3)
                .event(Events.E2)
                .guardExpression("true");

    }

    @Bean
    public Guard guard() {
        return new Guard() {

            @Override
            public boolean evaluate(StateContext context) {
                return true;
            }
        };
    }

}
```

In the preceding example, we used two different types of guard configurations. First, we
created a simple `Guard` as a bean and attached it to the transition between
states `S1` and `S2`.

Second, we used a SpEL expression as a guard to dictate that the
expression must return a `BOOLEAN` value. Behind the scenes, this
expression-based guard is a `SpelExpressionGuard`. We attached it to
the transition between states `S2` and `S3`. Both guards
always evaluate to `true`.

### Configuring Actions

You can define actions to be executed with transitions and states.
An action is always run as a result of a transition that
originates from a trigger. The following example shows how to define an action:

```
@Configuration
@EnableStateMachine
public class Config51
        extends EnumStateMachineConfigurerAdapter {

    @Override
    public void configure(StateMachineTransitionConfigurer transitions)
            throws Exception {
        transitions
            .withExternal()
                .source(States.S1)
                .target(States.S2)
                .event(Events.E1)
                .action(action());
    }

    @Bean
    public Action action() {
        return new Action() {

            @Override
            public void execute(StateContext context) {
                // do something
            }
        };
    }

}
```

In the preceding example, a single `Action` is defined as a bean named `action` and associated
with a transition from `S1` to `S2`.
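Because `Action` is a single-method interface, the same bean can also be written as a lambda. The following is a minimal sketch, assuming the enum-typed machine used in the example above:

```
@Bean
public Action<States, Events> action() {
    // equivalent lambda form of the anonymous class above
    return context -> {
        // do something
    };
}
```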
+The following example shows how to use an action multiple times: + +``` +@Configuration +@EnableStateMachine +public class Config52 + extends EnumStateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineStateConfigurer states) + throws Exception { + states + .withStates() + .initial(States.S1, action()) + .state(States.S1, action(), null) + .state(States.S2, null, action()) + .state(States.S2, action()) + .state(States.S3, action(), action()); + } + + @Bean + public Action action() { + return new Action() { + + @Override + public void execute(StateContext context) { + // do something + } + }; + } + +} +``` + +| |Usually, you would not define the same `Action` instance for different
stages, but we did it here to avoid making too much noise in the code
snippet.|
|---|---|

In the preceding example, a single `Action` is defined by the bean named `action` and associated
with states `S1`, `S2`, and `S3`. We need to clarify what is going on here:

* We defined an action for the initial state, `S1`.

* We defined an entry action for state `S1` and left the exit action empty.

* We defined an exit action for state `S2` and left the entry action empty.

* We defined a single state action for state `S2`.

* We defined both entry and exit actions for state `S3`.

* Note that state `S1` is used twice, with the `initial()` and `state()` functions. You need to do this only if you want to define entry or exit
  actions with the initial state.

| |Defining an action with the `initial()` function only runs a particular
action when a state machine or sub state is started. This action
is an initializing action that is run only once. An action defined
with `state()` is then run if the state machine transitions back
and forward between initial and non-initial states.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +#### State Actions + +State actions are run differently compared to entry and exit +actions, because execution happens after state has been entered +and can be cancelled if state exit happens before a particular action +has been completed. + +State actions are executed using normal reactive flow by subscribing with +a Reactor’s default parallel scheduler. This means that, whatever you do in your +action, you need to be able to catch `InterruptedException` or, more generally, +periodically check whether `Thread` is interrupted. + +The following example shows typical configuration that uses default the `IMMEDIATE_CANCEL`, which +would immediately cancel a running task when its state is complete: + +``` +@Configuration +@EnableStateMachine +static class Config1 extends StateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineConfigurationConfigurer config) throws Exception { + config + .withConfiguration() + .stateDoActionPolicy(StateDoActionPolicy.IMMEDIATE_CANCEL); + } + + @Override + public void configure(StateMachineStateConfigurer states) throws Exception { + states + .withStates() + .initial("S1") + .state("S2", context -> {}) + .state("S3"); + } + + @Override + public void configure(StateMachineTransitionConfigurer transitions) throws Exception { + transitions + .withExternal() + .source("S1") + .target("S2") + .event("E1") + .and() + .withExternal() + .source("S2") + .target("S3") + .event("E2"); + } +} +``` + +You can set a policy to `TIMEOUT_CANCEL` together with a global timeout +for each machine. This changes state behavior to await action completion +before cancelation is requested. The following example shows how to do so: + +``` +@Override +public void configure(StateMachineConfigurationConfigurer config) throws Exception { + config + .withConfiguration() + .stateDoActionPolicy(StateDoActionPolicy.TIMEOUT_CANCEL) + .stateDoActionPolicyTimeout(10, TimeUnit.SECONDS); +} +``` + +If `Event` directly takes a machine into a state so that event headers +are available to a particular action, you can also use a dedicated +event header to set a specific timeout (defined in `millis`). +You can use the reserved header value `StateMachineMessageHeaders.HEADER_DO_ACTION_TIMEOUT`for this purpose. The following example shows how to do so: + +``` +@Autowired +StateMachine stateMachine; + +void sendEventUsingTimeout() { + stateMachine + .sendEvent(Mono.just(MessageBuilder + .withPayload("E1") + .setHeader(StateMachineMessageHeaders.HEADER_DO_ACTION_TIMEOUT, 5000) + .build())) + .subscribe(); + +} +``` + +#### Transition Action Error Handling + +You can always catch exceptions manually. However, with actions defined for +transitions, you can define an error action that is called if an +exception is raised. The exception is then available from a `StateContext`passed to that action. 
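An error action is an ordinary `Action` that happens to receive the exception through its `StateContext`. As a minimal sketch (enum-typed machine as in earlier examples; the extended-state key used here is illustrative, not part of the API):

```
@Bean
public Action<States, Events> errorAction() {
    return context -> {
        // the exception raised by the failed transition action
        Exception exception = context.getException();
        // stash it for later inspection; "lastError" is a made-up key
        context.getExtendedState().getVariables()
            .put("lastError", exception.getMessage());
    };
}
```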
The following example shows how to create a state +that handles an exception: + +``` +@Configuration +@EnableStateMachine +public class Config53 + extends EnumStateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineTransitionConfigurer transitions) + throws Exception { + transitions + .withExternal() + .source(States.S1) + .target(States.S2) + .event(Events.E1) + .action(action(), errorAction()); + } + + @Bean + public Action action() { + return new Action() { + + @Override + public void execute(StateContext context) { + throw new RuntimeException("MyError"); + } + }; + } + + @Bean + public Action errorAction() { + return new Action() { + + @Override + public void execute(StateContext context) { + // RuntimeException("MyError") added to context + Exception exception = context.getException(); + exception.getMessage(); + } + }; + } + +} +``` + +If need be, you can manually create similar logic for every action. +The following example shows how to do so: + +``` +@Override +public void configure(StateMachineTransitionConfigurer transitions) + throws Exception { + transitions + .withExternal() + .source(States.S1) + .target(States.S2) + .event(Events.E1) + .action(Actions.errorCallingAction(action(), errorAction())); +} +``` + +#### State Action Error Handling + +Logic similar to the logic that handles errors in state transitions is also available +for entry to a state and exit from a state. + +For these situations, `StateConfigurer` has methods called `stateEntry`, `stateDo`, and`stateExit`. These methods define an `error` action together with a normal (non-error) `action`. +The following example shows how to use all three methods: + +``` +@Configuration +@EnableStateMachine +public class Config55 + extends EnumStateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineStateConfigurer states) + throws Exception { + states + .withStates() + .initial(States.S1) + .stateEntry(States.S2, action(), errorAction()) + .stateDo(States.S2, action(), errorAction()) + .stateExit(States.S2, action(), errorAction()) + .state(States.S3); + } + + @Bean + public Action action() { + return new Action() { + + @Override + public void execute(StateContext context) { + throw new RuntimeException("MyError"); + } + }; + } + + @Bean + public Action errorAction() { + return new Action() { + + @Override + public void execute(StateContext context) { + // RuntimeException("MyError") added to context + Exception exception = context.getException(); + exception.getMessage(); + } + }; + } +} +``` + +### Configuring Pseudo States + +*Pseudo state* configuration is usually done by configuring states and +transitions. Pseudo states are automatically added to state machine as +states. + +#### Initial State + +You can mark a particular state as initial state by using the `initial()`method. This initial action is good, for example, to initialize +extended state variables. 
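Seeding extended state variables is a typical job for such an initial action. A hedged sketch (the variable name is illustrative only):

```
@Bean
public Action<States, Events> initialAction() {
    return context -> {
        // runs once, when the machine (or sub-state) is started
        context.getExtendedState().getVariables().put("retries", 0);
    };
}
```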
The following example shows how to use the `initial()` method: + +``` +@Configuration +@EnableStateMachine +public class Config11 + extends EnumStateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineStateConfigurer states) + throws Exception { + states + .withStates() + .initial(States.S1, initialAction()) + .end(States.SF) + .states(EnumSet.allOf(States.class)); + } + + @Bean + public Action initialAction() { + return new Action() { + + @Override + public void execute(StateContext context) { + // do something initially + } + }; + } + +} +``` + +#### Terminate State + +You can mark a particular state as being an end state by using the `end()` method. +You can do so at most once for each individual sub-machine or region. +The following example shows how to use the `end()` method: + +``` +@Configuration +@EnableStateMachine +public class Config1Enums + extends EnumStateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineStateConfigurer states) + throws Exception { + states + .withStates() + .initial(States.S1) + .end(States.SF) + .states(EnumSet.allOf(States.class)); + } + +} +``` + +#### State History + +You can define state history once for each individual state machine. +You need to choose its state identifier and set either `History.SHALLOW` or`History.DEEP`. The following example uses `History.SHALLOW`: + +``` +@Configuration +@EnableStateMachine +public class Config12 + extends EnumStateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineStateConfigurer states) + throws Exception { + states + .withStates() + .initial(States3.S1) + .state(States3.S2) + .and() + .withStates() + .parent(States3.S2) + .initial(States3.S2I) + .state(States3.S21) + .state(States3.S22) + .history(States3.SH, History.SHALLOW); + } + + @Override + public void configure(StateMachineTransitionConfigurer transitions) + throws Exception { + transitions + .withHistory() + .source(States3.SH) + .target(States3.S22); + } + +} +``` + +Also, as the preceding example shows, you can optionally define a default +transition from a history state into a state vertex in a same machine. +This transition takes place as a default if, for example, the machine has +never been entered — thus, no history would be available. If a default +state transition is not defined, then normal entry into a region is +done. This default transition is also used if a machine’s history is +a final state. + +#### Choice State + +Choice needs to be defined in both states and transitions to work +properly. You can mark a particular state as being a choice state by using the `choice()`method. This state needs to match source state when a transition is +configured for this choice. + +You can configure a transition by using `withChoice()`, where you define source +state and a `first/then/last` structure, which is equivalent to a normal`if/elseif/else`. With `first` and `then`, you can specify a guard just +as you would use a condition with `if/elseif` clauses. + +A transition needs to be able to exist, so you must make sure to use `last`. +Otherwise, the configuration is ill-formed. 
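Conceptually, the `first/then/last` structure evaluates exactly like a plain `if/else if/else` chain. The following is an illustration of that reading, not framework code:

```
// pseudo-logic of .first(S2, s2Guard()).then(S3, s3Guard()).last(S4)
if (s2Guard.evaluate(context)) {        // first
    // take the transition to S2
} else if (s3Guard.evaluate(context)) { // then
    // take the transition to S3
} else {                                // last
    // take the transition to S4
}
```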
The following example shows how to define +a choice state: + +``` +@Configuration +@EnableStateMachine +public class Config13 + extends EnumStateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineStateConfigurer states) + throws Exception { + states + .withStates() + .initial(States.SI) + .choice(States.S1) + .end(States.SF) + .states(EnumSet.allOf(States.class)); + } + + @Override + public void configure(StateMachineTransitionConfigurer transitions) + throws Exception { + transitions + .withChoice() + .source(States.S1) + .first(States.S2, s2Guard()) + .then(States.S3, s3Guard()) + .last(States.S4); + } + + @Bean + public Guard s2Guard() { + return new Guard() { + + @Override + public boolean evaluate(StateContext context) { + return false; + } + }; + } + + @Bean + public Guard s3Guard() { + return new Guard() { + + @Override + public boolean evaluate(StateContext context) { + return true; + } + }; + } + +} +``` + +Actions can be run with both incoming and outgoing transitions of +a choice pseudostate. As the following example shows, one dummy lambda +action is defined that leads into a choice state and one similar dummy +lambda action is defined for one outgoing transition (where it also +defines an error action): + +``` +@Configuration +@EnableStateMachine +public class Config23 + extends EnumStateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineStateConfigurer states) + throws Exception { + states + .withStates() + .initial(States.SI) + .choice(States.S1) + .end(States.SF) + .states(EnumSet.allOf(States.class)); + } + + @Override + public void configure(StateMachineTransitionConfigurer transitions) + throws Exception { + transitions + .withExternal() + .source(States.SI) + .action(c -> { + // action with SI-S1 + }) + .target(States.S1) + .and() + .withChoice() + .source(States.S1) + .first(States.S2, c -> { + return true; + }) + .last(States.S3, c -> { + // action with S1-S3 + }, c -> { + // error callback for action S1-S3 + }); + } +} +``` + +| |Junction have same api format meaning actions can be defined
similarly.|
|---|---|

#### Junction State

You need to define a junction in both states and transitions for it to work
properly. You can mark a particular state as being a junction state by using the `junction()` method. This state needs to match the source state when a transition is
configured for this junction.

You can configure the transition by using `withJunction()`, where you define the source
state and a `first/then/last` structure (which is equivalent to a normal `if/elseif/else`). With `first` and `then`, you can specify a guard as
you would use a condition with `if/elseif` clauses.

A transition needs to be able to exist, so you must make sure to use `last`.
Otherwise, the configuration is ill-formed.
The following example uses a junction:

```
@Configuration
@EnableStateMachine
public class Config20
        extends EnumStateMachineConfigurerAdapter {

    @Override
    public void configure(StateMachineStateConfigurer states)
            throws Exception {
        states
            .withStates()
                .initial(States.SI)
                .junction(States.S1)
                .end(States.SF)
                .states(EnumSet.allOf(States.class));
    }

    @Override
    public void configure(StateMachineTransitionConfigurer transitions)
            throws Exception {
        transitions
            .withJunction()
                .source(States.S1)
                .first(States.S2, s2Guard())
                .then(States.S3, s3Guard())
                .last(States.S4);
    }

    @Bean
    public Guard s2Guard() {
        return new Guard() {

            @Override
            public boolean evaluate(StateContext context) {
                return false;
            }
        };
    }

    @Bean
    public Guard s3Guard() {
        return new Guard() {

            @Override
            public boolean evaluate(StateContext context) {
                return true;
            }
        };
    }

}
```

| |The difference between choice and junction is purely academic, as both are
implemented with `first/then/last` structures. However, in theory, based
on UML modeling, `choice` allows only one incoming transition while `junction` allows multiple incoming transitions. At a code level, the
functionality is pretty much identical.|
|---|---|

#### Fork State

You must define a fork in both states and transitions for it to work
properly. You can mark a particular state as being a fork state by using the `fork()` method. This state needs to match the source state when a transition is
configured for this fork.

The target state needs to be a super state or an immediate state in a
region. Using a super state as a target takes all regions into their
initial states. Targeting an individual state gives a more controlled entry
into the regions. The following example uses a fork:

```
@Configuration
@EnableStateMachine
public class Config14
        extends EnumStateMachineConfigurerAdapter {

    @Override
    public void configure(StateMachineStateConfigurer states)
            throws Exception {
        states
            .withStates()
                .initial(States2.S1)
                .fork(States2.S2)
                .state(States2.S3)
                .and()
                .withStates()
                    .parent(States2.S3)
                    .initial(States2.S2I)
                    .state(States2.S21)
                    .state(States2.S22)
                    .end(States2.S2F)
                .and()
                .withStates()
                    .parent(States2.S3)
                    .initial(States2.S3I)
                    .state(States2.S31)
                    .state(States2.S32)
                    .end(States2.S3F);
    }

    @Override
    public void configure(StateMachineTransitionConfigurer transitions)
            throws Exception {
        transitions
            .withFork()
                .source(States2.S2)
                .target(States2.S22)
                .target(States2.S32);
    }

}
```

#### Join State

You must define a join in both states and transitions for it to work
properly. You can mark a particular state as being a join state by using the `join()` method. This state does not need to match either the source states or the
target state in a transition configuration.

You can select a target state where a transition goes when all source states
have been joined. If you use a state that hosts regions as the source, the end
states of the regions are used as joins. Otherwise, you can pick any
states from a region. The following example uses a join:

```
@Configuration
@EnableStateMachine
public class Config15
        extends EnumStateMachineConfigurerAdapter {

    @Override
    public void configure(StateMachineStateConfigurer states)
            throws Exception {
        states
            .withStates()
                .initial(States2.S1)
                .state(States2.S3)
                .join(States2.S4)
                .state(States2.S5)
                .and()
                .withStates()
                    .parent(States2.S3)
                    .initial(States2.S2I)
                    .state(States2.S21)
                    .state(States2.S22)
                    .end(States2.S2F)
                .and()
                .withStates()
                    .parent(States2.S3)
                    .initial(States2.S3I)
                    .state(States2.S31)
                    .state(States2.S32)
                    .end(States2.S3F);
    }

    @Override
    public void configure(StateMachineTransitionConfigurer transitions)
            throws Exception {
        transitions
            .withJoin()
                .source(States2.S2F)
                .source(States2.S3F)
                .target(States2.S4)
                .and()
            .withExternal()
                .source(States2.S4)
                .target(States2.S5);
    }
}
```

You can also have multiple transitions originate from a
join state. In this case, we advise you to use guards and define your guards
such that only one guard evaluates to `TRUE` at any given time. Otherwise,
transition behavior is not predictable.
This is shown in the following example, where the guard +checks whether the extended state has variables: + +``` +@Configuration +@EnableStateMachine +public class Config22 + extends EnumStateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineStateConfigurer states) + throws Exception { + states + .withStates() + .initial(States2.S1) + .state(States2.S3) + .join(States2.S4) + .state(States2.S5) + .end(States2.SF) + .and() + .withStates() + .parent(States2.S3) + .initial(States2.S2I) + .state(States2.S21) + .state(States2.S22) + .end(States2.S2F) + .and() + .withStates() + .parent(States2.S3) + .initial(States2.S3I) + .state(States2.S31) + .state(States2.S32) + .end(States2.S3F); + } + + @Override + public void configure(StateMachineTransitionConfigurer transitions) + throws Exception { + transitions + .withJoin() + .source(States2.S2F) + .source(States2.S3F) + .target(States2.S4) + .and() + .withExternal() + .source(States2.S4) + .target(States2.S5) + .guardExpression("!extendedState.variables.isEmpty()") + .and() + .withExternal() + .source(States2.S4) + .target(States2.SF) + .guardExpression("extendedState.variables.isEmpty()"); + } +} +``` + +#### Exit and Entry Point States + +You can use exit and entry points to do more controlled exit and entry +from and into a submachine. +The following example uses the `withEntry` and `withExit` methods to define entry points: + +``` +@Configuration +@EnableStateMachine +static class Config21 extends StateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineStateConfigurer states) + throws Exception { + states + .withStates() + .initial("S1") + .state("S2") + .state("S3") + .and() + .withStates() + .parent("S2") + .initial("S21") + .entry("S2ENTRY") + .exit("S2EXIT") + .state("S22"); + } + + @Override + public void configure(StateMachineTransitionConfigurer transitions) + throws Exception { + transitions + .withExternal() + .source("S1").target("S2") + .event("E1") + .and() + .withExternal() + .source("S1").target("S2ENTRY") + .event("ENTRY") + .and() + .withExternal() + .source("S22").target("S2EXIT") + .event("EXIT") + .and() + .withEntry() + .source("S2ENTRY").target("S22") + .and() + .withExit() + .source("S2EXIT").target("S3"); + } +} +``` + +As shown in the preceding, you need to mark particular states as being `exit` and`entry` states. Then you create a normal transitions into those states +and also specify `withExit()` and `withEntry()`, where those states +exit and entry respectively. + +### Configuring Common Settings + +You can set part of a common state machine configuration by using`ConfigurationConfigurer`. With it you can set `BeanFactory` and an autostart flag +for a state machine. It also lets you register `StateMachineListener` instances, +configure transition conflict policy and region execution policy. 
+The following example shows how to use `ConfigurationConfigurer`: + +``` +@Configuration +@EnableStateMachine +public class Config17 + extends EnumStateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineConfigurationConfigurer config) + throws Exception { + config + .withConfiguration() + .autoStartup(true) + .machineId("myMachineId") + .beanFactory(new StaticListableBeanFactory()) + .listener(new StateMachineListenerAdapter()) + .transitionConflictPolicy(TransitionConflictPolicy.CHILD) + .regionExecutionPolicy(RegionExecutionPolicy.PARALLEL); + } +} +``` + +By default, the state machine `autoStartup` flag is disabled, because all +instances that handle sub-states are controlled by the state machine itself +and cannot be automatically started. Also, it is much safer to leave +whether a machine should be started +automatically or not to the user. This flag controls only the autostart of a +top-level state machine. + +Setting `machineId` within a configuration class is simply a convenience for those times when +you want or need to do it there. + +Registering `StateMachineListener` instances is also partly for +convenience but is required if you want to catch a callback during a +state machine lifecycle, such as getting notified of a state machine’s +start and stop events. Note that you cannot listen a state +machine’s start events if `autoStartup` is enabled, unless you register a listener +during a configuration phase. + +You can use `transitionConflictPolicy` when multiple +transition paths could be selected. One usual use case for this is when a +machine contains anonymous transitions that lead out from a sub-state +and a parent state and you want to define a policy in which one is +selected. This is a global setting within a machine instance and +defaults to `CHILD`. + +You can use `withDistributed()` to configure `DistributedStateMachine`. It +lets you set a `StateMachineEnsemble`, which (if it exists) automatically +wraps any created `StateMachine` with `DistributedStateMachine` and +enables distributed mode. The following example shows how to use it: + +``` +@Configuration +@EnableStateMachine +public class Config18 + extends EnumStateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineConfigurationConfigurer config) + throws Exception { + config + .withDistributed() + .ensemble(stateMachineEnsemble()); + } + + @Bean + public StateMachineEnsemble stateMachineEnsemble() + throws Exception { + // naturally not null but should return ensemble instance + return null; + } +} +``` + +For more about distributed states, see [Using Distributed States](#sm-distributed). + +The `StateMachineModelVerifier` interface is used internally to +do some sanity checks for a state machine’s structure. Its purpose is to +fail fast early instead of letting common configuration errors into a +state machine. By default, a verifier is automatically enabled and the`DefaultStateMachineModelVerifier` implementation is used. + +With `withVerifier()`, you can disable verifier or set a custom one if +needed. 
The following example shows how to do so: + +``` +@Configuration +@EnableStateMachine +public class Config19 + extends EnumStateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineConfigurationConfigurer config) + throws Exception { + config + .withVerifier() + .enabled(true) + .verifier(verifier()); + } + + @Bean + public StateMachineModelVerifier verifier() { + return new StateMachineModelVerifier() { + + @Override + public void verify(StateMachineModel model) { + // throw exception indicating malformed model + } + }; + } +} +``` + +For more about config model, see [StateMachine Config Model](#devdocs-configmodel). + +| |The `withSecurity`, `withMonitoring` and `withPersistence` configuration methods
are documented in [State Machine Security](#sm-security), [Monitoring a State Machine](#sm-monitoring), and [Using `StateMachineRuntimePersister`](#sm-persist-statemachineruntimepersister), respectively.|
|---|---|

### Configuring Model

`StateMachineModelFactory` is a hook that lets you configure a state machine model
without using manual configuration. Essentially, it is a third-party
integration point into the configuration model.
You can hook `StateMachineModelFactory` into a configuration model by
using a `StateMachineModelConfigurer`. The following example shows how to do so:

```
@Configuration
@EnableStateMachine
public static class Config1 extends StateMachineConfigurerAdapter<String, String> {

    @Override
    public void configure(StateMachineModelConfigurer<String, String> model) throws Exception {
        model
            .withModel()
                .factory(modelFactory());
    }

    @Bean
    public StateMachineModelFactory<String, String> modelFactory() {
        return new CustomStateMachineModelFactory();
    }
}
```

The following example uses `CustomStateMachineModelFactory` to
define two states (`S1` and `S2`) and an event (`E1`) between those
states:

```
public static class CustomStateMachineModelFactory implements StateMachineModelFactory<String, String> {

    @Override
    public StateMachineModel<String, String> build() {
        ConfigurationData<String, String> configurationData = new ConfigurationData<>();
        Collection<StateData<String, String>> stateData = new ArrayList<>();
        stateData.add(new StateData<String, String>("S1", true));
        stateData.add(new StateData<String, String>("S2"));
        StatesData<String, String> statesData = new StatesData<>(stateData);
        Collection<TransitionData<String, String>> transitionData = new ArrayList<>();
        transitionData.add(new TransitionData<String, String>("S1", "S2", "E1"));
        TransitionsData<String, String> transitionsData = new TransitionsData<>(transitionData);
        StateMachineModel<String, String> stateMachineModel = new DefaultStateMachineModel<String, String>(configurationData,
                statesData, transitionsData);
        return stateMachineModel;
    }

    @Override
    public StateMachineModel<String, String> build(String machineId) {
        return build();
    }
}
```

| |Defining a custom model is usually not what people are looking for,
although it is possible. However, it is a central concept of allowing
external access to this configuration model.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can find an example of using this model factory integration in[Eclipse Modeling Support](#sm-papyrus). You can find more generic info about custom model integration +in [Developer Documentation](#devdocs). + +### Things to Remember + +When defining actions, guards, or any other references from a +configuration, it pays to remember how Spring Framework works +with beans. In the next example, we have defined a normal configuration with +states `S1` and `S2` and four transitions between those. All transitions +are guarded by either `guard1` or `guard2`. You must ensure that`guard1` is created as a real bean because it is annotated with`@Bean`, while `guard2` is not. + +This means that event `E3` would get the `guard2` condition as`TRUE`, and `E4` would get the `guard2` condition as `FALSE`, because those are +coming from plain method calls to those functions. + +However, because `guard1` is defined as a `@Bean`, it is proxied by the +Spring Framework. Thus, additional calls to its method result in +only one instantiation of that instance. Event `E1` would first get the +proxied instance with condition `TRUE`, while event `E2` would get the same +instance with `TRUE` condition when the method call was defined with`FALSE`. This is not a Spring State Machine-specific behavior. Rather, it is +how Spring Framework works with beans. +The following example shows how this arrangement works: + +``` +@Configuration +@EnableStateMachine +public class Config1 + extends StateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineStateConfigurer states) + throws Exception { + states + .withStates() + .initial("S1") + .state("S2"); + } + + @Override + public void configure(StateMachineTransitionConfigurer transitions) + throws Exception { + transitions + .withExternal() + .source("S1").target("S2").event("E1").guard(guard1(true)) + .and() + .withExternal() + .source("S1").target("S2").event("E2").guard(guard1(false)) + .and() + .withExternal() + .source("S1").target("S2").event("E3").guard(guard2(true)) + .and() + .withExternal() + .source("S1").target("S2").event("E4").guard(guard2(false)); + } + + @Bean + public Guard guard1(final boolean value) { + return new Guard() { + @Override + public boolean evaluate(StateContext context) { + return value; + } + }; + } + + public Guard guard2(final boolean value) { + return new Guard() { + @Override + public boolean evaluate(StateContext context) { + return value; + } + }; + } +} +``` + +## State Machine ID + +Various classes and interfaces use `machineId` either as a variable or as a +parameter in methods. This section takes a closer look at how`machineId` relates to normal machine operation and instantiation. + +During runtime, a `machineId` really does not have any big operational +role except to distinguish machines from each other — for example, when +following logs or doing deeper debugging. Having a lot of different +machine instances quickly gets developers lost in translation if there is +no easy way to identify these instances. As a result, we added the option to set the`machineId`. + +### Using `@EnableStateMachine` + +Setting `machineId` in Java configuration as `mymachine` then exposes that value +for logs. This same `machineId` is also available from the`StateMachine.getId()` method. 
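At runtime, you can read the configured ID straight off the machine, as in the following one-line sketch (assuming the configuration shown below):

```
// returns "mymachine" for the configuration shown below
String id = stateMachine.getId();
```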
The following example uses the `machineId` method: + +``` +@Override +public void configure(StateMachineConfigurationConfigurer config) + throws Exception { + config + .withConfiguration() + .machineId("mymachine"); +} +``` + +The following example of log output shows the `mymachine` ID: + +``` +11:23:54,509 INFO main support.LifecycleObjectSupport [main] - +started S2 S1 / S1 / uuid=8fe53d34-8c85-49fd-a6ba-773da15fcaf1 / id=mymachine +``` + +| |The manual builder (see [State Machine through a Builder](#state-machine-via-builder)) uses the same configuration
interface, meaning that the behavior is equivalent.|
|---|---|

### Using `@EnableStateMachineFactory`

You can see the same `machineId` getting configured if you use a `StateMachineFactory` and request a new machine by using that ID,
as the following example shows:

```
StateMachineFactory<String, String> factory = context.getBean(StateMachineFactory.class);
StateMachine<String, String> machine = factory.getStateMachine("mymachine");
```

### Using `StateMachineModelFactory`

Behind the scenes, all machine configurations are first translated into a `StateMachineModel` so that `StateMachineFactory` need not know
where the configuration originated, as a machine can be built from
Java configuration, UML, or a repository. If you want to go crazy, you can also use a custom `StateMachineModel`, which is the lowest possible
level at which to define configuration.

What do all of these have to do with a `machineId`? `StateMachineModelFactory` also has a method with the following signature: `StateMachineModel<S, E> build(String machineId)`, which a `StateMachineModelFactory` implementation may choose to use.

`RepositoryStateMachineModelFactory` (see [Repository Support](#sm-repository)) uses `machineId` to support different configurations in a persistent
store through Spring Data Repository interfaces. For example, both `StateRepository` and `TransitionRepository` have a method, `List<T> findByMachineId(String machineId)`, to build different states and
transitions by a `machineId`. With `RepositoryStateMachineModelFactory`, if `machineId` is empty
or `NULL`, it defaults to the repository configuration (in a backing persistent model)
without a known machine id.

| |Currently, `UmlStateMachineModelFactory` does not distinguish between
different machine IDs, as UML source is always coming from the same
file. This may change in future releases.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +## State Machine Factories + +There are use cases when a state machine needs to be created dynamically +instead of by defining static configuration at compile time. For example, +if there are custom components that use their own state machines +and these components are created dynamically, it is impossible to have +a static state machine that is built during the application start. Internally, +state machines are always built through factory interfaces. This then +gives you an option to use this feature programmatically. +Configuration for a state machine factory is exactly the same as shown +in various examples in this document where state machine configuration +is hard coded. + +### Factory through an Adapter + +Actually creating a state machine by using `@EnableStateMachine`works through a factory, so `@EnableStateMachineFactory` merely exposes +that factory through its interface. The following example uses`@EnableStateMachineFactory`: + +``` +@Configuration +@EnableStateMachineFactory +public class Config6 + extends EnumStateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineStateConfigurer states) + throws Exception { + states + .withStates() + .initial(States.S1) + .end(States.SF) + .states(EnumSet.allOf(States.class)); + } + +} +``` + +Now that you have used `@EnableStateMachineFactory` to create a factory +instead of a state machine bean, you can inject it and use it (as is) to +request new state machines. The following example shows how to do so: + +``` +public class Bean3 { + + @Autowired + StateMachineFactory factory; + + void method() { + StateMachine stateMachine = factory.getStateMachine(); + stateMachine.startReactively().subscribe(); + } +} +``` + +#### Adapter Factory Limitations + +The current limitation of factory is that all the actions and guard with which it +associates a state machine share the same instance. +This means that, from your actions and guard, you need to +specifically handle the case in which the same bean is called by different +state machines. This limitation is something that will be resolved in +future releases. + +### State Machine through a Builder + +Using adapters (as shown above) has a limitation imposed by its +requirement to work through Spring `@Configuration` classes and the +application context. While this is a very clear model to configure a +state machine, it limits configuration at compile time, +which is not always what a user wants to do. If there is a requirement +to build more dynamic state machines, you can use a simple builder pattern +to construct similar instances. By using strings as states and +events, you can use this builder pattern to build fully dynamic state +machines outside of a Spring application context. The following example +shows how to do so: + +``` +StateMachine buildMachine1() throws Exception { + Builder builder = StateMachineBuilder.builder(); + builder.configureStates() + .withStates() + .initial("S1") + .end("SF") + .states(new HashSet(Arrays.asList("S1","S2","S3","S4"))); + return builder.build(); +} +``` + +The builder uses the same configuration interfaces behind the scenes that +the `@Configuration` model uses for adapter classes. The same model goes to +configuring transitions, states, and common configuration through a builder’s +methods. 
This means that whatever you can use with a normal `EnumStateMachineConfigurerAdapter` or `StateMachineConfigurerAdapter`, you can use dynamically through a builder.

| |Currently, the `builder.configureStates()`, `builder.configureTransitions()`,
and `builder.configureConfiguration()` interface methods cannot be
chained together, meaning that builder methods need to be called individually.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example sets a number of options with a builder: + +``` +StateMachine buildMachine2() throws Exception { + Builder builder = StateMachineBuilder.builder(); + builder.configureConfiguration() + .withConfiguration() + .autoStartup(false) + .beanFactory(null) + .listener(null); + return builder.build(); +} +``` + +You need to understand when common configuration needs +to be used with machines instantiated from a builder. You can use a configurer +returned from a `withConfiguration()` to setup `autoStart` and `BeanFactory`. +You can also use one to register a `StateMachineListener`. If a `StateMachine`instance returned from a builder is registered as a bean by using `@Bean`, `BeanFactory`is attached automatically. If you use instances outside of a spring application context, +you must use these methods to set up the needed facilities. + +## Using Deferred Events + +When an event is sent, it may fire an `EventTrigger`, which may then cause +a transition to happen, if a state machine is in a state where a trigger is +evaluated successfully. Normally, this may lead to a situation where +an event is not accepted and is dropped. However, you may wish +postpone this event until a state machine enters another state. In that case, +you can accept that event. In other words, an event +arrives at an inconvenient time. + +Spring Statemachine provides a mechanism for deferring events for later +processing. Every state can have a list of deferred events. If an event +in the current state’s deferred event list occurs, the event is saved +(deferred) for future processing until a state is entered that does not list +the event in its deferred event list. When such a state is entered, the +state machine automatically recalls any saved events that are no longer +deferred and then either consumes or discards these events. It is possible +for a superstate to have a transition defined on an event that is deferred +by a substate. Following same hierarchical state machines concepts, the substate +takes precedence over the superstate, the event is deferred, and the +transition for the superstate is not run. With orthogonal regions, +where one orthogonal region defers an event and another accepts the event, the +accept takes precedence and the event is consumed and not deferred. + +The most obvious use case for event deferring is when an event causes +a transition into a particular state and the state machine is then returned back +to its original state where a second event should cause the same +transition. 
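With the reactive API, the outcome of each sent event is reported as a `StateMachineEventResult`, whose result type distinguishes accepted, denied, and deferred events. The following hedged sketch shows one way to observe deferral in practice (the event payload is illustrative):

```
stateMachine
    .sendEvent(Mono.just(MessageBuilder.withPayload("DEPLOY").build()))
    // result type is ACCEPTED, DENIED, or DEFERRED
    .doOnNext(result -> System.out.println(result.getResultType()))
    .subscribe();
```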
The following example shows this situation: + +``` +@Configuration +@EnableStateMachine +static class Config5 extends StateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineStateConfigurer states) + throws Exception { + states + .withStates() + .initial("READY") + .state("DEPLOYPREPARE", "DEPLOY") + .state("DEPLOYEXECUTE", "DEPLOY"); + } + + @Override + public void configure(StateMachineTransitionConfigurer transitions) + throws Exception { + transitions + .withExternal() + .source("READY").target("DEPLOYPREPARE") + .event("DEPLOY") + .and() + .withExternal() + .source("DEPLOYPREPARE").target("DEPLOYEXECUTE") + .and() + .withExternal() + .source("DEPLOYEXECUTE").target("READY"); + } +} +``` + +In the preceding example, the state machine has a state of `READY`, which indicates that the machine is +ready to process events that would take it into a `DEPLOY` state, where the +actual deployment would happen. After a deploy action has been run, the machine +is returned back to the `READY` state. Sending multiple events in a`READY` state does not cause any trouble if the machine is using synchronous executors, +because event sending would block between event calls. However, if the executor uses +threads, other events may get lost, because the machine is no longer in a state where +events can be processed. Thus, deferring some of these events lets the machine +preserve them. The following example shows how to configure such an arrangement: + +``` +@Configuration +@EnableStateMachine +static class Config6 extends StateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineStateConfigurer states) + throws Exception { + states + .withStates() + .initial("READY") + .state("DEPLOY", "DEPLOY") + .state("DONE") + .and() + .withStates() + .parent("DEPLOY") + .initial("DEPLOYPREPARE") + .state("DEPLOYPREPARE", "DONE") + .state("DEPLOYEXECUTE"); + } + + @Override + public void configure(StateMachineTransitionConfigurer transitions) + throws Exception { + transitions + .withExternal() + .source("READY").target("DEPLOY") + .event("DEPLOY") + .and() + .withExternal() + .source("DEPLOYPREPARE").target("DEPLOYEXECUTE") + .and() + .withExternal() + .source("DEPLOYEXECUTE").target("READY") + .and() + .withExternal() + .source("READY").target("DONE") + .event("DONE") + .and() + .withExternal() + .source("DEPLOY").target("DONE") + .event("DONE"); + } +} +``` + +In the preceding example, the state machine uses nested states instead of a flat +state model, so the `DEPLOY` event can be deferred directly in a substate. +It also shows the concept of deferring the `DONE` event in a +sub-state that would then override the anonymous transition between +the `DEPLOY` and `DONE` states if the state machine happens to be in a`DEPLOYPREPARE` state when the `DONE` event is dispatched. In the`DEPLOYEXECUTE` state when the `DONE` event is not deferred, this event would +be handled in a super state. + +## Using Scopes + +Support for scopes in a state machine is very limited, but you can +enable `session` scope by using a normal Spring `@Scope` annotation in one of two ways: + +* If the state machine is built manually by using a builder and returned into the + context as a `@Bean`. + +* Through a configuration adapter. + +Both of +these need `@Scope` to be present, with `scopeName` set to`session` and `proxyMode` set to `ScopedProxyMode.TARGET_CLASS`. 
The following examples +show both use cases: + +``` +@Configuration +public class Config3 { + + @Bean + @Scope(scopeName="session", proxyMode=ScopedProxyMode.TARGET_CLASS) + StateMachine stateMachine() throws Exception { + Builder builder = StateMachineBuilder.builder(); + builder.configureConfiguration() + .withConfiguration() + .autoStartup(true); + builder.configureStates() + .withStates() + .initial("S1") + .state("S2"); + builder.configureTransitions() + .withExternal() + .source("S1") + .target("S2") + .event("E1"); + StateMachine stateMachine = builder.build(); + return stateMachine; + } + +} +``` + +``` +@Configuration +@EnableStateMachine +@Scope(scopeName="session", proxyMode=ScopedProxyMode.TARGET_CLASS) +public static class Config4 extends StateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineConfigurationConfigurer config) throws Exception { + config + .withConfiguration() + .autoStartup(true); + } + + @Override + public void configure(StateMachineStateConfigurer states) throws Exception { + states + .withStates() + .initial("S1") + .state("S2"); + } + + @Override + public void configure(StateMachineTransitionConfigurer transitions) throws Exception { + transitions + .withExternal() + .source("S1") + .target("S2") + .event("E1"); + } + +} +``` + +TIP:See [Scope](#statemachine-examples-scope) for how to use session scoping. + +Once you have scoped a state machine into `session`, autowiring it into +a `@Controller` gives a new state machine instance per session. +Each state machine is then destroyed when `HttpSession` is invalidated. +The following example shows how to use a state machine in a controller: + +``` +@Controller +public class StateMachineController { + + @Autowired + StateMachine stateMachine; + + @RequestMapping(path="/state", method=RequestMethod.POST) + public HttpEntity setState(@RequestParam("event") String event) { + stateMachine + .sendEvent(Mono.just(MessageBuilder + .withPayload(event).build())) + .subscribe(); + return new ResponseEntity(HttpStatus.ACCEPTED); + } + + @RequestMapping(path="/state", method=RequestMethod.GET) + @ResponseBody + public String getState() { + return stateMachine.getState().getId(); + } +} +``` + +| |Using state machines in a `session` scopes needs careful planning,
mostly because it is a relatively heavy component.|
|---|---|

| |Spring Statemachine POMs have no dependencies on Spring MVC
classes, which you need in order to work with session scope. However, if you are
working with a web application, you have already pulled those dependencies
directly from Spring MVC or Spring Boot.|
|---|---|

## Using Actions

Actions are one of the most useful components that you can use to
interact and collaborate with a state machine. You can run actions
at various points in the lifecycle of a state machine and of its states
(for example, when entering or exiting states or during transitions).
The following example shows how to use actions in a state machine:

```
@Override
public void configure(StateMachineStateConfigurer<States, Events> states)
        throws Exception {
    states
        .withStates()
            .initial(States.SI)
            .state(States.S1, action1(), action2())
            .state(States.S2, action1(), action2())
            .state(States.S3, action1(), action3());
}
```

In the preceding example, the `action1` and `action2` beans are attached as the entry and
exit actions of the states, respectively. The following example defines those actions (and `action3`):

```
@Bean
public Action<States, Events> action1() {
    return new Action<States, Events>() {

        @Override
        public void execute(StateContext<States, Events> context) {
        }
    };
}

@Bean
public BaseAction action2() {
    return new BaseAction();
}

@Bean
public SpelAction action3() {
    ExpressionParser parser = new SpelExpressionParser();
    return new SpelAction(
        parser.parseExpression(
            "stateMachine.sendEvent(T(org.springframework.statemachine.docs.Events).E1)"));
}

public class BaseAction implements Action<States, Events> {

    @Override
    public void execute(StateContext<States, Events> context) {
    }
}

public class SpelAction extends SpelExpressionAction<States, Events> {

    public SpelAction(Expression expression) {
        super(expression);
    }
}
```

You can implement `Action` directly as an anonymous function, or create
your own implementation and define the appropriate implementation as a
bean.

In the preceding example, `action3` uses a SpEL expression to send the `Events.E1` event into
the state machine.

| |`StateContext` is described in [Using `StateContext`](#sm-statecontext).|
|---|---|

### SpEL Expressions with Actions

You can also use a SpEL expression as a replacement for a
full `Action` implementation, as `action3` above shows.

### Reactive Actions

The normal `Action` interface is a simple functional method that takes a `StateContext` and returns *void*. Nothing in the interface itself blocks, but you may block
inside the method, and that is a bit of a problem, because the framework cannot
know what exactly happens inside it.

```
public interface Action<S, E> {

    void execute(StateContext<S, E> context);
}
```

To overcome this issue, we have internally changed `Action` handling to
process a plain Java `Function` that takes a `StateContext` and returns a `Mono<Void>`. This way, we can call an action fully reactively: it
executes only when it is subscribed to, and the framework can wait for
completion in a non-blocking way.

```
public interface ReactiveAction<S, E> extends Function<StateContext<S, E>, Mono<Void>> {
}
```

| |Internally, the old `Action` interface is wrapped with a Reactor Mono Runnable, as it shares the same return type. We have no control over what you do in that method!|
|---|---|
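
If you want to author an action directly in this reactive shape, a minimal sketch could look like the following. The `Function<StateContext<S, E>, Mono<Void>>` signature is the `ReactiveAction` contract shown above; the class name, the `String` typing, and the logging are illustrative assumptions:

```
import java.util.function.Function;

import org.springframework.statemachine.StateContext;

import reactor.core.publisher.Mono;

public class LoggingReactiveAction implements Function<StateContext<String, String>, Mono<Void>> {

    @Override
    public Mono<Void> apply(StateContext<String, String> context) {
        // The work runs only when the returned Mono is subscribed to
        // and never blocks the calling thread.
        return Mono.fromRunnable(() ->
                System.out.println("Entering state " + context.getTarget().getId()));
    }
}
```
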
## Using Guards

As shown in [Things to Remember](#statemachine-config-thingstoremember), the `guard1` and `guard2` beans are attached to the entry and
exit states, respectively.
The following example also uses guards on events:

```
@Override
public void configure(StateMachineTransitionConfigurer<States, Events> transitions)
        throws Exception {
    transitions
        .withExternal()
            .source(States.SI).target(States.S1)
            .event(Events.E1)
            .guard(guard1())
            .and()
        .withExternal()
            .source(States.S1).target(States.S2)
            .event(Events.E1)
            .guard(guard2())
            .and()
        .withExternal()
            .source(States.S2).target(States.S3)
            .event(Events.E2)
            .guardExpression("extendedState.variables.get('myvar')");
}
```

You can implement `Guard` directly as an anonymous function, or create
your own implementation and define the appropriate implementation as a
bean. In the preceding example, `guardExpression` checks whether the extended
state variable named `myvar` evaluates to `TRUE`.
The following example implements some sample guards:

```
@Bean
public Guard<States, Events> guard1() {
    return new Guard<States, Events>() {

        @Override
        public boolean evaluate(StateContext<States, Events> context) {
            return true;
        }
    };
}

@Bean
public BaseGuard guard2() {
    return new BaseGuard();
}

public class BaseGuard implements Guard<States, Events> {

    @Override
    public boolean evaluate(StateContext<States, Events> context) {
        return false;
    }
}
```

| |`StateContext` is described in [Using `StateContext`](#sm-statecontext).|
|---|---|

### SpEL Expressions with Guards

You can also use a SpEL expression as a replacement for a
full `Guard` implementation. The only requirement is that the expression needs
to return a `Boolean` value to satisfy the `Guard` contract. This is
demonstrated by the `guardExpression()` function, which takes an
expression as an argument.

### Reactive Guards

The normal `Guard` interface is a simple functional method that takes a `StateContext` and returns a *boolean*. Nothing in the interface itself blocks, but you may block
inside the method, and that is a bit of a problem, because the framework cannot
know what exactly happens inside it.

```
public interface Guard<S, E> {

    boolean evaluate(StateContext<S, E> context);
}
```

To overcome this issue, we have internally changed `Guard` handling to
process a plain Java `Function` that takes a `StateContext` and returns a `Mono<Boolean>`. This way, we can call a guard fully reactively: it is
evaluated only when it is subscribed to, and the framework can wait for its
return value in a non-blocking way.

```
public interface ReactiveGuard<S, E> extends Function<StateContext<S, E>, Mono<Boolean>> {
}
```

| |Internally, the old `Guard` interface is wrapped with a Reactor Mono Function. We have no control over what you do in that method!|
|---|---|
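
A minimal sketch of a guard in this reactive shape, re-using the `myvar` extended-state variable from the earlier `guardExpression` example, could look like the following. The class name and the `String` typing are illustrative assumptions:

```
import java.util.function.Function;

import org.springframework.statemachine.StateContext;

import reactor.core.publisher.Mono;

public class MyVarReactiveGuard implements Function<StateContext<String, String>, Mono<Boolean>> {

    @Override
    public Mono<Boolean> apply(StateContext<String, String> context) {
        // Evaluated lazily, on subscription, without blocking.
        return Mono.fromSupplier(() ->
                Boolean.TRUE.equals(context.getExtendedState().getVariables().get("myvar")));
    }
}
```
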
## Using Extended State

Assume that you need to create a state machine that tracks how
many times a user presses a key on a keyboard and then terminates
when keys are pressed 1000 times. A possible but really naive solution
would be to create a new state for each of the 1000 key presses.
You might suddenly have an astronomical number of
states, which, naturally, is not very practical.

This is where extended state variables come to the rescue: they let you drive
state machine changes without adding more states. Instead,
you can do a simple variable change during a transition.

`StateMachine` has a method called `getExtendedState()`. It returns an
interface called `ExtendedState`, which gives access to extended state
variables. You can access these variables directly through a state machine or through
`StateContext` during a callback from actions or transitions.
The following example shows how to do so:

```
public Action<String, String> myVariableAction() {
    return new Action<String, String>() {

        @Override
        public void execute(StateContext<String, String> context) {
            context.getExtendedState()
                .getVariables().put("mykey", "myvalue");
        }
    };
}
```

If you need to get notified of extended state variable
changes, you have two options: use a `StateMachineListener` and its
`extendedStateChanged(key, value)` callback, or use a Spring application
context listener, shown later in this section. The following example
uses the `extendedStateChanged` method:

```
public class ExtendedStateVariableListener
        extends StateMachineListenerAdapter<String, String> {

    @Override
    public void extendedStateChanged(Object key, Object value) {
        // do something with changed variable
    }
}
```

Alternatively, you can implement a Spring application context listener for
`OnExtendedStateChanged`. As mentioned in [Listening to State Machine Events](#sm-listeners),
you can also listen to all `StateMachineEvent` events.
The following example uses `onApplicationEvent` to listen for state changes:

```
public class ExtendedStateVariableEventListener
        implements ApplicationListener<OnExtendedStateChanged> {

    @Override
    public void onApplicationEvent(OnExtendedStateChanged event) {
        // do something with changed variable
    }
}
```
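
To make the key-press scenario above concrete, here is a minimal sketch under assumed `String` states and events: an action increments a `count` variable on each press, and a guard opens the terminating transition once 1000 presses are reached. The bean names and the `count` key are illustrative, not part of any sample in this document:

```
@Bean
public Action<String, String> countAction() {
    return context -> {
        // Count presses in extended state instead of modeling them as states.
        Integer count = context.getExtendedState().get("count", Integer.class);
        context.getExtendedState().getVariables().put("count", count == null ? 1 : count + 1);
    };
}

@Bean
public Guard<String, String> limitGuard() {
    return context -> {
        // Let the terminating transition fire only after 1000 presses.
        Integer count = context.getExtendedState().get("count", Integer.class);
        return count != null && count >= 1000;
    };
}
```
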
## Using `StateContext`

[`StateContext`](https://docs.spring.io/spring-statemachine/docs/3.0.1/api/org/springframework/statemachine/StateContext.html) is one of the most important objects
when working with a state machine, as it is passed into various methods
and callbacks to give the current state of a state machine and
where it is possibly going. You can think of it as a
snapshot of the state machine's current stage at the point in time when the `StateContext` is retrieved.

| |In Spring Statemachine 1.0.x, `StateContext` usage was relatively naive in terms of how it was used to pass stuff around as a simple “POJO”. Starting from Spring Statemachine 1.1.x, its role has been greatly improved by making it a first-class citizen in a state machine.|
|---|---|

You can use `StateContext` to get access to the following:

* The current `Message` or `Event` (or their `MessageHeaders`, if known).

* The state machine’s `Extended State`.

* The `StateMachine` itself.

* Possible state machine errors.

* The current `Transition`, if applicable.

* The source state of the state machine.

* The target state of the state machine.

* The current `Stage`, as described in [Stages](#sm-statecontext-stage).

`StateContext` is passed into various components, such as `Action` and `Guard`.

### Stages

[`Stage`](https://docs.spring.io/spring-statemachine/docs/3.0.1/api/org/springframework/statemachine/StateContext.Stage.html) is a representation of the `stage` at
which a state machine is currently interacting with a user. The currently available
stages are `EVENT_NOT_ACCEPTED`, `EXTENDED_STATE_CHANGED`, `STATE_CHANGED`, `STATE_ENTRY`, `STATE_EXIT`, `STATEMACHINE_ERROR`, `STATEMACHINE_START`, `STATEMACHINE_STOP`, `TRANSITION`, `TRANSITION_START`, and `TRANSITION_END`. These stages may look familiar, as
they match how you can interact with listeners (as described in [Listening to State Machine Events](#sm-listeners)).
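
As a small sketch of how a callback can read these pieces, the following hypothetical action prints a few of the accessors listed above:

```
import org.springframework.statemachine.StateContext;
import org.springframework.statemachine.action.Action;

public class InspectingAction implements Action<String, String> {

    @Override
    public void execute(StateContext<String, String> context) {
        // Which lifecycle point triggered this callback (for example, STATE_ENTRY).
        System.out.println("stage=" + context.getStage());
        // The event and message headers that drove this change, if any.
        System.out.println("event=" + context.getEvent());
        System.out.println("headers=" + context.getMessageHeaders());
        // Extended state variables travel with the machine.
        System.out.println("vars=" + context.getExtendedState().getVariables());
    }
}
```
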
## Triggering Transitions

Driving a state machine is done by using transitions, which are triggered
by triggers. The currently supported triggers are `EventTrigger` and `TimerTrigger`.

### Using `EventTrigger`

`EventTrigger` is the most useful trigger, because it lets you
directly interact with a state machine by sending events to it. These
events are also called signals. You can add a trigger to a transition
by associating a state with it during configuration.
The following example shows how to do so:

```
@Autowired
StateMachine<String, String> stateMachine;

void signalMachine() {
    stateMachine
        .sendEvent(Mono.just(MessageBuilder
            .withPayload("E1").build()))
        .subscribe();

    Message<String> message = MessageBuilder
        .withPayload("E2")
        .setHeader("foo", "bar")
        .build();
    stateMachine.sendEvent(Mono.just(message)).subscribe();
}
```

Whether you send one event or multiple events, the result is always a sequence
of results. This is because, in the presence of multiple regions, results
come back from the machines in all of those regions. This is shown
with the `sendEventCollect` method, which gives a list of results. The method
itself is just syntactic sugar that collects the `Flux` into a list. If there is
just one region, the list contains one result.

```
Message<String> message1 = MessageBuilder
    .withPayload("E1")
    .build();

Mono<List<StateMachineEventResult<String, String>>> results =
    stateMachine.sendEventCollect(Mono.just(message1));

results.subscribe();
```

| |Nothing happens until the returned flux is subscribed to. See [StateMachineEventResult](#sm-triggers-statemachineeventresult) for more about it.|
|---|---|

The preceding examples send events by constructing a `Mono` that wraps
a `Message` and subscribing to the returned results. A `Message` lets
us add arbitrary extra information to an event, which is then visible
to `StateContext` when (for example) you implement actions.
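
For example, an action can read such headers through `StateContext`. The following is a minimal sketch; the class name is hypothetical, and the `foo` header is the one set in `signalMachine()` above:

```
import org.springframework.statemachine.StateContext;
import org.springframework.statemachine.action.Action;

public class HeaderAwareAction implements Action<String, String> {

    @Override
    public void execute(StateContext<String, String> context) {
        // Reads the "foo" header set on the E2 message in signalMachine().
        Object foo = context.getMessageHeaders().get("foo");
        System.out.println("foo header = " + foo);
    }
}
```
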
| |Message headers are generally passed on until the machine runs to
completion for a specific event. For example, if an event causes a
transition into a state `A`, which has an anonymous transition into a
state `B`, the original event is available for actions or guards in state `B`.|
|---|---|

It is also possible to send a `Flux` of messages instead of sending just
one with a `Mono`:

```
Message<String> message1 = MessageBuilder
    .withPayload("E1")
    .build();
Message<String> message2 = MessageBuilder
    .withPayload("E2")
    .build();

Flux<StateMachineEventResult<String, String>> results =
    stateMachine.sendEvents(Flux.just(message1, message2));

results.subscribe();
```

#### StateMachineEventResult

`StateMachineEventResult` contains more detailed information about the result
of sending an event. From it, you can get the `Region` that handled the event,
the `Message` itself, and the actual `ResultType`. From the `ResultType`, you
can see whether the message was accepted, denied, or deferred. Generally speaking,
when the subscription completes, the events have been passed into the machine.

### Using `TimerTrigger`

`TimerTrigger` is useful when something needs to be triggered
automatically without any user interaction. A `Trigger` is added to a
transition by associating a timer with it during configuration.

Currently, there are two types of supported timers: one that fires
continuously and one that fires once the source state is entered.
The following example shows how to use the triggers:

```
@Configuration
@EnableStateMachine
public class Config2 extends StateMachineConfigurerAdapter<String, String> {

    @Override
    public void configure(StateMachineStateConfigurer<String, String> states)
            throws Exception {
        states
            .withStates()
                .initial("S1")
                .state("S2")
                .state("S3");
    }

    @Override
    public void configure(StateMachineTransitionConfigurer<String, String> transitions)
            throws Exception {
        transitions
            .withExternal()
                .source("S1").target("S2").event("E1")
                .and()
            .withExternal()
                .source("S1").target("S3").event("E2")
                .and()
            .withInternal()
                .source("S2")
                .action(timerAction())
                .timer(1000)
                .and()
            .withInternal()
                .source("S3")
                .action(timerAction())
                .timerOnce(1000);
    }

    @Bean
    public TimerAction timerAction() {
        return new TimerAction();
    }
}

public class TimerAction implements Action<String, String> {

    @Override
    public void execute(StateContext<String, String> context) {
        // do something every 1 sec
    }
}
```

The preceding example has three states: `S1`, `S2`, and `S3`. We have a normal
external transition from `S1` to `S2` and from `S1` to `S3` with
events `E1` and `E2`, respectively. The interesting parts
for working with `TimerTrigger` are the
internal transitions we define for source states `S2` and `S3`.

For both transitions, we invoke the `Action` bean (`timerAction`), where
source state `S2` uses `timer` and `S3` uses `timerOnce`.
Values given are in milliseconds (`1000` milliseconds, or one second, in both cases).

Once a state machine receives event `E1`, it does a transition
from `S1` to `S2` and the timer kicks in. When the state is `S2`, `TimerTrigger` runs and causes the transition associated with that
state; in this case, the internal transition that has the `timerAction` defined.

Once a state machine receives the `E2` event, it does a transition
from `S1` to `S3` and the timer kicks in. This timer is executed only once
after the state is entered (after the delay defined in the timer).

| |Behind the scenes, timers are simple triggers that may cause a
transition to happen. Defining a transition with `timer()` keeps
firing triggers and causes a transition only if the source state is active.
A transition with `timerOnce()` is a little different, as it
triggers only after a delay when a source state is actually entered.| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +| |Use `timerOnce()` if you want something to happen after a delay
exactly once when state is entered.| +|---|-------------------------------------------------------------------------------------------------------| + +## Listening to State Machine Events + +There are use cases where you want to know what is happening with +a state machine, react to something, or get logging details for +debugging purposes. Spring Statemachine provides interfaces for adding listeners. These listeners +then give an option to get callbacks when various state changes, +actions, and so on happen. + +You basically have two options: listen to Spring application +context events or directly attach a listener to a state machine. Both of +these basically provide the same information. One produces +events as event classes, and the other produces callbacks via a listener +interface. Both of these have pros and cons, which we discuss later. + +### Application Context Events + +Application context events classes are `OnTransitionStartEvent`,`OnTransitionEvent`, `OnTransitionEndEvent`, `OnStateExitEvent`,`OnStateEntryEvent`, `OnStateChangedEvent`, `OnStateMachineStart`,`OnStateMachineStop`, and others that extend the base event class,`StateMachineEvent`. These can be used as is with a Spring`ApplicationListener`. + +`StateMachine` sends context events through `StateMachineEventPublisher`. +The default implementation is automatically created if a `@Configuration`class is annotated with `@EnableStateMachine`. +The following example gets a `StateMachineApplicationEventListener`from a bean defined in a `@Configuration` class: + +``` +public class StateMachineApplicationEventListener + implements ApplicationListener { + + @Override + public void onApplicationEvent(StateMachineEvent event) { + } +} + +@Configuration +public class ListenerConfig { + + @Bean + public StateMachineApplicationEventListener contextListener() { + return new StateMachineApplicationEventListener(); + } +} +``` + +Context events are also automatically enabled by using `@EnableStateMachine`, +with `StateMachine` used to build a machine and registered as a bean, +as the following example shows: + +``` +@Configuration +@EnableStateMachine +public class ManualBuilderConfig { + + @Bean + public StateMachine stateMachine() throws Exception { + + Builder builder = StateMachineBuilder.builder(); + builder.configureStates() + .withStates() + .initial("S1") + .state("S2"); + builder.configureTransitions() + .withExternal() + .source("S1") + .target("S2") + .event("E1"); + return builder.build(); + } +} +``` + +### Using `StateMachineListener` + +By using `StateMachineListener`, you can either extend it and +implement all callback methods or use the `StateMachineListenerAdapter`class, which contains stub method implementations and choose which ones +to override. 
+The following example uses the latter approach: + +``` +public class StateMachineEventListener + extends StateMachineListenerAdapter { + + @Override + public void stateChanged(State from, State to) { + } + + @Override + public void stateEntered(State state) { + } + + @Override + public void stateExited(State state) { + } + + @Override + public void transition(Transition transition) { + } + + @Override + public void transitionStarted(Transition transition) { + } + + @Override + public void transitionEnded(Transition transition) { + } + + @Override + public void stateMachineStarted(StateMachine stateMachine) { + } + + @Override + public void stateMachineStopped(StateMachine stateMachine) { + } + + @Override + public void eventNotAccepted(Message event) { + } + + @Override + public void extendedStateChanged(Object key, Object value) { + } + + @Override + public void stateMachineError(StateMachine stateMachine, Exception exception) { + } + + @Override + public void stateContext(StateContext stateContext) { + } +} +``` + +In the preceding example, we created our own listener class +(`StateMachineEventListener`) that extends`StateMachineListenerAdapter`. + +The `stateContext` listener method gives access to various`StateContext` changes on a different stages. You can find more about about it in[Using `StateContext`](#sm-statecontext). + +Once you have defined your own listener, you can registered it in a +state machine by using the `addStateListener` method. It is a matter of +flavor whether to hook it up within a spring configuration or do it +manually at any time during the application life-cycle. +The following example shows how to attach a listener: + +``` +public class Config7 { + + @Autowired + StateMachine stateMachine; + + @Bean + public StateMachineEventListener stateMachineEventListener() { + StateMachineEventListener listener = new StateMachineEventListener(); + stateMachine.addStateListener(listener); + return listener; + } + +} +``` + +### Limitations and Problems + +Spring application context is not the fastest event bus out there, so we +advise giving some thought to the rate of events the state machine +sends. For better performance, it may be better to use the`StateMachineListener` interface. For this specific reason, +you can use the `contextEvents` flag with `@EnableStateMachine` and`@EnableStateMachineFactory` to disable Spring application context +events, as shown in the preceding section. +The following example shows how to disable Spring application context events: + +``` +@Configuration +@EnableStateMachine(contextEvents = false) +public class Config8 + extends EnumStateMachineConfigurerAdapter { +} + +@Configuration +@EnableStateMachineFactory(contextEvents = false) +public class Config9 + extends EnumStateMachineConfigurerAdapter { +} +``` + +## Context Integration + +It is a little limited to do interaction with a state machine by +either listening to its events or using actions with states and +transitions. From time to time, this approach is going be too limited and +verbose to create interaction with the application with which a state machine +works. For this specific use case, we have made a Spring-style +context integration that easily inserts state machine functionality +into your beans. + +The available annotations has been harmonized to enable access to the same +state machine execution points that are available from[Listening to State Machine Events](#sm-listeners). 
+ +You can use the `@WithStateMachine` annotation to associate a state +machine with an existing bean. Then you can start adding +supported annotations to the methods of that bean. +The following example shows how to do so: + +``` +@WithStateMachine +public class Bean1 { + + @OnTransition + public void anyTransition() { + } +} +``` + +You can also attach any other state machine from an +application context by using the annotation `name` field. +The following example shows how to do so: + +``` +@WithStateMachine(name = "myMachineBeanName") +public class Bean2 { + + @OnTransition + public void anyTransition() { + } +} +``` + +Sometimes, it is more convenient to use `machine id`, which is something +you can set to better identify multiple instances. This ID maps to +the `getId()` method in the `StateMachine` interface. +The following example shows how to use it: + +``` +@WithStateMachine(id = "myMachineId") +public class Bean16 { + + @OnTransition + public void anyTransition() { + } +} +``` + +You can also use `@WithStateMachine` as a meta-annotation, as shown +in the preceding example. In this case, you could annotate your bean with `WithMyBean`. +The following example shows how to do so: + +``` +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +@WithStateMachine(name = "myMachineBeanName") +public @interface WithMyBean { +} +``` + +| |The return type of these methods does not matter and is effectively
discarded.|
|---|---|

### Enabling Integration

You can enable all the features of `@WithStateMachine` by using
the `@EnableWithStateMachine` annotation, which imports the needed
configuration into the Spring Application Context. Both `@EnableStateMachine` and `@EnableStateMachineFactory` are already
annotated with this annotation, so there is no need to add it again.
However, if a machine is built and configured without
configuration adapters, you must use `@EnableWithStateMachine`
to use these features with `@WithStateMachine`.
The following example shows how to do so:

```
public static StateMachine<String, String> buildMachine(BeanFactory beanFactory) throws Exception {
    Builder<String, String> builder = StateMachineBuilder.builder();

    builder.configureConfiguration()
        .withConfiguration()
            .machineId("myMachineId")
            .beanFactory(beanFactory);

    builder.configureStates()
        .withStates()
            .initial("S1")
            .state("S2");

    builder.configureTransitions()
        .withExternal()
            .source("S1")
            .target("S2")
            .event("E1");

    return builder.build();
}

@WithStateMachine(id = "myMachineId")
static class Bean17 {

    @OnStateChanged
    public void onStateChanged() {
    }
}
```

| |If a machine is not created as a bean, you need to set a `BeanFactory` for the machine, as shown in the preceding example. Otherwise, the machine is unaware of the handlers that call your `@WithStateMachine` methods.|
|---|---|
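
When wiring such a manually built machine into the context, a minimal sketch could look like the following (the configuration class name is hypothetical; `buildMachine(...)` is the method shown above):

```
@Configuration
@EnableWithStateMachine
public class ManualMachineConfig {

    @Bean
    public StateMachine<String, String> stateMachine(BeanFactory beanFactory) throws Exception {
        // buildMachine() sets the BeanFactory, so @WithStateMachine
        // handlers such as Bean17 above can be discovered.
        return buildMachine(beanFactory);
    }
}
```
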
### Method Parameters

Every annotation supports exactly the same set of possible method
parameters, but the runtime behavior differs, depending on the
annotation itself and the stage in which the annotated method is called. To
better understand how the context works, see [Using `StateContext`](#sm-statecontext).

| |For the differences between method parameters, see the sections that describe the individual annotations, later in this document.|
|---|---|

Effectively, all annotated methods are called by using Spring SpEL
expressions, which are built dynamically during the process. To make
this work, these expressions need to have a root object (against which they evaluate).
This root object is a `StateContext`. We have also made some
tweaks internally so that it is possible to access `StateContext` methods
directly without going through the context handle.

The simplest method parameter is a `StateContext` itself.
The following example shows how to use it:

```
@WithStateMachine
public class Bean3 {

    @OnTransition
    public void anyTransition(StateContext<String, String> stateContext) {
    }
}
```

You can access the rest of the `StateContext` content.
The number and order of the parameters do not matter.
The following example shows how to access the various parts of the `StateContext` content:

```
@WithStateMachine
public class Bean4 {

    @OnTransition
    public void anyTransition(
            @EventHeaders Map<String, Object> headers,
            @EventHeader("myheader1") Object myheader1,
            @EventHeader(name = "myheader2", required = false) String myheader2,
            ExtendedState extendedState,
            StateMachine<String, String> stateMachine,
            Message<String> message,
            Exception e) {
    }
}
```

| |Instead of getting all event headers with `@EventHeaders`, you can use `@EventHeader`, which can be bound to a single header.|
|---|---|
### Transition Annotations

The annotations for transitions are `@OnTransition`, `@OnTransitionStart`,
and `@OnTransitionEnd`.

These annotations behave exactly the same. To show how they work, we show
how `@OnTransition` is used. Within this annotation, you can use the `source` and `target` properties to qualify a transition. If `source` and `target` are left empty, any transition is matched.
The following example shows how to use the `@OnTransition` annotation
(remember that `@OnTransitionStart` and `@OnTransitionEnd` work the same way):

```
@WithStateMachine
public class Bean5 {

    @OnTransition(source = "S1", target = "S2")
    public void fromS1ToS2() {
    }

    @OnTransition
    public void anyTransition() {
    }
}
```

By default, you cannot use the `@OnTransition` annotation with the state and
event enumerations that you have created, due to Java language limitations.
For this reason, you need to use string representations.

Additionally, you can access `Event Headers` and `ExtendedState` by adding the needed arguments to a method. The method
is then called automatically with these arguments.
The following example shows how to do so:

```
@WithStateMachine
public class Bean6 {

    @StatesOnTransition(source = States.S1, target = States.S2)
    public void fromS1ToS2(@EventHeaders Map<String, Object> headers, ExtendedState extendedState) {
    }
}
```

However, if you want to have a type-safe annotation, you can
create a new annotation and use `@OnTransition` as a meta-annotation.
This user-level annotation can make references to actual states and
events enumerations, and the framework tries to match these in the same way.
+The following example shows how to do so: + +``` +@Target(ElementType.METHOD) +@Retention(RetentionPolicy.RUNTIME) +@OnTransition +public @interface StatesOnTransition { + + States[] source() default {}; + + States[] target() default {}; +} +``` + +In the preceding example, we created a `@StatesOnTransition` annotation that defines`source` and `target` in a type-safe manner. +The following example uses that annotation in a bean: + +``` +@WithStateMachine +public class Bean7 { + + @StatesOnTransition(source = States.S1, target = States.S2) + public void fromS1ToS2() { + } +} +``` + +### State Annotations + +The following annotations for states are available: `@OnStateChanged`, `@OnStateEntry`, and`@OnStateExit`. The following example shows how to use `OnStateChanged` annotation (the +other two work the same way): + +``` +@WithStateMachine +public class Bean8 { + + @OnStateChanged + public void anyStateChange() { + } +} +``` + +As you can with [Transition Annotations](#state-machine-transition-annotations), you can define +target and source states. The following example shows how to do so: + +``` +@WithStateMachine +public class Bean9 { + + @OnStateChanged(source = "S1", target = "S2") + public void stateChangeFromS1toS2() { + } +} +``` + +For type safety, new annotations need to be created for enumerations by using`@OnStateChanged` as a meta-annotation. The following examples show how to do so: + +``` +@Target(ElementType.METHOD) +@Retention(RetentionPolicy.RUNTIME) +@OnStateChanged +public @interface StatesOnStates { + + States[] source() default {}; + + States[] target() default {}; +} +``` + +``` +@WithStateMachine +public class Bean10 { + + @StatesOnStates(source = States.S1, target = States.S2) + public void fromS1ToS2() { + } +} +``` + +The methods for state entry and exit behave in the same way, as the following example shows: + +``` +@WithStateMachine +public class Bean11 { + + @OnStateEntry + public void anyStateEntry() { + } + + @OnStateExit + public void anyStateExit() { + } +} +``` + +### Event Annotation + +There is one event-related annotation. It is named `@OnEventNotAccepted`. +If you specify the `event` property, you can listen for a specific event not being +accepted. If you do not specify an event, you can list for any event not being +accepted. The following example shows both ways to use the `@OnEventNotAccepted`annotation: + +``` +@WithStateMachine +public class Bean12 { + + @OnEventNotAccepted + public void anyEventNotAccepted() { + } + + @OnEventNotAccepted(event = "E1") + public void e1EventNotAccepted() { + } +} +``` + +### State Machine Annotations + +The following annotations are available for a state machine: `@OnStateMachineStart`,`@OnStateMachineStop`, and `@OnStateMachineError`. + +During a state machine’s start and stop, lifecycle methods are called. +The following example shows how to use `@OnStateMachineStart` and`@OnStateMachineStop` to listen to these events: + +``` +@WithStateMachine +public class Bean13 { + + @OnStateMachineStart + public void onStateMachineStart() { + } + + @OnStateMachineStop + public void onStateMachineStop() { + } +} +``` + +If a state machine goes into an error with exception, `@OnStateMachineStop`annotation is called. The following example shows how to use it: + +``` +@WithStateMachine +public class Bean14 { + + @OnStateMachineError + public void onStateMachineError() { + } +} +``` + +### Extended State Annotation + +There is one extended state-related annotation. It is named`@OnExtendedStateChanged`. 
You can also listen to changes only +for specific `key` changes. The following example shows how to use the`@OnExtendedStateChanged`, both with and without a `key` property: + +``` +@WithStateMachine +public class Bean15 { + + @OnExtendedStateChanged + public void anyStateChange() { + } + + @OnExtendedStateChanged(key = "key1") + public void key1Changed() { + } +} +``` + +## Using `StateMachineAccessor` + +`StateMachine` is the main interface for communicating with a state machine. +From time to time, you may need to get more dynamic and +programmatic access to internal structures of a state machine and its +nested machines and regions. For these use cases, `StateMachine`exposes a functional interface called `StateMachineAccessor`, which provides +an interface to get access to individual `StateMachine` and`Region` instances. + +`StateMachineFunction` is a simple functional interface that lets +you apply the `StateMachineAccess` interface to a state machine. With +JDK 7, these create code that is a little verbose code. However, with JDK 8 lambdas, +the doce is relatively non-verbose. + +The `doWithAllRegions` method gives access to all `Region` instances in +a state machine. The following example shows how to use it: + +``` +stateMachine.getStateMachineAccessor().doWithAllRegions(function -> function.setRelay(stateMachine)); + +stateMachine.getStateMachineAccessor() + .doWithAllRegions(access -> access.setRelay(stateMachine)); +``` + +The `doWithRegion` method gives access to single `Region` instance in a +state machine. The following example shows how to use it: + +``` +stateMachine.getStateMachineAccessor().doWithRegion(function -> function.setRelay(stateMachine)); + +stateMachine.getStateMachineAccessor() + .doWithRegion(access -> access.setRelay(stateMachine)); +``` + +The `withAllRegions` method gives access to all of the `Region` instances in +a state machine. The following example shows how to use it: + +``` +for (StateMachineAccess access : stateMachine.getStateMachineAccessor().withAllRegions()) { + access.setRelay(stateMachine); +} + +stateMachine.getStateMachineAccessor().withAllRegions() + .stream().forEach(access -> access.setRelay(stateMachine)); +``` + +The `withRegion` method gives access to single `Region` instance in a +state machine. The following example shows how to use it: + +``` +stateMachine.getStateMachineAccessor() + .withRegion().setRelay(stateMachine); +``` + +## Using `StateMachineInterceptor` + +Instead of using a `StateMachineListener` interface, you can +use a `StateMachineInterceptor`. One conceptual difference is that you can use an +interceptor to intercept and stop a current state +change or change its transition logic. Instead of implementing a full interface, +you can use an adapter class called `StateMachineInterceptorAdapter` to override +the default no-op methods. + +| |One recipe ([Persist](#statemachine-recipes-persist)) and one sample
([Persist](#statemachine-examples-persist)) are related to using an
interceptor.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------| + +You can register an interceptor through `StateMachineAccessor`. The concept of +an interceptor is a relatively deep internal feature and, thus, is not +exposed directly through the `StateMachine` interface. + +The following example shows how to add a `StateMachineInterceptor` and override selected +methods: + +``` +stateMachine.getStateMachineAccessor() + .withRegion().addStateMachineInterceptor(new StateMachineInterceptor() { + + @Override + public Message preEvent(Message message, StateMachine stateMachine) { + return message; + } + + @Override + public StateContext preTransition(StateContext stateContext) { + return stateContext; + } + + @Override + public void preStateChange(State state, Message message, + Transition transition, StateMachine stateMachine, + StateMachine rootStateMachine) { + } + + @Override + public StateContext postTransition(StateContext stateContext) { + return stateContext; + } + + @Override + public void postStateChange(State state, Message message, + Transition transition, StateMachine stateMachine, + StateMachine rootStateMachine) { + } + + @Override + public Exception stateMachineError(StateMachine stateMachine, + Exception exception) { + return exception; + } + }); +``` + +| |For more about the error handling shown in preceding example, see[State Machine Error Handling](#sm-error-handling).| +|---|--------------------------------------------------------------------------------------------------------------------| + +## State Machine Security + +Security features are built atop of functionality from[Spring Security](https://projects.spring.io/spring-security). Security features are +handy when it is required to protect part of a state machine +execution and interaction with it. + +| |We expect you to be fairly familiar with Spring Security, meaning
that we do not go into details of how the overall security framework works. For
this information, you should read the Spring Security reference documentation
(available [here](https://spring.io/projects/spring-security#learn)).| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The first level of defense with security is naturally protecting events, +which really drive what is going to +happen in a state machine. You can then define more fine-grained security settings +for transitions and actions. This parallel to giving an employee access to a building +and then giving access to specific rooms within the building and even the ability +to turn on and off the lights in specific rooms. If you trust +your users, event security may be all you need. If not, +you need to apply more detailed security. + +You can find more detailed information in [Understanding Security](#sm-security-details). + +| |For a complete example, see the [Security](#statemachine-examples-security) sample.| +|---|-----------------------------------------------------------------------------------| + +### Configuring Security + +All generic configurations for security are done in`SecurityConfigurer`, which is obtained from`StateMachineConfigurationConfigurer`. By default, security is disabled, +even if Spring Security classes are +present. The following example shows how to enable security: + +``` +@Configuration +@EnableStateMachine +static class Config4 extends StateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineConfigurationConfigurer config) + throws Exception { + config + .withSecurity() + .enabled(true) + .transitionAccessDecisionManager(null) + .eventAccessDecisionManager(null); + } +} +``` + +If you absolutely need to, you can customize `AccessDecisionManager` for both events and +transitions. If you do not define decision managers or +set them to `null`, default managers are created internally. + +### Securing Events + +Event security is defined on a global level by a `SecurityConfigurer`. +The following example shows how to enable event security: + +``` +@Configuration +@EnableStateMachine +static class Config1 extends StateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineConfigurationConfigurer config) + throws Exception { + config + .withSecurity() + .enabled(true) + .event("true") + .event("ROLE_ANONYMOUS", ComparisonType.ANY); + } +} +``` + +In the preceding configuration example, we use an expression of `true`, which always evaluates +to `TRUE`. Using an expression that always evaluates to `TRUE`would not make sense in a real application but shows the point that +expression needs to return either `TRUE` or `FALSE`. We also defined an +attribute of `ROLE_ANONYMOUS` and a `ComparisonType` of `ANY`. For more about using attributes +and expressions, see [Using Security Attributes and Expressions](#sm-security-attributes-expressions). + +### Securing Transitions + +You can define transition security globally, as the following example shows. 
+ +``` +@Configuration +@EnableStateMachine +static class Config6 extends StateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineConfigurationConfigurer config) + throws Exception { + config + .withSecurity() + .enabled(true) + .transition("true") + .transition("ROLE_ANONYMOUS", ComparisonType.ANY); + } +} +``` + +If security is defined in a transition itself, it override any +globally set security. The following example shows how to do so: + +``` +@Configuration +@EnableStateMachine +static class Config2 extends StateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineTransitionConfigurer transitions) + throws Exception { + transitions + .withExternal() + .source("S0") + .target("S1") + .event("A") + .secured("ROLE_ANONYMOUS", ComparisonType.ANY) + .secured("hasTarget('S1')"); + } +} +``` + +For more about using attributes and expressions, see [Using Security Attributes and Expressions](#sm-security-attributes-expressions). + +### Securing Actions + +There are no dedicated security definitions for actions in a state +machine, but you can secure actions by using a global method security +from Spring Security. This requires that an `Action` be +defined as a proxied `@Bean` and its `execute` method be annotated with`@Secured`. The following example shows how to do so: + +``` +@Configuration +@EnableStateMachine +static class Config3 extends StateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineConfigurationConfigurer config) + throws Exception { + config + .withSecurity() + .enabled(true); + } + + @Override + public void configure(StateMachineStateConfigurer states) + throws Exception { + states + .withStates() + .initial("S0") + .state("S1"); + } + + @Override + public void configure(StateMachineTransitionConfigurer transitions) + throws Exception { + transitions + .withExternal() + .source("S0") + .target("S1") + .action(securedAction()) + .event("A"); + } + + @Scope(proxyMode = ScopedProxyMode.TARGET_CLASS) + @Bean + public Action securedAction() { + return new Action() { + + @Secured("ROLE_ANONYMOUS") + @Override + public void execute(StateContext context) { + } + }; + } + +} +``` + +Global method security needs to be enabled with Spring Security. +The following example shows how to do so: + +``` +@Configuration +@EnableGlobalMethodSecurity(securedEnabled = true) +public static class Config5 extends WebSecurityConfigurerAdapter { + + @Autowired + public void configureGlobal(AuthenticationManagerBuilder auth) throws Exception { + auth + .inMemoryAuthentication() + .withUser("user").password("password").roles("USER"); + } +} +``` + +See the Spring Security reference guide (available[here](https://spring.io/projects/spring-security#learn)) for more detail. + +### Using Security Attributes and Expressions + +Generally, you can define security properties in either of two ways: by +using security attributes and by using security expressions. +Attributes are easier to use but are relatively limited in terms of +functionality. Expressions provide more features but are a little bit +harder to use. + +#### Generic Attribute Usage + +By default, `AccessDecisionManager` instances for events and +transitions both use a `RoleVoter`, meaning you can use role attributes +from Spring Security. + +For attributes, we have three different comparison types: `ANY`, `ALL`, and`MAJORITY`. These comparison types map onto default access decision managers +(`AffirmativeBased`, `UnanimousBased`, and `ConsensusBased`, respectively). 
If you have defined a custom `AccessDecisionManager`, the comparison type is
effectively discarded, as it is used only to create a default manager.

#### Generic Expression Usage

Security expressions must return either `TRUE` or `FALSE`.

The base class for the expression root objects is `SecurityExpressionRoot`. It provides some common expressions, which
are available in both transition and event security. The following table
describes the most often used built-in expressions:

| Expression | Description |
|---|---|
| `hasRole([role])` | Returns `true` if the current principal has the specified role. By default, if the supplied role does not start with `ROLE_`, the prefix is added. You can customize this by modifying the `defaultRolePrefix` on `DefaultWebSecurityExpressionHandler`. |
| `hasAnyRole([role1,role2])` | Returns `true` if the current principal has any of the supplied roles (given as a comma-separated list of strings). By default, the `ROLE_` prefix is added to each supplied role that does not already start with it. You can customize this by modifying the `defaultRolePrefix` on `DefaultWebSecurityExpressionHandler`. |
| `hasAuthority([authority])` | Returns `true` if the current principal has the specified authority. |
| `hasAnyAuthority([authority1,authority2])` | Returns `true` if the current principal has any of the supplied authorities (given as a comma-separated list of strings). |
| `principal` | Allows direct access to the principal object that represents the current user. |
| `authentication` | Allows direct access to the current `Authentication` object obtained from the `SecurityContext`. |
| `permitAll` | Always evaluates to `true`. |
| `denyAll` | Always evaluates to `false`. |
| `isAnonymous()` | Returns `true` if the current principal is an anonymous user. |
| `isRememberMe()` | Returns `true` if the current principal is a remember-me user. |
| `isAuthenticated()` | Returns `true` if the user is not anonymous. |
| `isFullyAuthenticated()` | Returns `true` if the user is not an anonymous or a remember-me user. |
| `hasPermission(Object target, Object permission)` | Returns `true` if the user has access to the provided target for the given permission, for example, `hasPermission(domainObject, 'read')`. |
| `hasPermission(Object targetId, String targetType, Object permission)` | Returns `true` if the user has access to the provided target for the given permission, for example, `hasPermission(1, 'com.example.domain.Message', 'read')`. |

#### Event Attributes

You can match an event ID by using a prefix of `EVENT_`. For example, matching
event `A` would match an attribute of `EVENT_A`.

#### Event Expressions

The base class for the expression root object for events is `EventSecurityExpressionRoot`. It provides access to a `Message` object, which is passed around with eventing. `EventSecurityExpressionRoot` has only one method, which the following table describes:

| Expression | Description |
|---|---|
|`hasEvent(Object event)`|Returns `true` if the event matches the given event.|

#### Transition Attributes

When matching transition sources and targets, you can use the `TRANSITION_SOURCE_` and `TRANSITION_TARGET_` prefixes, respectively.

#### Transition Expressions

The base class for the expression root object for transitions is `TransitionSecurityExpressionRoot`. It provides access to a `Transition` object, which is passed around for transition changes. `TransitionSecurityExpressionRoot` has two methods, which the following
table describes:

| Expression | Description |
|---|---|
|`hasSource(Object source)`|Returns `true` if the transition source matches the given source.|
|`hasTarget(Object target)`|Returns `true` if the transition target matches the given target.|
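
To tie attributes and expressions together, the following is a hedged sketch of an event security rule composed from the built-ins above. The `E1` event name and the `USER` role are assumptions for illustration, not part of the samples in this document:

```
@Override
public void configure(StateMachineConfigurationConfigurer<String, String> config)
        throws Exception {
    config
        .withSecurity()
            .enabled(true)
            // Hypothetical policy: allow sending an event only when it is E1
            // and the current principal has the USER role.
            .event("hasEvent('E1') and hasRole('USER')");
}
```
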
### Understanding Security

This section provides more detailed information about how security works within a
state machine. You may not really need to know this, but it is
always better to be transparent, instead of hiding all the magic that
happens behind the scenes.

| |Security makes sense only if Spring Statemachine runs in a walled
garden where users have no direct access to the application and could consequently
modify Spring Security's `SecurityContext` held in a thread local.
If the user controls the JVM, then effectively there is no security
at all.| +|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The integration point for security is created with a[`StateMachineInterceptor`](#sm-interceptor), which is then automatically added into a +state machine if security is enabled. The specific class is`StateMachineSecurityInterceptor`, which intercepts events and +transitions. This interceptor then consults Spring Security’s`AccessDecisionManager` to determine whether an event can be sent or whether a transition can be +executed. Effectively, if a decision or a vote with a `AccessDecisionManager`results in an exception, the event or transition is denied. + +Due to how `AccessDecisionManager` from Spring Security works, we +need one instance of it per secured object. This is one reason why there +are different managers for events and transitions. In this case, events +and transitions are different class objects that we secure. + +By default, for events, voters (`EventExpressionVoter`, `EventVoter`, and`RoleVoter`) are added into an `AccessDecisionManager`. + +By default, for transitions, voters (`TransitionExpressionVoter`,`TransitionVoter`, and `RoleVoter`) are added into an `AccessDecisionManager`. + +## State Machine Error Handling + +If a state machine detects an internal error during a state transition +logic, it may throw an exception. Before this exception is processed +internally, you are given a chance to intercept. + +Normally, you can use `StateMachineInterceptor` to intercept errors and the +following listing shows an example of it: + +``` +StateMachine stateMachine; + +void addInterceptor() { + stateMachine.getStateMachineAccessor() + .doWithRegion(function -> + function.addStateMachineInterceptor(new StateMachineInterceptorAdapter() { + @Override + public Exception stateMachineError(StateMachine stateMachine, + Exception exception) { + return exception; + } + }) + ); + +} +``` + +When errors are detected, the normal event notify mechanism is executed. +This lets you use either a `StateMachineListener` or a Spring Application +context event listener. For more about these, see[Listening to State Machine Events](#sm-listeners). + +Having said that, the following example shows a simple listener: + +``` +public class ErrorStateMachineListener + extends StateMachineListenerAdapter { + + @Override + public void stateMachineError(StateMachine stateMachine, Exception exception) { + // do something with error + } +} +``` + +The following example shows a generic `ApplicationListener` checking `StateMachineEvent`: + +``` +public class GenericApplicationEventListener + implements ApplicationListener { + + @Override + public void onApplicationEvent(StateMachineEvent event) { + if (event instanceof OnStateMachineError) { + // do something with error + } + } +} +``` + +You can also directly define `ApplicationListener` to +recognize only `StateMachineEvent` instances, as the following example shows: + +``` +public class ErrorApplicationEventListener + implements ApplicationListener { + + @Override + public void onApplicationEvent(OnStateMachineError event) { + // do something with error + } +} +``` + +| |Actions defined for transitions also have their own error handling
logic. See [Transition Action Error Handling](#statemachine-config-transition-actions-errorhandling).|
|---|---|

With the reactive APIs, it is possible to get an *Action* execution error
back from a *StateMachineEventResult*. Consider a simple machine whose
entry action errors when it transitions into state `S1`:

```
@Configuration
@EnableStateMachine
static class Config1 extends StateMachineConfigurerAdapter<String, String> {

    @Override
    public void configure(StateMachineStateConfigurer<String, String> states) throws Exception {
        states
            .withStates()
                .initial("SI")
                .stateEntry("S1", (context) -> {
                    throw new RuntimeException("example error");
                });
    }

    @Override
    public void configure(StateMachineTransitionConfigurer<String, String> transitions) throws Exception {
        transitions
            .withExternal()
                .source("SI")
                .target("S1")
                .event("E1");
    }
}
```

The following test concept shows how a possible error can be consumed
from a *StateMachineEventResult*:

```
@Autowired
private StateMachine<String, String> machine;

@Test
public void testActionEntryErrorWithEvent() throws Exception {
    StepVerifier.create(machine.startReactively()).verifyComplete();
    assertThat(machine.getState().getIds()).containsExactlyInAnyOrder("SI");

    StepVerifier.create(machine.sendEvent(Mono.just(MessageBuilder.withPayload("E1").build())))
        .consumeNextWith(result -> {
            StepVerifier.create(result.complete()).consumeErrorWith(e -> {
                assertThat(e).isInstanceOf(StateMachineException.class).hasMessageContaining("example error");
            }).verify();
        })
        .verifyComplete();

    assertThat(machine.getState().getIds()).containsExactlyInAnyOrder("S1");
}
```

| |An error in an entry or exit action does not prevent the transition from happening.|
|---|---|

## State Machine Services

State machine services are higher-level implementations meant to
provide more user-level functionality to ease normal runtime
operations. Currently, only one service interface
(`StateMachineService`) exists.

### Using `StateMachineService`

`StateMachineService` is an interface that is meant to handle running machines
and has simple methods to “acquire” and “release” machines. It has
one default implementation, named `DefaultStateMachineService`.

## Persisting a State Machine

Traditionally, an instance of a state machine is used as is within a
running program. You can achieve more dynamic behavior by using
dynamic builders and factories, which allow state machine
instantiation on demand. Building an instance of a state machine is a
relatively heavy operation. Consequently, if you need to (for example) handle
an arbitrary state change in a database by using a state machine, you need to
find a better and faster way to do it.

The persist feature lets you save the state of a state machine
into an external repository and later reset the state machine based on the
serialized state. For example, if you have a database table keeping
orders, it would be way too expensive to update an order state with a state
machine if a new instance had to be built for every change.
The persist feature lets you reset a state machine state without
instantiating a new state machine instance.

| |There is one recipe (see [Persist](#statemachine-recipes-persist)) and one sample
+
+## Persisting a State Machine
+
+Traditionally, an instance of a state machine is used as is within a
+running program. You can achieve more dynamic behavior by using
+dynamic builders and factories, which allow state machine
+instantiation on demand. Building an instance of a state machine is a
+relatively heavy operation. Consequently, if you need to (for example) handle
+an arbitrary state change in a database by using a state machine, you need to
+find a better and faster way to do it.
+
+The persist feature lets you save the state of a state machine
+into an external repository and later reset the state machine based on the
+serialized state. For example, if you have a database table keeping
+orders, it would be far too expensive to update an order state with a state
+machine if a new instance had to be built for every change.
+The persist feature lets you reset a state machine state without
+instantiating a new state machine instance.
+
+| |There is one recipe (see [Persist](#statemachine-recipes-persist)) and one sample
+(see [Persist](#statemachine-examples-persist)) that provide more information about
persisting states.|
+|---|---|
+
+While you can build a custom persistence feature by using a `StateMachineListener`, it has one conceptual problem. When a listener
+notifies about a change of state, the state change has already happened. If a
+custom persistence method within a listener fails to update the serialized
+state in an external repository, the state in the state machine and the state in
+the external repository are then inconsistent.
+
+You can instead use a state machine interceptor to try to save the
+serialized state into external storage during the state
+change within a state machine. If this interceptor callback fails,
+you can halt the state change attempt and, instead of ending up in an
+inconsistent state, you can then handle this error manually. See
+[Using `StateMachineInterceptor`](#sm-interceptor) for how to use interceptors.
+
+### Using `StateMachineContext`
+
+You cannot persist a `StateMachine` by using normal Java
+serialization, as the object graph is too rich and contains too many
+dependencies on other Spring context classes. `StateMachineContext` is a runtime representation of a state machine that you can use to
+restore an existing machine into the state represented by a particular `StateMachineContext` object.
+
+`StateMachineContext` contains two different ways to include information
+for a child context. These are generally used when a machine contains
+orthogonal regions. First, a context can have a list of child contexts
+that can be used as is if they exist. Second, you can
+include a list of references that are used if raw context children
+are not in place. These child references are really the only way to
+persist a machine in which multiple parallel regions run
+independently.
+
+| |The [Data Multi Persist](#statemachine-examples-datajpamultipersist) sample shows how you can persist parallel regions.|
+|---|---|
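+
+The following sketch illustrates how a machine can be reset from a
+`StateMachineContext` through its accessor (an illustration, assuming the
+context has already been read from storage):
+
+```
+// Minimal sketch: reset an existing machine from a previously stored
+// StateMachineContext. Obtaining the context from storage is up to you.
+StateMachine<String, String> stateMachine;
+StateMachineContext<String, String> context;
+
+void restore() {
+    stateMachine.stopReactively().block();
+    stateMachine.getStateMachineAccessor()
+        .doWithAllRegions(access -> access.resetStateMachineReactively(context).block());
+    stateMachine.startReactively().block();
+}
+```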
+
+### Using `StateMachinePersister`
+
+Building a `StateMachineContext` and then restoring a state machine
+from it has always been a little bit of “black magic” if done
+manually. The `StateMachinePersister` interface aims to ease these
+operations by providing `persist` and `restore` methods. The default
+implementation of this interface is `DefaultStateMachinePersister`.
+
+We can show how to use a `StateMachinePersister` by following
+snippets from tests. We start by creating two similar configurations
+(`machine1` and `machine2`) for a state machine. Note that we could build different
+machines for this demonstration in other ways, but this way
+works for this case. The following example configures the two state machines:
+
+```
+@Configuration
+@EnableStateMachine(name = "machine1")
+static class Config1 extends Config {
+}
+
+@Configuration
+@EnableStateMachine(name = "machine2")
+static class Config2 extends Config {
+}
+
+static class Config extends StateMachineConfigurerAdapter<String, String> {
+
+    @Override
+    public void configure(StateMachineStateConfigurer<String, String> states) throws Exception {
+        states
+            .withStates()
+                .initial("S1")
+                .state("S1")
+                .state("S2");
+    }
+
+    @Override
+    public void configure(StateMachineTransitionConfigurer<String, String> transitions) throws Exception {
+        transitions
+            .withExternal()
+                .source("S1")
+                .target("S2")
+                .event("E1");
+    }
+}
+```
+
+As we are using a `StateMachinePersist` object, we can create an in-memory
+implementation.
+
+| |This in-memory sample is only for demonstration purposes. For real
applications, you should use a real persistent storage implementation.|
+|---|---|
+
+The following listing shows how to use the in-memory sample:
+
+```
+static class InMemoryStateMachinePersist implements StateMachinePersist<String, String, String> {
+
+    private final HashMap<String, StateMachineContext<String, String>> contexts = new HashMap<>();
+
+    @Override
+    public void write(StateMachineContext<String, String> context, String contextObj) throws Exception {
+        contexts.put(contextObj, context);
+    }
+
+    @Override
+    public StateMachineContext<String, String> read(String contextObj) throws Exception {
+        return contexts.get(contextObj);
+    }
+}
+```
+
+After we have instantiated the two different machines, we can transfer
+`machine1` into state `S2` through event `E1`. Then we can persist it and restore
+`machine2`. The following example shows how to do so:
+
+```
+InMemoryStateMachinePersist stateMachinePersist = new InMemoryStateMachinePersist();
+StateMachinePersister<String, String, String> persister = new DefaultStateMachinePersister<>(stateMachinePersist);
+
+StateMachine<String, String> stateMachine1 = context.getBean("machine1", StateMachine.class);
+StateMachine<String, String> stateMachine2 = context.getBean("machine2", StateMachine.class);
+stateMachine1.startReactively().block();
+
+stateMachine1
+    .sendEvent(Mono.just(MessageBuilder
+        .withPayload("E1").build()))
+    .blockLast();
+assertThat(stateMachine1.getState().getIds()).containsExactly("S2");
+
+persister.persist(stateMachine1, "myid");
+persister.restore(stateMachine2, "myid");
+assertThat(stateMachine2.getState().getIds()).containsExactly("S2");
+```
+
+### Using Redis
+
+`RepositoryStateMachinePersist` (which implements `StateMachinePersist`) offers support for persisting a state machine into Redis.
+The specific implementation is a `RedisStateMachineContextRepository`, which uses `kryo` serialization to
+persist a `StateMachineContext` into `Redis`.
+
+For `StateMachinePersister`, we have a Redis-related `RedisStateMachinePersister` implementation, which takes an instance of
+a `StateMachinePersist` and uses `String` as its context object.
+
+| |See the [Event Service](#statemachine-examples-eventservice) sample for detailed usage.|
+|---|---|
+
+`RedisStateMachineContextRepository` needs a `RedisConnectionFactory` for it to work. We recommend using a `JedisConnectionFactory` for it.
+
+### Using `StateMachineRuntimePersister`
+
+`StateMachineRuntimePersister` is a simple extension to `StateMachinePersist` that adds an interface-level method to get the `StateMachineInterceptor` associated with it. This interceptor is then
+required to persist a machine during state changes without needing to
+stop and start the machine.
+
+Currently, there are implementations of this interface for the
+supported Spring Data repositories. These implementations are `JpaPersistingStateMachineInterceptor`, `MongoDbPersistingStateMachineInterceptor`,
+and `RedisPersistingStateMachineInterceptor`.
+
+| |See the [Data Persist](#statemachine-examples-datapersist) sample for detailed usage.|
+|---|---|
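+
+The following sketch shows how a runtime persister is typically hooked into
+machine configuration (assuming a `StateMachineRuntimePersister` bean backed
+by one of the implementations above, as in the Data Persist sample; the class
+name `PersistConfig` is only an example):
+
+```
+// Minimal sketch: wire a runtime persister into machine configuration so
+// that state changes are persisted through its interceptor automatically.
+@Configuration
+@EnableStateMachine
+public class PersistConfig extends StateMachineConfigurerAdapter<String, String> {
+
+    @Autowired
+    private StateMachineRuntimePersister<String, String, String> stateMachineRuntimePersister;
+
+    @Override
+    public void configure(StateMachineConfigurationConfigurer<String, String> config)
+            throws Exception {
+        config
+            .withPersistence()
+                .runtimePersister(stateMachineRuntimePersister);
+    }
+}
+```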
+
+## Spring Boot Support
+
+The auto-configuration module (`spring-statemachine-autoconfigure`) contains all
+the logic for integrating with Spring Boot, which provides functionality for
+auto-configuration and actuators. All you need is to have this Spring Statemachine
+library as part of a Boot application.
+
+### Monitoring and Tracing
+
+`BootStateMachineMonitor` is created automatically and associated with
+a state machine. `BootStateMachineMonitor` is a custom `StateMachineMonitor` implementation that integrates with Spring Boot’s `MeterRegistry` and endpoints
+through a custom `StateMachineTraceRepository`. Optionally, you can disable this auto-configuration
+by setting the `spring.statemachine.monitor.enabled` key to `false`. The
+[Monitoring](#statemachine-examples-monitoring) sample shows how to use this auto-configuration.
+
+### Repository Config
+
+If the required classes are found on the classpath, Spring Data repositories
+and entity class scanning are automatically configured
+for [Repository Support](#sm-repository).
+
+The currently supported configurations are `JPA`, `Redis`, and `MongoDB`. You can disable repository auto-configuration by using the
+`spring.statemachine.data.jpa.repositories.enabled`, `spring.statemachine.data.redis.repositories.enabled`, and `spring.statemachine.data.mongo.repositories.enabled` properties, respectively.
+
+## Monitoring a State Machine
+
+You can use `StateMachineMonitor` to get more information about
+how long transitions and actions take to execute. The following listing
+shows how this interface is implemented.
+
+```
+public class TestStateMachineMonitor extends AbstractStateMachineMonitor<String, String> {
+
+    @Override
+    public void transition(StateMachine<String, String> stateMachine, Transition<String, String> transition,
+            long duration) {
+    }
+
+    @Override
+    public void action(StateMachine<String, String> stateMachine,
+            Function<StateContext<String, String>, Mono<Void>> action, long duration) {
+    }
+}
+```
+
+Once you have a `StateMachineMonitor` implementation, you can add it to
+a state machine through configuration, as the following example shows:
+
+```
+@Configuration
+@EnableStateMachine
+public class Config1 extends StateMachineConfigurerAdapter<String, String> {
+
+    @Override
+    public void configure(StateMachineConfigurationConfigurer<String, String> config)
+            throws Exception {
+        config
+            .withMonitoring()
+                .monitor(stateMachineMonitor());
+    }
+
+    @Override
+    public void configure(StateMachineStateConfigurer<String, String> states) throws Exception {
+        states
+            .withStates()
+                .initial("S1")
+                .state("S2");
+    }
+
+    @Override
+    public void configure(StateMachineTransitionConfigurer<String, String> transitions) throws Exception {
+        transitions
+            .withExternal()
+                .source("S1")
+                .target("S2")
+                .event("E1");
+    }
+
+    @Bean
+    public StateMachineMonitor<String, String> stateMachineMonitor() {
+        return new TestStateMachineMonitor();
+    }
+}
+```
+
+| |See the [Monitoring](#statemachine-examples-monitoring) sample for detailed usage.|
+|---|---|
+
+## Using Distributed States
+
+Distributed state is probably one of the most complicated concepts of
+Spring Statemachine. What exactly is a distributed state? A state
+within a single state machine is naturally simple to understand,
+but, when there is a need to introduce a shared distributed state
+through a state machine, things get a little complicated.
+
+| |Distributed state functionality is still a preview feature and is not
yet considered to be stable in this particular release. We expect this
feature to mature towards its first official release.|
+|---|---|
+
+For information about generic configuration support, see
+[Configuring Common Settings](#statemachine-config-commonsettings). For an actual usage example, see
+the [Zookeeper](#statemachine-examples-zookeeper) sample.
+
+A distributed state machine is implemented through a
+`DistributedStateMachine` class that wraps an actual instance
+of a `StateMachine`. `DistributedStateMachine` intercepts
+communication with a `StateMachine` instance and works with
+distributed state abstractions handled through the
+`StateMachineEnsemble` interface. Depending on the actual implementation,
+you can also use the `StateMachinePersist` interface to serialize a
+`StateMachineContext`, which contains enough information to reset a
+`StateMachine`.
+
+While a distributed state machine is implemented through an abstraction,
+only one implementation currently exists. It is based on Zookeeper.
+
+The following example shows how to configure a Zookeeper-based distributed state
+machine:
+
+```
+@Configuration
+@EnableStateMachine
+public class Config
+        extends StateMachineConfigurerAdapter<String, String> {
+
+    @Override
+    public void configure(StateMachineConfigurationConfigurer<String, String> config)
+            throws Exception {
+        config
+            .withDistributed()
+                .ensemble(stateMachineEnsemble())
+                .and()
+            .withConfiguration()
+                .autoStartup(true);
+    }
+
+    @Override
+    public void configure(StateMachineStateConfigurer<String, String> states)
+            throws Exception {
+        // config states
+    }
+
+    @Override
+    public void configure(StateMachineTransitionConfigurer<String, String> transitions)
+            throws Exception {
+        // config transitions
+    }
+
+    @Bean
+    public StateMachineEnsemble<String, String> stateMachineEnsemble()
+            throws Exception {
+        return new ZookeeperStateMachineEnsemble<String, String>(curatorClient(), "/zkpath");
+    }
+
+    @Bean
+    public CuratorFramework curatorClient()
+            throws Exception {
+        CuratorFramework client = CuratorFrameworkFactory
+            .builder()
+            .defaultData(new byte[0])
+            .connectString("localhost:2181").build();
+        client.start();
+        return client;
+    }
+}
+```
+
+You can find the current technical documentation for a Zookeeper-based distributed
+state machine [in the appendix](#appendices-zookeeper).
+
+### Using `ZookeeperStateMachineEnsemble`
+
+`ZookeeperStateMachineEnsemble` itself needs two mandatory settings:
+an instance of `curatorClient` and a `basePath`. The client is a
+`CuratorFramework`, and the path is the root of a tree in a `Zookeeper` instance.
+
+Optionally, you can set `cleanState`, which defaults to `TRUE` and clears existing data if no members exist in an ensemble. You can set
+it to `FALSE` if you want to preserve distributed state across
+application restarts.
+
+Optionally, you can set `logSize` (defaults
+to `32`) to keep a history of state changes. The value of this
+setting must be a power of two. `32` is generally a good default
+value. If a particular state machine is left behind by more than the
+size of the log, it is put into an error state and disconnected from the
+ensemble, indicating it has lost its history and its ability to fully reconstruct the
+synchronized status.
+
+## Testing Support
+
+We have also added a set of utility classes to ease testing of state
+machine instances. These are used in the framework itself but are also
+very useful for end users.
+
+`StateMachineTestPlanBuilder` builds a `StateMachineTestPlan`,
+which has one method (called `test()`). That method runs a plan.
+`StateMachineTestPlanBuilder` contains a fluent builder API to let you add
+steps to a plan. During these steps, you can send events and check
+various conditions, such as state changes, transitions, and extended state
+variables.
+
+The following example uses `StateMachineBuilder` to build a state machine:
+
+```
+private StateMachine<String, String> buildMachine() throws Exception {
+    StateMachineBuilder.Builder<String, String> builder = StateMachineBuilder.builder();
+
+    builder.configureConfiguration()
+        .withConfiguration()
+            .autoStartup(true);
+
+    builder.configureStates()
+        .withStates()
+            .initial("SI")
+            .state("S1");
+
+    builder.configureTransitions()
+        .withExternal()
+            .source("SI").target("S1")
+            .event("E1")
+            .action(c -> {
+                c.getExtendedState().getVariables().put("key1", "value1");
+            });
+
+    return builder.build();
+}
+```
+
+In the following test plan, we have two steps. First, we check that the initial
+state (`SI`) is indeed set. Second, we send an event (`E1`) and expect
+one state change to happen and expect the machine to end up in state `S1`.
+The following listing shows the test plan:
+
+```
+StateMachine<String, String> machine = buildMachine();
+StateMachineTestPlan<String, String> plan =
+    StateMachineTestPlanBuilder.<String, String>builder()
+        .defaultAwaitTime(2)
+        .stateMachine(machine)
+        .step()
+            .expectStates("SI")
+            .and()
+        .step()
+            .sendEvent("E1")
+            .expectStateChanged(1)
+            .expectStates("S1")
+            .expectVariable("key1")
+            .expectVariable("key1", "value1")
+            .expectVariableWith(hasKey("key1"))
+            .expectVariableWith(hasValue("value1"))
+            .expectVariableWith(hasEntry("key1", "value1"))
+            .expectVariableWith(not(hasKey("key2")))
+            .and()
+        .build();
+plan.test();
+```
+
+These utilities are also used within the framework to test distributed
+state machine features. Note that you can add multiple machines to a plan.
+If you add multiple machines, you can also choose to
+send an event to a particular machine, a random machine, or all machines.
+
+The preceding testing example uses the following Hamcrest imports:
+
+```
+import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.collection.IsMapContaining.hasEntry;
+import static org.hamcrest.collection.IsMapContaining.hasKey;
+import static org.hamcrest.collection.IsMapContaining.hasValue;
+```
+
+| |All possible options for expected results are documented in the Javadoc for [`StateMachineTestPlanStepBuilder`](https://docs.spring.io/spring-statemachine/docs/3.0.1/api/org/springframework/statemachine/test/StateMachineTestPlanBuilder.StateMachineTestPlanStepBuilder.html).|
+|---|---|
+
+## Eclipse Modeling Support
+
+Defining a state machine configuration with UI modeling is supported
+through the Eclipse Papyrus framework.
+
+From the Eclipse wizard, you can create a new Papyrus Model with the UML Diagram
+Language. In this example, it is named `simple-machine`. Then you
+have an option to choose from various diagram kinds, and you must choose a `StateMachine
+Diagram`.
+
+We want to create a machine that has two states (`S1` and `S2`), where
+`S1` is the initial state. Then, we need to create event `E1` to do a transition
+from `S1` to `S2`.
In Papyrus, the machine would then look something like
+the following example:
+
+simple machine
+
+Behind the scenes, the raw UML file looks something like the following
+reconstructed sketch (the XML ids and attribute details are illustrative,
+not copied from a real file):
+
+```
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Reconstructed sketch of a Papyrus-generated UML file: ids and
+     attribute details are illustrative. -->
+<uml:Model xmi:version="20131001" xmlns:xmi="http://www.omg.org/spec/XMI/20131001"
+    xmlns:uml="http://www.eclipse.org/uml2/5.0.0/UML" xmi:id="_model" name="simple-machine">
+  <packagedElement xmi:type="uml:StateMachine" xmi:id="_machine" name="StateMachine">
+    <region xmi:id="_region1" name="Region1">
+      <transition xmi:id="_t0" source="_initial" target="_S1"/>
+      <transition xmi:id="_t1" source="_S1" target="_S2">
+        <trigger xmi:id="_trigger1" event="_event_E1"/>
+      </transition>
+      <subvertex xmi:type="uml:Pseudostate" xmi:id="_initial"/>
+      <subvertex xmi:type="uml:State" xmi:id="_S1" name="S1"/>
+      <subvertex xmi:type="uml:State" xmi:id="_S2" name="S2"/>
+    </region>
+  </packagedElement>
+  <packagedElement xmi:type="uml:Signal" xmi:id="_signal_E1" name="E1"/>
+  <packagedElement xmi:type="uml:SignalEvent" xmi:id="_event_E1" name="SignalEventE1" signal="_signal_E1"/>
+</uml:Model>
+```
+
+| |When opening an existing model that has been defined as UML, you have three
files: `.di`, `.notation`, and `.uml`. If a model was not created in your
Eclipse session, it does not understand how to open an actual state
chart. This is a known issue in the Papyrus plugin, and there is an easy
workaround. In a Papyrus perspective, you can see a model explorer for
your model. Double click Diagram StateMachine Diagram, which
instructs Eclipse to open this specific model in its proper Papyrus
modeling plugin.|
+|---|---|
+
+### Using `UmlStateMachineModelFactory`
+
+After a UML file is in place in your project, you can import it into your
+configuration by using `StateMachineModelConfigurer`, where
+`StateMachineModelFactory` is associated with a model.
+`UmlStateMachineModelFactory` is a special factory that knows how to
+process an Eclipse Papyrus-generated UML structure. The source UML file can
+be given either as a Spring `Resource` or as a normal location string.
+The following example shows how to create an instance of
+`UmlStateMachineModelFactory`:
+
+```
+@Configuration
+@EnableStateMachine
+public static class Config1 extends StateMachineConfigurerAdapter<String, String> {
+
+    @Override
+    public void configure(StateMachineModelConfigurer<String, String> model) throws Exception {
+        model
+            .withModel()
+                .factory(modelFactory());
+    }
+
+    @Bean
+    public StateMachineModelFactory<String, String> modelFactory() {
+        return new UmlStateMachineModelFactory("classpath:org/springframework/statemachine/uml/docs/simple-machine.uml");
+    }
+}
+```
+
+As usual, Spring Statemachine works with guards and
+actions, which are defined as beans. Those need to be hooked into UML
+by its internal modeling structure. The following sections show
+how customized bean references are defined within UML definitions.
+Note that it is also possible to register particular methods manually
+without defining them as beans.
+
+If `UmlStateMachineModelFactory` is created as a bean, its
+`ResourceLoader` is automatically wired to find registered actions and
+guards. You can also manually define a
+`StateMachineComponentResolver`, which is then used to find these
+components. The factory also has *registerAction* and *registerGuard* methods, which you can use to register these components. For more
+about this, see [Using `StateMachineComponentResolver`](#sm-papyrus-statemachinecomponentresolver).
+
+A UML model is relatively loose when it comes to an implementation, such as
+Spring Statemachine itself. Spring Statemachine leaves how to implement many features and
+functionalities up to the actual implementation. The following sections go
+through how Spring Statemachine implements UML models based on
+the Eclipse Papyrus plugin.
+
+#### Using `StateMachineComponentResolver`
+
+The next example shows how `UmlStateMachineModelFactory` is defined with
+a `StateMachineComponentResolver`, which registers the
+`myAction` and `myGuard` functions, respectively. Note that these components
+are not created as beans.
The following listing shows the example:
+
+```
+@Configuration
+@EnableStateMachine
+public static class Config2 extends StateMachineConfigurerAdapter<String, String> {
+
+    @Override
+    public void configure(StateMachineModelConfigurer<String, String> model) throws Exception {
+        model
+            .withModel()
+                .factory(modelFactory());
+    }
+
+    @Bean
+    public StateMachineModelFactory<String, String> modelFactory() {
+        UmlStateMachineModelFactory factory = new UmlStateMachineModelFactory(
+            "classpath:org/springframework/statemachine/uml/docs/simple-machine.uml");
+        factory.setStateMachineComponentResolver(stateMachineComponentResolver());
+        return factory;
+    }
+
+    @Bean
+    public StateMachineComponentResolver<String, String> stateMachineComponentResolver() {
+        DefaultStateMachineComponentResolver<String, String> resolver = new DefaultStateMachineComponentResolver<>();
+        resolver.registerAction("myAction", myAction());
+        resolver.registerGuard("myGuard", myGuard());
+        return resolver;
+    }
+
+    public Action<String, String> myAction() {
+        return new Action<String, String>() {
+
+            @Override
+            public void execute(StateContext<String, String> context) {
+            }
+        };
+    }
+
+    public Guard<String, String> myGuard() {
+        return new Guard<String, String>() {
+
+            @Override
+            public boolean evaluate(StateContext<String, String> context) {
+                return false;
+            }
+        };
+    }
+}
+```
+
+### Creating a Model
+
+We start by creating an empty state machine model, as shown in the following image:
+
+papyrus gs 1
+
+You can start by creating a new model and giving it a name, as the following image shows:
+
+papyrus gs 2
+
+Then you need to choose StateMachine Diagram, as follows:
+
+![papyrus gs 3](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-3.png)
+
+You end up with an empty state machine.
+
+In the preceding images, you should have created a sample named `model`.
+You should have wound up with three files: `model.di`,
+`model.notation`, and `model.uml`. You can then use these files in any other
+Eclipse instance. Further, you can import `model.uml` into
+Spring Statemachine.
+
+### Defining States
+
+The state identifier comes from a component name in a diagram.
+You must have an initial state in your machine, which you can do by adding
+a root element and then drawing a transition to your own initial state,
+as the following image shows:
+
+![papyrus gs 4](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-4.png)
+
+In the preceding image, we added a root element and an initial state (`S1`). Then we drew a transition
+between those two to indicate that `S1` is an initial state.
+
+![papyrus gs 5](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-5.png)
+
+In the preceding image, we added a second state (`S2`) and added a transition between
+`S1` and `S2` (indicating that we have two states).
+
+### Defining Events
+
+To associate an event with a transition, you need to create a Signal
+(`E1`, in this case). To do so, choose RootElement → New Child → Signal.
+The following image shows the result:
+
+![papyrus gs 6](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-6.png)
+
+Then you need to create a SignalEvent with the new Signal, `E1`.
+To do so, choose RootElement → New Child → SignalEvent.
+The following image shows the result:
+
+![papyrus gs 7](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-7.png)
+
+Now that you have defined a `SignalEvent`, you can use it to associate
+a trigger with a transition. For more about that, see
+[Defining Transitions](#sm-papyrus-transitions).
+
+#### Deferring an Event
+
+You can defer events to process them at a more appropriate time. In
+UML, this is done from a state itself. Choose any state, create a
+new trigger under **Deferrable trigger**, and choose the SignalEvent that
+matches the Signal you want to defer.
+
+### Defining Transitions
+
+You can create a transition by drawing a transition line between the
+source and target states. In the preceding images, we have states `S1` and `S2` and an
+anonymous transition between the two. We want to associate event
+`E1` with that transition. We choose the transition, create a new
+trigger, and define SignalEvent `E1` for it, as the following image shows:
+
+![papyrus gs 8](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-8.png)
+
+This gives you something like the arrangement shown in the following image:
+
+![papyrus gs 9](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-9.png)
+
+| |If you omit the SignalEvent for a transition, it becomes an
anonymous transition.|
+|---|---|
+
+### Defining Timers
+
+Transitions can also happen based on timed events. Spring Statemachine
+supports two types of timers: ones that fire continuously in the
+background and ones that fire once, with a delay, when a state is
+entered.
+
+To add a new TimeEvent child in the Model Explorer, define When as an
+expression of type LiteralInteger. Its value (in milliseconds) becomes the timer.
+Leave Is Relative set to false to make the timer fire continuously.
+
+![papyrus gs 10](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-10.png)
+
+To define a timed event that triggers once when a state is entered, the process is exactly the
+same as described earlier, but leave Is Relative set to true. The following image
+shows the result:
+
+![papyrus gs 11](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-11.png)
+
+You can then pick one of these timed events instead of a
+signal event for a particular transition.
+
+### Defining a Choice
+
+A choice is defined by drawing one incoming transition into a
+CHOICE state and drawing multiple outgoing transitions from it to target
+states. The configuration model in our `StateConfigurer` lets you define
+an if/elseif/else structure. However, with UML, we need to work with
+individual Guards for outgoing transitions.
+
+You must ensure that the guards defined for transitions do not overlap so that,
+whatever happens, only one guard evaluates to TRUE at any given
+time. This gives precise and predictable results for choice branch
+evaluation. Also, we recommend leaving one transition without a guard
+so that at least one transition path is guaranteed.
+The following image shows the result of making a choice with three branches:
+
+![papyrus gs 16](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-16.png)
+
+| |Junction works similarly, except that it allows multiple incoming
transitions. Thus, its behavior compared to Choice is purely
academic. The actual logic to select the outgoing transition is exactly the same.|
+|---|---|
+
+### Defining a Junction
+
+See [Defining a Choice](#sm-papyrus-choice).
+
+### Defining Entry and Exit Points
+
+You can use EntryPoint and ExitPoint to create controlled entry and exit
+with states that have sub-states. In the following state chart, events `E1` and
+`E2` have normal state behavior by entering and exiting state
+`S2`, where normal state behavior happens by entering initial state
+`S21`.
+
+Using event `E3` takes the machine into the `ENTRY` EntryPoint, which then
+leads to `S22` without activating initial state `S21` at any time.
+Similarly, the `EXIT` ExitPoint with event `E4` controls the specific exit
+into state `S4`, while normal exit behavior from `S2` would take the
+machine into state `S3`. While in state `S22`, you can choose from
+events `E4` and `E2` to take the machine into state `S3` or `S4`,
+respectively. The following image shows the result:
+
+![papyrus gs 17](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-17.png)
+
+| |If a state is defined as a sub-machine reference and you need to use entry and exit points,
you must externally define a ConnectionPointReference, with
its entry and exit reference set to point to a correct entry or exit point
within a sub-machine reference. Only after that is it possible to
target a transition that correctly links from the outside to the inside of
a sub-machine reference. With ConnectionPointReference, you may need
to find these settings from Properties → Advanced → UML →
Entry/Exit. The UML specification lets you define multiple entries and exits. However,
with a state machine, only one is allowed.|
+|---|---|
+
+### Defining History States
+
+When working with history states, three different concepts are in play.
+UML defines a Deep History and a Shallow History. The Default History
+State comes into play when the history state is not yet known. These are
+represented in the following sections.
+
+#### Shallow History
+
+In the following image, Shallow History is selected and a transition is defined into it:
+
+![papyrus gs 18](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-18.png)
+
+#### Deep History
+
+Deep History is used for a state that has other deep nested states,
+thus giving you a chance to save the whole nested state structure.
+The following image shows a definition that uses Deep History:
+
+![papyrus gs 19](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-19.png)
+
+#### Default History
+
+In cases where a transition terminates on a history state when
+the state has not been entered before or it had reached its
+final state, there is an option to force
+a transition to a specific substate by using the default
+history mechanism. For this to happen, you must define a transition
+into this default state. This is the transition from `SH` to `S22`.
+
+In the following image, state `S22` is entered if state `S2` has
+never been active, as its history has never been recorded. If state
+`S2` has been active, then either `S20` or `S21` gets chosen.
+
+![papyrus gs 20](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-20.png)
+
+### Defining Forks and Joins
+
+Both Fork and Join are represented as bars in Papyrus. As shown
+in the next image, you need to draw one outgoing transition from `FORK` into state
+`S2` to have orthogonal regions. `JOIN` is then the reverse, where
+joined states are collected together from incoming transitions.
+
+![papyrus gs 21](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-21.png)
+
+### Defining Actions
+
+You can associate state entry and exit actions by using a behavior.
+For more about this, see [Defining a Bean Reference](#sm-papyrus-beanref).
+
+#### Using an Initial Action
+
+An initial action (as shown in [Configuring Actions](#statemachine-config-actions)) is defined
+in UML by adding an action in the transition that leads from the Initial State
+marker into the actual state. This action is then run when the state
+machine is started.
+
+### Defining Guards
+
+You can define a guard by first adding a Constraint and then defining
+its Specification as OpaqueExpression, which works in the same way
+as [Defining a Bean Reference](#sm-papyrus-beanref).
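+
+For illustration, the component that such a UML reference ultimately resolves
+to is an ordinary guard bean. The following sketch shows what a guard
+referenced by name from UML might look like (the bean name `myGuard` is only
+an example):
+
+```
+// Minimal sketch: a Guard defined as a bean so that a UML constraint can
+// reference it by name. The bean name "myGuard" is only an example.
+@Bean
+public Guard<String, String> myGuard() {
+    return context -> context.getExtendedState().getVariables().containsKey("key1");
+}
+```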
+
+### Defining a Bean Reference
+
+When you need to make a bean reference in any UML effect,
+action, or guard, you can do so with
+`FunctionBehavior` or `OpaqueBehavior`, where the defined language needs to
+be `bean` and the language body must have a bean reference id.
+
+### Defining a SpEL Reference
+
+When you need to use a SpEL expression instead of a bean reference in
+any UML effect, action, or guard, you can do so by using
+`FunctionBehavior` or `OpaqueBehavior`, where the defined language needs to
+be `spel` and the language body must be a SpEL expression.
+
+### Using a Sub-Machine Reference
+
+Normally, when you use sub-states, you draw them into the state
+chart itself. The chart may become too complex and big to
+follow, so we also support defining a sub-state as a state machine
+reference.
+
+To create a sub-machine reference, you must first create a new diagram and give it a name
+(for example, SubStateMachine Diagram). The following image shows the menu choices to use:
+
+![papyrus gs 12](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-12.png)
+
+Give the new diagram the design you need.
+The following image shows a simple design as an example:
+
+![papyrus gs 13](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-13.png)
+
+From the state you want to link (in this case, state `S2`), click the
+`Submachine` field and choose your linked machine (in our example,
+`SubStateMachine`).
+
+![papyrus gs 14](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-14.png)
+
+Finally, in the following image, you can see that state `S2` is linked to `SubStateMachine` as a
+sub-state.
+
+![papyrus gs 15](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-15.png)
+
+### Using a Machine Import
+
+It is also possible to use import functionality, where UML files can reference other models.
+
+![papyrus gs 22](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/papyrus-gs-22.png)
+
+Within `UmlStateMachineModelFactory`, it is possible to use additional resources or locations
+to define referenced model files, as the following example shows:
+
+```
+@Configuration
+@EnableStateMachine
+public static class Config3 extends StateMachineConfigurerAdapter<String, String> {
+
+    @Override
+    public void configure(StateMachineModelConfigurer<String, String> model) throws Exception {
+        model
+            .withModel()
+                .factory(modelFactory());
+    }
+
+    @Bean
+    public StateMachineModelFactory<String, String> modelFactory() {
+        return new UmlStateMachineModelFactory(
+            "classpath:org/springframework/statemachine/uml/import-main/import-main.uml",
+            new String[] { "classpath:org/springframework/statemachine/uml/import-sub/import-sub.uml" });
+    }
+}
+```
+
+| |Links between files in UML models need to be relative;
otherwise, things break when model files are copied out of the
classpath to a temporary directory so that the Eclipse parsing classes can
read those.|
+|---|---|
+
+## Repository Support
+
+This section contains documentation related to using ‘Spring Data
+Repositories’ in Spring Statemachine.
+
+### Repository Configuration
+
+You can keep machine configuration in external
+storage, from which it can be loaded on demand, instead of creating a static
+configuration with either Java configuration or UML-based configuration. This
+integration works through a Spring Data Repository abstraction.
+
+We have created a special `StateMachineModelFactory` implementation
+called `RepositoryStateMachineModelFactory`. It can use the base
+repository interfaces (`StateRepository`, `TransitionRepository`,
+`ActionRepository`, and `GuardRepository`) and the base entity
+interfaces (`RepositoryState`, `RepositoryTransition`,
+`RepositoryAction`, and `RepositoryGuard`).
+
+Due to the way entities and repositories work in Spring Data,
+from a user perspective, read access can be fully abstracted, as it is
+in `RepositoryStateMachineModelFactory`. There is no need to
+know the actual mapped entity class with which a repository works.
+Writing into a repository is always dependent on using a real
+repository-specific entity class. From a machine-configuration point
+of view, we do not need to know these, meaning that we do not need to know
+whether the actual implementation is JPA, Redis, or anything else
+that Spring Data supports. Using an actual repository-related
+entity class comes into play when you manually try to write new
+states or transitions into a backing repository.
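+
+As a brief illustration, the following sketch shows how
+`RepositoryStateMachineModelFactory` is typically hooked into a machine
+configuration (the class name `RepositoryConfig` is only an example; the
+autowired repositories come from whichever Spring Data implementation you use):
+
+```
+// Minimal sketch: hook RepositoryStateMachineModelFactory into a machine
+// configuration. The autowired repositories are provided by the chosen
+// Spring Data implementation (JPA, Redis, or MongoDB).
+@Configuration
+@EnableStateMachineFactory
+public class RepositoryConfig extends StateMachineConfigurerAdapter<String, String> {
+
+    @Autowired
+    private StateRepository<? extends RepositoryState> stateRepository;
+
+    @Autowired
+    private TransitionRepository<? extends RepositoryTransition> transitionRepository;
+
+    @Override
+    public void configure(StateMachineModelConfigurer<String, String> model) throws Exception {
+        model
+            .withModel()
+                .factory(modelFactory());
+    }
+
+    @Bean
+    public StateMachineModelFactory<String, String> modelFactory() {
+        return new RepositoryStateMachineModelFactory(stateRepository, transitionRepository);
+    }
+}
+```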
+
+| |Entity classes for `RepositoryState` and `RepositoryTransition` have a
+`machineId` field, which is at your disposal and can be used to
+differentiate between configurations — for example, if machines are built
via `StateMachineFactory`.|
+|---|---|
+
+Actual implementations are documented in later sections.
+The following images are UML-equivalent state charts of the repository
+configurations.
+
+![sm repository simplemachine](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-repository-simplemachine.png)
+
+Figure 1. SimpleMachine
+
+![sm repository simplesubmachine](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-repository-simplesubmachine.png)
+
+Figure 2. SimpleSubMachine
+
+![sm repository showcasemachine](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-repository-showcasemachine.png)
+
+Figure 3. ShowcaseMachine
+
+#### JPA
+
+The actual repository implementations for JPA are
+`JpaStateRepository`, `JpaTransitionRepository`, `JpaActionRepository`,
+and `JpaGuardRepository`, which are backed by the
+entity classes `JpaRepositoryState`, `JpaRepositoryTransition`,
+`JpaRepositoryAction`, and `JpaRepositoryGuard`, respectively.
+
+| |Unfortunately, version ‘1.2.8’ had to make a change in JPA’s entity
model regarding the table names used. Previously, generated table names
always had a prefix of `JPA_REPOSITORY_`, derived from entity class
names. As this caused breaking issues with databases imposing
restrictions on database object lengths, all entity classes have
specific definitions to force table names. For example, `JPA_REPOSITORY_STATE` is now ‘STATE’ — and so on with the other
entity classes.|
+|---|---|
+
+The generic way to update states and transitions manually for JPA is shown
+in the following example (equivalent to the machine shown in
+[SimpleMachine](#image-sm-repository-simplemachine)):
+
+```
+@Autowired
+StateRepository<JpaRepositoryState> stateRepository;
+
+@Autowired
+TransitionRepository<JpaRepositoryTransition> transitionRepository;
+
+void addConfig() {
+    JpaRepositoryState stateS1 = new JpaRepositoryState("S1", true);
+    JpaRepositoryState stateS2 = new JpaRepositoryState("S2");
+    JpaRepositoryState stateS3 = new JpaRepositoryState("S3");
+
+    stateRepository.save(stateS1);
+    stateRepository.save(stateS2);
+    stateRepository.save(stateS3);
+
+    JpaRepositoryTransition transitionS1ToS2 = new JpaRepositoryTransition(stateS1, stateS2, "E1");
+    JpaRepositoryTransition transitionS2ToS3 = new JpaRepositoryTransition(stateS2, stateS3, "E2");
+
+    transitionRepository.save(transitionS1ToS2);
+    transitionRepository.save(transitionS2ToS3);
+}
+```
+
+The following example is equivalent to the machine shown in
+[SimpleSubMachine](#image-sm-repository-simplesubmachine):
+
+```
+@Autowired
+StateRepository<JpaRepositoryState> stateRepository;
+
+@Autowired
+TransitionRepository<JpaRepositoryTransition> transitionRepository;
+
+void addConfig() {
+    JpaRepositoryState stateS1 = new JpaRepositoryState("S1", true);
+    JpaRepositoryState stateS2 = new JpaRepositoryState("S2");
+    JpaRepositoryState stateS3 = new JpaRepositoryState("S3");
+
+    JpaRepositoryState stateS21 = new JpaRepositoryState("S21", true);
+    stateS21.setParentState(stateS2);
+    JpaRepositoryState stateS22 = new JpaRepositoryState("S22");
+    stateS22.setParentState(stateS2);
+
+    stateRepository.save(stateS1);
+    stateRepository.save(stateS2);
+    stateRepository.save(stateS3);
+    stateRepository.save(stateS21);
+    stateRepository.save(stateS22);
+
+    JpaRepositoryTransition transitionS1ToS2 = new JpaRepositoryTransition(stateS1, stateS2, "E1");
+    JpaRepositoryTransition transitionS2ToS3 = new JpaRepositoryTransition(stateS2, stateS3, "E2");
+    JpaRepositoryTransition transitionS21ToS22 = new JpaRepositoryTransition(stateS21, stateS22, "E3");
+
+    transitionRepository.save(transitionS1ToS2);
+    transitionRepository.save(transitionS2ToS3);
+    transitionRepository.save(transitionS21ToS22);
+}
+```
+
+The final JPA example (equivalent to the machine shown in
+[ShowcaseMachine](#image-sm-repository-showcasemachine)) is more involved and takes four steps.
+First, you must access all repositories.
+The following example shows how to do so:
+
+```
+@Autowired
+StateRepository<JpaRepositoryState> stateRepository;
+
+@Autowired
+TransitionRepository<JpaRepositoryTransition> transitionRepository;
+
+@Autowired
+ActionRepository<JpaRepositoryAction> actionRepository;
+
+@Autowired
+GuardRepository<JpaRepositoryGuard> guardRepository;
+```
+
+Second, you must create actions and guards.
+The following example shows how to do so:
+
+```
+JpaRepositoryGuard foo0Guard = new JpaRepositoryGuard();
+foo0Guard.setName("foo0Guard");
+
+JpaRepositoryGuard foo1Guard = new JpaRepositoryGuard();
+foo1Guard.setName("foo1Guard");
+
+JpaRepositoryAction fooAction = new JpaRepositoryAction();
+fooAction.setName("fooAction");
+
+guardRepository.save(foo0Guard);
+guardRepository.save(foo1Guard);
+actionRepository.save(fooAction);
+```
+
+Third, you must create states.
+The following example shows how to do so: + +``` +JpaRepositoryState stateS0 = new JpaRepositoryState("S0", true); +stateS0.setInitialAction(fooAction); +JpaRepositoryState stateS1 = new JpaRepositoryState("S1", true); +stateS1.setParentState(stateS0); +JpaRepositoryState stateS11 = new JpaRepositoryState("S11", true); +stateS11.setParentState(stateS1); +JpaRepositoryState stateS12 = new JpaRepositoryState("S12"); +stateS12.setParentState(stateS1); +JpaRepositoryState stateS2 = new JpaRepositoryState("S2"); +stateS2.setParentState(stateS0); +JpaRepositoryState stateS21 = new JpaRepositoryState("S21", true); +stateS21.setParentState(stateS2); +JpaRepositoryState stateS211 = new JpaRepositoryState("S211", true); +stateS211.setParentState(stateS21); +JpaRepositoryState stateS212 = new JpaRepositoryState("S212"); +stateS212.setParentState(stateS21); + +stateRepository.save(stateS0); +stateRepository.save(stateS1); +stateRepository.save(stateS11); +stateRepository.save(stateS12); +stateRepository.save(stateS2); +stateRepository.save(stateS21); +stateRepository.save(stateS211); +stateRepository.save(stateS212); +``` + +Fourth and finally, you must create transitions. +The following example shows how to do so: + +``` +JpaRepositoryTransition transitionS1ToS1 = new JpaRepositoryTransition(stateS1, stateS1, "A"); +transitionS1ToS1.setGuard(foo1Guard); + +JpaRepositoryTransition transitionS1ToS11 = new JpaRepositoryTransition(stateS1, stateS11, "B"); +JpaRepositoryTransition transitionS21ToS211 = new JpaRepositoryTransition(stateS21, stateS211, "B"); +JpaRepositoryTransition transitionS1ToS2 = new JpaRepositoryTransition(stateS1, stateS2, "C"); +JpaRepositoryTransition transitionS1ToS0 = new JpaRepositoryTransition(stateS1, stateS0, "D"); +JpaRepositoryTransition transitionS211ToS21 = new JpaRepositoryTransition(stateS211, stateS21, "D"); +JpaRepositoryTransition transitionS0ToS211 = new JpaRepositoryTransition(stateS0, stateS211, "E"); +JpaRepositoryTransition transitionS1ToS211 = new JpaRepositoryTransition(stateS1, stateS211, "F"); +JpaRepositoryTransition transitionS2ToS21 = new JpaRepositoryTransition(stateS2, stateS21, "F"); +JpaRepositoryTransition transitionS11ToS211 = new JpaRepositoryTransition(stateS11, stateS211, "G"); + +JpaRepositoryTransition transitionS0 = new JpaRepositoryTransition(stateS0, stateS0, "H"); +transitionS0.setKind(TransitionKind.INTERNAL); +transitionS0.setGuard(foo0Guard); +transitionS0.setActions(new HashSet<>(Arrays.asList(fooAction))); + +JpaRepositoryTransition transitionS1 = new JpaRepositoryTransition(stateS1, stateS1, "H"); +transitionS1.setKind(TransitionKind.INTERNAL); + +JpaRepositoryTransition transitionS2 = new JpaRepositoryTransition(stateS2, stateS2, "H"); +transitionS2.setKind(TransitionKind.INTERNAL); +transitionS2.setGuard(foo1Guard); +transitionS2.setActions(new HashSet<>(Arrays.asList(fooAction))); + +JpaRepositoryTransition transitionS11ToS12 = new JpaRepositoryTransition(stateS11, stateS12, "I"); +JpaRepositoryTransition transitionS12ToS212 = new JpaRepositoryTransition(stateS12, stateS212, "I"); +JpaRepositoryTransition transitionS211ToS12 = new JpaRepositoryTransition(stateS211, stateS12, "I"); + +JpaRepositoryTransition transitionS11 = new JpaRepositoryTransition(stateS11, stateS11, "J"); +JpaRepositoryTransition transitionS2ToS1 = new JpaRepositoryTransition(stateS2, stateS1, "K"); + +transitionRepository.save(transitionS1ToS1); +transitionRepository.save(transitionS1ToS11); +transitionRepository.save(transitionS21ToS211); 
+transitionRepository.save(transitionS1ToS2);
+transitionRepository.save(transitionS1ToS0);
+transitionRepository.save(transitionS211ToS21);
+transitionRepository.save(transitionS0ToS211);
+transitionRepository.save(transitionS1ToS211);
+transitionRepository.save(transitionS2ToS21);
+transitionRepository.save(transitionS11ToS211);
+transitionRepository.save(transitionS0);
+transitionRepository.save(transitionS1);
+transitionRepository.save(transitionS2);
+transitionRepository.save(transitionS11ToS12);
+transitionRepository.save(transitionS12ToS212);
+transitionRepository.save(transitionS211ToS12);
+transitionRepository.save(transitionS11);
+transitionRepository.save(transitionS2ToS1);
+```
+
+You can find a complete example [here](#statemachine-examples-datajpa). This example also shows how you can
+pre-populate a repository from an existing JSON file that has
+definitions for the entity classes.
+
+#### Redis
+
+The actual repository implementations for a Redis instance are
+`RedisStateRepository`, `RedisTransitionRepository`, `RedisActionRepository`,
+and `RedisGuardRepository`, which are backed by the
+entity classes `RedisRepositoryState`, `RedisRepositoryTransition`,
+`RedisRepositoryAction`, and `RedisRepositoryGuard`, respectively.
+
+The next example shows the generic way to manually update states and transitions for Redis.
+This is equivalent to the machine shown in
+[SimpleMachine](#image-sm-repository-simplemachine):
+
+```
+@Autowired
+StateRepository<RedisRepositoryState> stateRepository;
+
+@Autowired
+TransitionRepository<RedisRepositoryTransition> transitionRepository;
+
+void addConfig() {
+    RedisRepositoryState stateS1 = new RedisRepositoryState("S1", true);
+    RedisRepositoryState stateS2 = new RedisRepositoryState("S2");
+    RedisRepositoryState stateS3 = new RedisRepositoryState("S3");
+
+    stateRepository.save(stateS1);
+    stateRepository.save(stateS2);
+    stateRepository.save(stateS3);
+
+    RedisRepositoryTransition transitionS1ToS2 = new RedisRepositoryTransition(stateS1, stateS2, "E1");
+    RedisRepositoryTransition transitionS2ToS3 = new RedisRepositoryTransition(stateS2, stateS3, "E2");
+
+    transitionRepository.save(transitionS1ToS2);
+    transitionRepository.save(transitionS2ToS3);
+}
+```
+
+The following example is equivalent to the machine shown in
+[SimpleSubMachine](#image-sm-repository-simplesubmachine):
+
+```
+@Autowired
+StateRepository<RedisRepositoryState> stateRepository;
+
+@Autowired
+TransitionRepository<RedisRepositoryTransition> transitionRepository;
+
+void addConfig() {
+    RedisRepositoryState stateS1 = new RedisRepositoryState("S1", true);
+    RedisRepositoryState stateS2 = new RedisRepositoryState("S2");
+    RedisRepositoryState stateS3 = new RedisRepositoryState("S3");
+
+    RedisRepositoryState stateS21 = new RedisRepositoryState("S21", true);
+    stateS21.setParentState(stateS2);
+    RedisRepositoryState stateS22 = new RedisRepositoryState("S22");
+    stateS22.setParentState(stateS2);
+
+    stateRepository.save(stateS1);
+    stateRepository.save(stateS2);
+    stateRepository.save(stateS3);
+    stateRepository.save(stateS21);
+    stateRepository.save(stateS22);
+
+    RedisRepositoryTransition transitionS1ToS2 = new RedisRepositoryTransition(stateS1, stateS2, "E1");
+    RedisRepositoryTransition transitionS2ToS3 = new RedisRepositoryTransition(stateS2, stateS3, "E2");
+    RedisRepositoryTransition transitionS21ToS22 = new RedisRepositoryTransition(stateS21, stateS22, "E3");
+
+    transitionRepository.save(transitionS1ToS2);
+    transitionRepository.save(transitionS2ToS3);
+    transitionRepository.save(transitionS21ToS22);
+}
+```
+
+#### MongoDB
+
+The actual repository implementations for a MongoDB instance are
+`MongoDbStateRepository`, `MongoDbTransitionRepository`, `MongoDbActionRepository`,
+and `MongoDbGuardRepository`, which are backed by the
+entity classes `MongoDbRepositoryState`, `MongoDbRepositoryTransition`,
+`MongoDbRepositoryAction`, and `MongoDbRepositoryGuard`, respectively.
+
+The next example shows the generic way to manually update states and transitions for MongoDB.
+This is equivalent to the machine shown in
+[SimpleMachine](#image-sm-repository-simplemachine):
+
+```
+@Autowired
+StateRepository<MongoDbRepositoryState> stateRepository;
+
+@Autowired
+TransitionRepository<MongoDbRepositoryTransition> transitionRepository;
+
+void addConfig() {
+    MongoDbRepositoryState stateS1 = new MongoDbRepositoryState("S1", true);
+    MongoDbRepositoryState stateS2 = new MongoDbRepositoryState("S2");
+    MongoDbRepositoryState stateS3 = new MongoDbRepositoryState("S3");
+
+    stateRepository.save(stateS1);
+    stateRepository.save(stateS2);
+    stateRepository.save(stateS3);
+
+    MongoDbRepositoryTransition transitionS1ToS2 = new MongoDbRepositoryTransition(stateS1, stateS2, "E1");
+    MongoDbRepositoryTransition transitionS2ToS3 = new MongoDbRepositoryTransition(stateS2, stateS3, "E2");
+
+    transitionRepository.save(transitionS1ToS2);
+    transitionRepository.save(transitionS2ToS3);
+}
+```
+
+The following example is equivalent to the machine shown in
+[SimpleSubMachine](#image-sm-repository-simplesubmachine):
+
+```
+@Autowired
+StateRepository<MongoDbRepositoryState> stateRepository;
+
+@Autowired
+TransitionRepository<MongoDbRepositoryTransition> transitionRepository;
+
+void addConfig() {
+    MongoDbRepositoryState stateS1 = new MongoDbRepositoryState("S1", true);
+    MongoDbRepositoryState stateS2 = new MongoDbRepositoryState("S2");
+    MongoDbRepositoryState stateS3 = new MongoDbRepositoryState("S3");
+
+    MongoDbRepositoryState stateS21 = new MongoDbRepositoryState("S21", true);
+    stateS21.setParentState(stateS2);
+    MongoDbRepositoryState stateS22 = new MongoDbRepositoryState("S22");
+    stateS22.setParentState(stateS2);
+
+    stateRepository.save(stateS1);
+    stateRepository.save(stateS2);
+    stateRepository.save(stateS3);
+    stateRepository.save(stateS21);
+    stateRepository.save(stateS22);
+
+    MongoDbRepositoryTransition transitionS1ToS2 = new MongoDbRepositoryTransition(stateS1, stateS2, "E1");
+    MongoDbRepositoryTransition transitionS2ToS3 = new MongoDbRepositoryTransition(stateS2, stateS3, "E2");
+    MongoDbRepositoryTransition transitionS21ToS22 = new MongoDbRepositoryTransition(stateS21, stateS22, "E3");
+
+    transitionRepository.save(transitionS1ToS2);
+    transitionRepository.save(transitionS2ToS3);
+    transitionRepository.save(transitionS21ToS22);
+}
+```
+
+### Repository Persistence
+
+Apart from storing machine configuration (as shown in
+[Repository Configuration](#sm-repository-config)) in an external repository, you can also
+persist machines into repositories.
+
+The `StateMachineRepository` interface is a central access point that
+interacts with machine persistence and is backed by the entity class
+`RepositoryStateMachine`.
+
+#### JPA
+
+The actual repository implementation for JPA is
+`JpaStateMachineRepository`, which is backed by the entity class
+`JpaRepositoryStateMachine`.
+
+The following example shows the generic way to persist a machine for JPA:
+
+```
+@Autowired
+StateMachineRepository<JpaRepositoryStateMachine> stateMachineRepository;
+
+void persist() {
+
+    JpaRepositoryStateMachine machine = new JpaRepositoryStateMachine();
+    machine.setMachineId("machine");
+    machine.setState("S1");
+    // raw byte[] representation of a context
+    machine.setStateMachineContext(new byte[] { 0 });
+
+    stateMachineRepository.save(machine);
+}
+```
+
+#### Redis
+
+The actual repository implementation for Redis is
+`RedisStateMachineRepository`, which is backed by the entity class
+`RedisRepositoryStateMachine`.
+
+The following example shows the generic way to persist a machine for Redis:
+
+```
+@Autowired
+StateMachineRepository<RedisRepositoryStateMachine> stateMachineRepository;
+
+void persist() {
+
+    RedisRepositoryStateMachine machine = new RedisRepositoryStateMachine();
+    machine.setMachineId("machine");
+    machine.setState("S1");
+    // raw byte[] representation of a context
+    machine.setStateMachineContext(new byte[] { 0 });
+
+    stateMachineRepository.save(machine);
+}
+```
+
+#### MongoDB
+
+The actual repository implementation for MongoDB is
+`MongoDbStateMachineRepository`, which is backed by the entity class
+`MongoDbRepositoryStateMachine`.
+
+The following example shows the generic way to persist a machine for MongoDB:
+
+```
+@Autowired
+StateMachineRepository<MongoDbRepositoryStateMachine> stateMachineRepository;
+
+void persist() {
+
+    MongoDbRepositoryStateMachine machine = new MongoDbRepositoryStateMachine();
+    machine.setMachineId("machine");
+    machine.setState("S1");
+    // raw byte[] representation of a context
+    machine.setStateMachineContext(new byte[] { 0 });
+
+    stateMachineRepository.save(machine);
+}
+```
+
+# Recipes
+
+This chapter contains documentation for the existing built-in state
+machine recipes.
+
+Spring Statemachine is a foundational framework. That is, it does not have much
+higher-level functionality or many dependencies beyond Spring Framework.
+Consequently, correctly using a state machine may be difficult. To help,
+we have created a set of recipe modules that address common use cases.
+
+What exactly is a recipe? A state machine recipe is a module that addresses a common
+use case. In essence, a state machine recipe is an example that we have tried to
+make easy for you to reuse and extend.
+
+| |Recipes are a great way to make external contributions to the Spring
Statemachine project. If you are not ready to contribute to the
framework core itself, a custom and common recipe is a great way to
share functionality with other users.|
+|---|---|
+
+## Persist
+
+The persist recipe is a simple utility that lets you use a single state
+machine instance to persist and update the state of an arbitrary item in
+a repository.
+
+The recipe’s main class is `PersistStateMachineHandler`, which makes three assumptions:
+
+* An instance of a `StateMachine` needs to be used
+  with a `PersistStateMachineHandler`. Note that states and events are required
+  to be of type `String`.
+
+* A `PersistStateChangeListener` needs to be registered with the handler
+  to react to persist requests.
+
+* The `handleEventWithState` method is used to orchestrate state changes.
+
+You can find a sample that shows how to use this recipe at
+[Persist](#statemachine-examples-persist).
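+
+To make those assumptions concrete, the following sketch builds a handler
+around an existing machine, registers a listener, and orchestrates a state
+change (the machine, the event `E1`, the current state `S1`, and the empty
+listener body are illustrative assumptions):
+
+```
+// Minimal sketch: wire a PersistStateMachineHandler with a listener and
+// orchestrate a state change for an item currently in state "S1".
+StateMachine<String, String> stateMachine;
+
+void handleWithPersist() {
+    PersistStateMachineHandler handler = new PersistStateMachineHandler(stateMachine);
+    handler.addPersistStateChangeListener((state, message, transition, machine) -> {
+        // persist the new state of your item here
+    });
+    handler.handleEventWithState(MessageBuilder.withPayload("E1").build(), "S1");
+}
+```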
## Tasks

The tasks recipe is a concept for running a DAG (Directed Acyclic Graph) of `Runnable` instances by using
a state machine. This recipe has been developed from ideas introduced
in the [Tasks](#statemachine-examples-tasks) sample.

The next image shows the generic concept of the state machine. In this state chart,
everything under `TASKS` shows a generic concept of how a single
task is executed. Because this recipe lets you register a deep
hierarchical DAG of tasks (meaning a real state chart would be a deeply
nested collection of sub-states and regions), we have no need to be
more precise.

For example, if you have only two registered tasks, the following state chart
would be correct when `TASK_id` is replaced with `TASK_1` and `TASK_2` (assuming
the registered task IDs are `1` and `2`).

statechart9

Executing a `Runnable` may result in an error. Especially if a complex
DAG of tasks is involved, you want to have a way to handle
task execution errors and then a way to continue execution
without re-running already successfully executed tasks. Also,
it would be nice if some execution errors could be handled
automatically. As a last fallback, if an error cannot be handled
automatically, the state machine is put into a state where the user can handle
errors manually.

`TasksHandler` contains a builder method to configure a handler instance
and follows a simple builder pattern. You can use this builder to
register `Runnable` tasks and `TasksListener` instances and to define a
`StateMachinePersist` hook.

Now we can take a `Runnable` that runs a simple sleep, as the following
example shows:

```
private Runnable sleepRunnable() {
    return new Runnable() {

        @Override
        public void run() {
            try {
                Thread.sleep(2000);
            } catch (InterruptedException e) {
            }
        }
    };
}
```

| |The preceding example is the base for all of the examples in this chapter.|
|---|---|

To execute multiple `sleepRunnable` tasks, you can register the tasks and
run the `runTasks()` method from `TasksHandler`, as the following example shows:

```
TasksHandler handler = TasksHandler.builder()
        .task("1", sleepRunnable())
        .task("2", sleepRunnable())
        .task("3", sleepRunnable())
        .build();

handler.runTasks();
```

To listen to what is happening with task execution, you can register an instance of
a `TasksListener` with a `TasksHandler`. This recipe
provides an adapter, `TasksListenerAdapter`, if you do not want to
implement the full interface. The listener provides various hooks for
listening to task execution events. The following example shows the definition of the
`MyTasksListener` class:

```
private class MyTasksListener extends TasksListenerAdapter {

    @Override
    public void onTasksStarted() {
    }

    @Override
    public void onTasksContinue() {
    }

    @Override
    public void onTaskPreExecute(Object id) {
    }

    @Override
    public void onTaskPostExecute(Object id) {
    }

    @Override
    public void onTaskFailed(Object id, Exception exception) {
    }

    @Override
    public void onTaskSuccess(Object id) {
    }

    @Override
    public void onTasksSuccess() {
    }

    @Override
    public void onTasksError() {
    }

    @Override
    public void onTasksAutomaticFix(TasksHandler handler, StateContext context) {
    }
}
```

You can either register listeners by using the builder or register them directly with a
`TasksHandler`, as the following example shows:

```
MyTasksListener listener1 = new MyTasksListener();
MyTasksListener listener2 = new MyTasksListener();

TasksHandler handler = TasksHandler.builder()
        .task("1", sleepRunnable())
        .task("2", sleepRunnable())
        .task("3", sleepRunnable())
        .listener(listener1)
        .build();

handler.addTasksListener(listener2);
handler.removeTasksListener(listener2);

handler.runTasks();
```

Every task
needs to have a unique identifier and, optionally, a task can be
defined to be a sub-task. Effectively, this creates a DAG of tasks.
The following example shows how to create a deeply nested DAG of tasks:

```
TasksHandler handler = TasksHandler.builder()
        .task("1", sleepRunnable())
        .task("1", "12", sleepRunnable())
        .task("1", "13", sleepRunnable())
        .task("2", sleepRunnable())
        .task("2", "22", sleepRunnable())
        .task("2", "23", sleepRunnable())
        .task("3", sleepRunnable())
        .task("3", "32", sleepRunnable())
        .task("3", "33", sleepRunnable())
        .build();

handler.runTasks();
```

When an error happens and the state machine that runs these tasks goes into an
`ERROR` state, you can call the `fixCurrentProblems` handler method to
reset the current state of the tasks kept in the state machine’s extended state
variables. You can then use the `continueFromError` handler method to
instruct the state machine to transition from the `ERROR` state back to the
`READY` state, where you can again run tasks.
The following example shows how to do so:

```
TasksHandler handler = TasksHandler.builder()
        .task("1", sleepRunnable())
        .task("2", sleepRunnable())
        .task("3", sleepRunnable())
        .build();

handler.runTasks();
handler.fixCurrentProblems();
handler.continueFromError();
```
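Putting these pieces together, a listener can also drive that recovery automatically. The following is a deliberately naive sketch (not part of the recipe API) that fixes the problems and resumes whenever a run ends in an error, using only the `TasksHandler` methods shown above:

```
// A naive sketch: whenever a run ends in error, reset the task flags
// and move the machine from ERROR back to READY.
TasksHandler handler = TasksHandler.builder()
        .task("1", sleepRunnable())
        .task("2", sleepRunnable())
        .build();

handler.addTasksListener(new TasksListenerAdapter() {

    @Override
    public void onTasksError() {
        handler.fixCurrentProblems();
        handler.continueFromError();
    }
});

handler.runTasks();
```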
# State Machine Examples

This part of the reference documentation explains the use of state
machines together with sample code and UML state charts. We use a few
shortcuts when representing the relationship between a state chart, the Spring Statemachine
configuration, and what an application does with a state machine. For
complete examples, you should study the samples repository.

Samples are built directly from the main source distribution during a
normal build cycle. This chapter includes the following samples:

[Turnstile](#statemachine-examples-turnstile)

[Turnstile Reactive](#statemachine-examples-turnstilereactive)

[Showcase](#statemachine-examples-showcase)

[CD Player](#statemachine-examples-cdplayer)

[Tasks](#statemachine-examples-tasks)

[Washer](#statemachine-examples-washer)

[Persist](#statemachine-examples-persist)

[Zookeeper](#statemachine-examples-zookeeper)

[Web](#statemachine-examples-web)

[Scope](#statemachine-examples-scope)

[Security](#statemachine-examples-security)

[Event Service](#statemachine-examples-eventservice)

[Deploy](#statemachine-examples-deploy)

[Order Shipping](#statemachine-examples-ordershipping)

[JPA Configuration](#statemachine-examples-datajpa)

[Data Persist](#statemachine-examples-datapersist)

[Data JPA Persist](#statemachine-examples-datajpapersist)

[Data Multi Persist](#statemachine-examples-datajpamultipersist)

[Monitoring](#statemachine-examples-monitoring)

The following listing shows how to build the samples:

```
./gradlew clean build -x test
```

Every sample is located in its own directory under
`spring-statemachine-samples`. The samples are based on Spring Boot and
Spring Shell, and you can find the usual Boot fat jars under every sample
project’s `build/libs` directory.

| |The filenames for the jars to which we refer in this section are populated during a
build of this document, meaning that, if you build samples from
master, you have files with a `BUILD-SNAPSHOT` postfix.|
|---|---|

## Turnstile

Turnstile is a simple device that gives you access if a payment is
made. It is a concept that is simple to model with a state machine. In its
simplest form, there are only two states: `LOCKED` and `UNLOCKED`. Two
events, `COIN` and `PUSH`, can happen, depending on whether someone
makes a payment or tries to go through the turnstile.
The following image shows the state machine:

statechart1

The following listing shows the enumeration that defines the possible states:

States

```
public enum States {
    LOCKED, UNLOCKED
}
```

The following listing shows the enumeration that defines the events:

Events

```
public enum Events {
    COIN, PUSH
}
```

The following listing shows the code that configures the state machine:

Configuration

```
@Configuration
@EnableStateMachine
static class StateMachineConfig
        extends EnumStateMachineConfigurerAdapter<States, Events> {

    @Override
    public void configure(StateMachineStateConfigurer<States, Events> states)
            throws Exception {
        states
            .withStates()
                .initial(States.LOCKED)
                .states(EnumSet.allOf(States.class));
    }

    @Override
    public void configure(StateMachineTransitionConfigurer<States, Events> transitions)
            throws Exception {
        transitions
            .withExternal()
                .source(States.LOCKED)
                .target(States.UNLOCKED)
                .event(Events.COIN)
                .and()
            .withExternal()
                .source(States.UNLOCKED)
                .target(States.LOCKED)
                .event(Events.PUSH);
    }

}
```

You can see how this sample state machine interacts with events by
running the `turnstile` sample. The following listing shows how to do so
and shows the command’s output:

```
$ java -jar spring-statemachine-samples-turnstile-3.0.1.jar

sm>sm print
+----------------------------------------------------------------+
|                               SM                               |
+----------------------------------------------------------------+
|                                                                |
|         +----------------+          +----------------+         |
|     *-->|     LOCKED     |          |    UNLOCKED    |         |
|         +----------------+          +----------------+         |
|     +---| entry/         |          | entry/         |---+     |
|     |   | exit/          |          | exit/          |   |     |
|     |   |                |          |                |   |     |
| PUSH|   |                |---COIN-->|                |   |COIN |
|     |   |                |          |                |   |     |
|     |   |                |          |                |   |     |
|     |   |                |<--PUSH---|                |   |     |
|     +-->|                |          |                |<--+     |
|         |                |          |                |         |
|         +----------------+          +----------------+         |
|                                                                |
+----------------------------------------------------------------+

sm>sm start
State changed to LOCKED
State machine started

sm>sm event COIN
State changed to UNLOCKED
Event COIN send

sm>sm event PUSH
State changed to LOCKED
Event PUSH send
```
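The shell drives the machine interactively, but you can drive the same machine programmatically through the reactive APIs used throughout this document. The following is a minimal sketch, assuming the configuration above is active and the machine is autowired; the `demo` method is an illustrative placeholder:

```
// A minimal sketch of driving the turnstile machine from code.
@Autowired
private StateMachine<States, Events> stateMachine;

void demo() {
    // Start the machine; it enters its initial state, LOCKED.
    stateMachine.startReactively().subscribe();

    // COIN unlocks the turnstile: LOCKED -> UNLOCKED.
    stateMachine
        .sendEvent(Mono.just(MessageBuilder.withPayload(Events.COIN).build()))
        .subscribe();

    // PUSH locks it again: UNLOCKED -> LOCKED.
    stateMachine
        .sendEvent(Mono.just(MessageBuilder.withPayload(Events.PUSH).build()))
        .subscribe();
}
```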
## Turnstile Reactive

Turnstile Reactive is an enhancement of the [Turnstile](#statemachine-examples-turnstile) sample. It uses the
same *StateMachine* concept and adds a reactive web layer that communicates with
the *StateMachine*’s reactive interfaces.

`StateMachineController` is a simple `@RestController` into which we autowire our `StateMachine`.

```
@Autowired
private StateMachine<String, String> stateMachine;
```

We first create a mapping that returns the machine’s state. As the state does not come out of
a machine reactively, we can *defer* it so that the
actual state is requested only when the returned `Mono` is subscribed.

```
@GetMapping("/state")
public Mono<String> state() {
    return Mono.defer(() -> Mono.justOrEmpty(stateMachine.getState().getId()));
}
```

To send a single event or multiple events to the machine, we can use a `Flux` in both the
incoming and outgoing layers. `EventResult` exists only for this sample and simply
wraps `ResultType` and the event.

```
@PostMapping("/events")
public Flux<EventResult> events(@RequestBody Flux<EventData> eventData) {
    return eventData
        .filter(ed -> ed.getEvent() != null)
        .map(ed -> MessageBuilder.withPayload(ed.getEvent()).build())
        .flatMap(m -> stateMachine.sendEvent(Mono.just(m)))
        .map(EventResult::new);
}
```

You can use the following command to run the sample:

```
$ java -jar spring-statemachine-samples-turnstilereactive-3.0.1.jar
```

Example of getting the state:

```
GET http://localhost:8080/state
```

The response would then be:

```
"LOCKED"
```

Example of sending an event:

```
POST http://localhost:8080/events
content-type: application/json

{
  "event": "COIN"
}
```

The response would then be:

```
[
  {
    "event": "COIN",
    "resultType": "ACCEPTED"
  }
]
```

You can post multiple events:

```
POST http://localhost:8080/events
content-type: application/json

[
  {
    "event": "COIN"
  },
  {
    "event": "PUSH"
  }
]
```

The response then contains the results for both events:

```
[
  {
    "event": "COIN",
    "resultType": "ACCEPTED"
  },
  {
    "event": "PUSH",
    "resultType": "ACCEPTED"
  }
]
```

## Showcase

Showcase is a complex state machine that shows all possible transition
topologies up to four levels of state nesting.
The following image shows the state machine:

statechart2

The following listing shows the enumeration that defines the possible states:

States

```
public enum States {
    S0, S1, S11, S12, S2, S21, S211, S212
}
```

The following listing shows the enumeration that defines the events:

Events

```
public enum Events {
    A, B, C, D, E, F, G, H, I
}
```

The following listing shows the code that configures the state machine:

Configuration - states

```
@Override
public void configure(StateMachineStateConfigurer<States, Events> states)
        throws Exception {
    states
        .withStates()
            .initial(States.S0, fooAction())
            .state(States.S0)
            .and()
            .withStates()
                .parent(States.S0)
                .initial(States.S1)
                .state(States.S1)
                .and()
                .withStates()
                    .parent(States.S1)
                    .initial(States.S11)
                    .state(States.S11)
                    .state(States.S12)
                    .and()
            .withStates()
                .parent(States.S0)
                .state(States.S2)
                .and()
                .withStates()
                    .parent(States.S2)
                    .initial(States.S21)
                    .state(States.S21)
                    .and()
                    .withStates()
                        .parent(States.S21)
                        .initial(States.S211)
                        .state(States.S211)
                        .state(States.S212);
}
```
The following listing shows the code that configures the state machine’s transitions:

Configuration - transitions

```
@Override
public void configure(StateMachineTransitionConfigurer<States, Events> transitions)
        throws Exception {
    transitions
        .withExternal()
            .source(States.S1).target(States.S1).event(Events.A)
            .guard(foo1Guard())
            .and()
        .withExternal()
            .source(States.S1).target(States.S11).event(Events.B)
            .and()
        .withExternal()
            .source(States.S21).target(States.S211).event(Events.B)
            .and()
        .withExternal()
            .source(States.S1).target(States.S2).event(Events.C)
            .and()
        .withExternal()
            .source(States.S2).target(States.S1).event(Events.C)
            .and()
        .withExternal()
            .source(States.S1).target(States.S0).event(Events.D)
            .and()
        .withExternal()
            .source(States.S211).target(States.S21).event(Events.D)
            .and()
        .withExternal()
            .source(States.S0).target(States.S211).event(Events.E)
            .and()
        .withExternal()
            .source(States.S1).target(States.S211).event(Events.F)
            .and()
        .withExternal()
            .source(States.S2).target(States.S11).event(Events.F)
            .and()
        .withExternal()
            .source(States.S11).target(States.S211).event(Events.G)
            .and()
        .withExternal()
            .source(States.S211).target(States.S0).event(Events.G)
            .and()
        .withInternal()
            .source(States.S0).event(Events.H)
            .guard(foo0Guard())
            .action(fooAction())
            .and()
        .withInternal()
            .source(States.S2).event(Events.H)
            .guard(foo1Guard())
            .action(fooAction())
            .and()
        .withInternal()
            .source(States.S1).event(Events.H)
            .and()
        .withExternal()
            .source(States.S11).target(States.S12).event(Events.I)
            .and()
        .withExternal()
            .source(States.S211).target(States.S212).event(Events.I)
            .and()
        .withExternal()
            .source(States.S12).target(States.S212).event(Events.I);

}
```

The following listing shows the code that configures the state machine’s actions and guards:

Configuration - actions and guards

```
@Bean
public FooGuard foo0Guard() {
    return new FooGuard(0);
}

@Bean
public FooGuard foo1Guard() {
    return new FooGuard(1);
}

@Bean
public FooAction fooAction() {
    return new FooAction();
}
```

The following listing shows how the single action is defined:

Action

```
private static class FooAction implements Action<States, Events> {

    @Override
    public void execute(StateContext<States, Events> context) {
        Map<Object, Object> variables = context.getExtendedState().getVariables();
        Integer foo = context.getExtendedState().get("foo", Integer.class);
        if (foo == null) {
            log.info("Init foo to 0");
            variables.put("foo", 0);
        } else if (foo == 0) {
            log.info("Switch foo to 1");
            variables.put("foo", 1);
        } else if (foo == 1) {
            log.info("Switch foo to 0");
            variables.put("foo", 0);
        }
    }
}
```

The following listing shows how the single guard is defined:

Guard

```
private static class FooGuard implements Guard<States, Events> {

    private final int match;

    public FooGuard(int match) {
        this.match = match;
    }

    @Override
    public boolean evaluate(StateContext<States, Events> context) {
        Object foo = context.getExtendedState().getVariables().get("foo");
        return !(foo == null || !foo.equals(match));
    }
}
```

The following listing shows the output that this state machine produces when it runs and
various events are sent to it:

```
sm>sm start
Init foo to 0
Entry state S0
Entry state S1
Entry state S11
State machine started

sm>sm event A
Event A send

sm>sm event C
Exit state S11
Exit state S1
Entry state S2
Entry state S21
Entry state S211
Event C send

sm>sm event H
Switch foo to 1
Internal transition source=S0
Event H send

sm>sm event C
Exit state S211
Exit state S21
Exit state S2
Entry state S1
Entry state S11
Event C send

sm>sm event A
Exit state S11
Exit state S1
Entry state S1
Entry state S11
Event A send
```

In the preceding output, we can see that:

* The state machine is started, which takes it to its initial state (`S11`)
through its superstates (`S1` and `S0`). Also, the extended state variable `foo` is
initialized to `0`.

* We try to execute a self transition in state `S1` with event `A`, but
nothing happens, because the transition is guarded to require variable `foo` to
be `1`.
* We send event `C`, which takes us to the other state machine, where
the initial state (`S211`) and its superstates are entered. In there, we
can use event `H`, which does a simple internal transition to flip the
`foo` variable. Then we go back by using event `C`.

* Event `A` is sent again, and now `S1` does a self transition, because the
guard evaluates to `true`.

The following example offers a closer look at how hierarchical states and their event
handling work:

```
sm>sm variables
No variables

sm>sm start
Init foo to 0
Entry state S0
Entry state S1
Entry state S11
State machine started

sm>sm variables
foo=0

sm>sm event H
Internal transition source=S1
Event H send

sm>sm variables
foo=0

sm>sm event C
Exit state S11
Exit state S1
Entry state S2
Entry state S21
Entry state S211
Event C send

sm>sm variables
foo=0

sm>sm event H
Switch foo to 1
Internal transition source=S0
Event H send

sm>sm variables
foo=1

sm>sm event H
Switch foo to 0
Internal transition source=S2
Event H send

sm>sm variables
foo=0
```

In the preceding sample:

* We print the extended state variables at various stages.

* With event `H`, we end up running an internal transition,
which is logged with its source state.

* Note how event `H` is handled in
different states (`S0`, `S1`, and `S2`). This is a good example of how
hierarchical states and their event handling work. If state `S2` is
unable to handle event `H` due to a guard condition, its parent is
checked next. This guarantees that, while the machine is in state `S2`, the `foo` flag
is always flipped around. However, in state `S1`, event `H` always
matches the guardless and actionless transition defined there, so the `foo` flag is
never flipped.

## CD Player

CD Player is a sample that resembles a use case many people have
encountered in the real world. A CD player itself is a really simple entity: it lets a
user open the deck, insert or change a disc, and then drive the player’s
functionality by pressing various buttons (`eject`, `play`, `stop`, `pause`, `rewind`, and `backward`).

How many of us have really given thought to what it takes to
write code that interacts with hardware to drive a CD player? Yes, the
concept of a player is simple but, if you look behind the scenes,
things actually get a bit convoluted.

You have probably noticed that, if your deck is open and you press play, the
deck closes and a song starts to play (if a CD was inserted).
In a sense, when the deck is open, you first need to close
it and then try to start playing (again, only if a CD is actually inserted). Hopefully,
you have now realized that a simple CD player is not so simple after all.
Sure, you can wrap all of this in a simple class with a few boolean variables
and probably a few nested if-else clauses. That will do the job, but what
if you need to make all this behavior much more complex? Do you
really want to keep adding more flags and if-else clauses?

The following image shows the state machine for our simple CD player:

statechart3

The rest of this section goes through how this sample and its state machine are designed and
how the two interact with each other. The following three configuration sections
are used within an `EnumStateMachineConfigurerAdapter`.
```
@Override
public void configure(StateMachineStateConfigurer<States, Events> states)
        throws Exception {
    states
        .withStates()
            .initial(States.IDLE)
            .state(States.IDLE)
            .and()
            .withStates()
                .parent(States.IDLE)
                .initial(States.CLOSED)
                .state(States.CLOSED, closedEntryAction(), null)
                .state(States.OPEN)
                .and()
        .withStates()
            .state(States.BUSY)
            .and()
            .withStates()
                .parent(States.BUSY)
                .initial(States.PLAYING)
                .state(States.PLAYING)
                .state(States.PAUSED);

}
```

```
@Override
public void configure(StateMachineTransitionConfigurer<States, Events> transitions)
        throws Exception {
    transitions
        .withExternal()
            .source(States.CLOSED).target(States.OPEN).event(Events.EJECT)
            .and()
        .withExternal()
            .source(States.OPEN).target(States.CLOSED).event(Events.EJECT)
            .and()
        .withExternal()
            .source(States.OPEN).target(States.CLOSED).event(Events.PLAY)
            .and()
        .withExternal()
            .source(States.PLAYING).target(States.PAUSED).event(Events.PAUSE)
            .and()
        .withInternal()
            .source(States.PLAYING)
            .action(playingAction())
            .timer(1000)
            .and()
        .withInternal()
            .source(States.PLAYING).event(Events.BACK)
            .action(trackAction())
            .and()
        .withInternal()
            .source(States.PLAYING).event(Events.FORWARD)
            .action(trackAction())
            .and()
        .withExternal()
            .source(States.PAUSED).target(States.PLAYING).event(Events.PAUSE)
            .and()
        .withExternal()
            .source(States.BUSY).target(States.IDLE).event(Events.STOP)
            .and()
        .withExternal()
            .source(States.IDLE).target(States.BUSY).event(Events.PLAY)
            .action(playAction())
            .guard(playGuard())
            .and()
        .withInternal()
            .source(States.OPEN).event(Events.LOAD).action(loadAction());
}
```

```
@Bean
public ClosedEntryAction closedEntryAction() {
    return new ClosedEntryAction();
}

@Bean
public LoadAction loadAction() {
    return new LoadAction();
}

@Bean
public TrackAction trackAction() {
    return new TrackAction();
}

@Bean
public PlayAction playAction() {
    return new PlayAction();
}

@Bean
public PlayingAction playingAction() {
    return new PlayingAction();
}

@Bean
public PlayGuard playGuard() {
    return new PlayGuard();
}
```

In the preceding configuration:

* We used `EnumStateMachineConfigurerAdapter` to configure states and
transitions.

* The `CLOSED` and `OPEN` states are defined as substates of `IDLE`, and
the `PLAYING` and `PAUSED` states are defined as substates of `BUSY`.

* For the `CLOSED` state, we added an entry action as a bean called
`closedEntryAction`.

* In the transitions, we mostly map events to expected state
transitions, such as `EJECT` closing and opening the deck and `PLAY`, `STOP`,
and `PAUSE` doing their natural transitions. For other transitions, we did the following:

  * For source state `PLAYING`, we added a timer trigger, which is
needed to automatically track elapsed time within a playing track and
to have a facility for deciding when to switch to the next track.

  * For the `PLAY` event, if the source state is `IDLE` and the target state is
`BUSY`, we defined an action called `playAction` and a guard called `playGuard`.

  * For the `LOAD` event and the `OPEN` state, we defined an internal
transition with an action called `loadAction`, which tracks inserting a disc by using
extended-state variables.

  * The `PLAYING` state defines three internal transitions. One is
triggered by a timer and runs an action called `playingAction`, which updates
the extended state variables. The other two transitions use `trackAction`
with different events (`BACK` and `FORWARD`, respectively) to handle
when the user wants to go back or forward in tracks.

This machine has only six states, which are defined by the following enumeration:

```
public enum States {
    // super state of PLAYING and PAUSED
    BUSY,
    PLAYING,
    PAUSED,
    // super state of CLOSED and OPEN
    IDLE,
    CLOSED,
    OPEN
}
```

Events represent the buttons the user can
press and whether the user loads a disc into the player.
The following enumeration defines the events:

```
public enum Events {
    PLAY, STOP, PAUSE, EJECT, LOAD, FORWARD, BACK
}
```

The `cdPlayer` and `library` beans are used to drive the application.
The following listing shows the definition of these two beans:

```
@Bean
public CdPlayer cdPlayer() {
    return new CdPlayer();
}

@Bean
public Library library() {
    return Library.buildSampleLibrary();
}
```

We define extended state variable keys as simple enumerations,
as the following listing shows:

```
public enum Variables {
    CD, TRACK, ELAPSEDTIME
}

public enum Headers {
    TRACKSHIFT
}
```

We wanted to make this sample type safe, so we define our own
annotation (`@StatesOnTransition`), which has a mandatory meta
annotation (`@OnTransition`).
The following listing defines the `@StatesOnTransition` annotation:

```
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@OnTransition
public @interface StatesOnTransition {

    States[] source() default {};

    States[] target() default {};

}
```

`ClosedEntryAction` is an entry action for the `CLOSED` state that
sends a `PLAY` event to the state machine if a disc is present.
The following listing defines `ClosedEntryAction`:

```
public static class ClosedEntryAction implements Action<States, Events> {

    @Override
    public void execute(StateContext<States, Events> context) {
        if (context.getTransition() != null
                && context.getEvent() == Events.PLAY
                && context.getTransition().getTarget().getId() == States.CLOSED
                && context.getExtendedState().getVariables().get(Variables.CD) != null) {
            context.getStateMachine()
                .sendEvent(Mono.just(MessageBuilder
                    .withPayload(Events.PLAY).build()))
                .subscribe();
        }
    }
}
```

`LoadAction` updates an extended state variable if the event
headers contain information about a disc to load.
The following listing defines `LoadAction`:

```
public static class LoadAction implements Action<States, Events> {

    @Override
    public void execute(StateContext<States, Events> context) {
        Object cd = context.getMessageHeader(Variables.CD);
        context.getExtendedState().getVariables().put(Variables.CD, cd);
    }
}
```

`PlayAction` resets the player’s elapsed time, which is kept as
an extended state variable.
The following listing defines `PlayAction`:

```
public static class PlayAction implements Action<States, Events> {

    @Override
    public void execute(StateContext<States, Events> context) {
        context.getExtendedState().getVariables().put(Variables.ELAPSEDTIME, 0l);
        context.getExtendedState().getVariables().put(Variables.TRACK, 0);
    }
}
```

`PlayGuard` guards the transition from `IDLE` to `BUSY` on the `PLAY`
event, so that the transition happens only if the `CD` extended state variable
indicates that a disc has been loaded.
The following listing defines `PlayGuard`:

```
public static class PlayGuard implements Guard<States, Events> {

    @Override
    public boolean evaluate(StateContext<States, Events> context) {
        ExtendedState extendedState = context.getExtendedState();
        return extendedState.getVariables().get(Variables.CD) != null;
    }
}
```

`PlayingAction` updates an extended state variable called `ELAPSEDTIME`, which
the player can use to read and update its LCD status display. `PlayingAction` also handles
track shifting when the user goes back or forward in tracks.
The following example defines `PlayingAction`:

```
public static class PlayingAction implements Action<States, Events> {

    @Override
    public void execute(StateContext<States, Events> context) {
        Map<Object, Object> variables = context.getExtendedState().getVariables();
        Object elapsed = variables.get(Variables.ELAPSEDTIME);
        Object cd = variables.get(Variables.CD);
        Object track = variables.get(Variables.TRACK);
        if (elapsed instanceof Long) {
            long e = ((Long)elapsed) + 1000l;
            if (e > ((Cd) cd).getTracks()[((Integer) track)].getLength()*1000) {
                context.getStateMachine()
                    .sendEvent(Mono.just(MessageBuilder
                        .withPayload(Events.FORWARD)
                        .setHeader(Headers.TRACKSHIFT.toString(), 1).build()))
                    .subscribe();
            } else {
                variables.put(Variables.ELAPSEDTIME, e);
            }
        }
    }
}
```

`TrackAction` handles track-shift actions when the user goes back or forward
in tracks. If a track is the last one on the disc, playing is stopped and the `STOP`
event is sent to the state machine.
The following example defines `TrackAction`:

```
public static class TrackAction implements Action<States, Events> {

    @Override
    public void execute(StateContext<States, Events> context) {
        Map<Object, Object> variables = context.getExtendedState().getVariables();
        Object trackshift = context.getMessageHeader(Headers.TRACKSHIFT.toString());
        Object track = variables.get(Variables.TRACK);
        Object cd = variables.get(Variables.CD);
        if (trackshift instanceof Integer && track instanceof Integer && cd instanceof Cd) {
            int next = ((Integer)track) + ((Integer)trackshift);
            if (next >= 0 && ((Cd)cd).getTracks().length > next) {
                variables.put(Variables.ELAPSEDTIME, 0l);
                variables.put(Variables.TRACK, next);
            } else if (((Cd)cd).getTracks().length <= next) {
                context.getStateMachine()
                    .sendEvent(Mono.just(MessageBuilder
                        .withPayload(Events.STOP).build()))
                    .subscribe();
            }
        }
    }
}
```

One other important aspect of state machines is that they have their
own responsibilities (mostly around handling states), so all application-level
logic should be kept outside. This means that applications need
a way to interact with the state machine. Also, note
that we annotated `CdPlayer` with `@WithStateMachine`, which instructs the
state machine to find methods in your POJO that are then called
on various transitions.
The following example shows how it updates its LCD status display:

```
@OnTransition(target = "BUSY")
public void busy(ExtendedState extendedState) {
    Object cd = extendedState.getVariables().get(Variables.CD);
    if (cd != null) {
        cdStatus = ((Cd)cd).getName();
    }
}
```

In the preceding example, we use the `@OnTransition` annotation to hook a callback
that is called when a transition happens with a target state of `BUSY`.
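For orientation, the following skeleton is a simplified sketch (not the full sample class) of where these callback methods live. The class-level `@WithStateMachine` annotation is what makes Spring Statemachine scan the bean for `@OnTransition`-style methods; the field shown is the same `cdStatus` used above:

```
// A simplified sketch of the CdPlayer POJO: @WithStateMachine makes
// Spring Statemachine call the annotated methods on matching transitions.
@WithStateMachine
public class CdPlayer {

    private String cdStatus = "No CD";

    @OnTransition(target = "BUSY")
    public void busy(ExtendedState extendedState) {
        Object cd = extendedState.getVariables().get(Variables.CD);
        if (cd != null) {
            cdStatus = ((Cd) cd).getName();
        }
    }
}
```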
The following listing shows how our state machine handles whether the player is closed:

```
@StatesOnTransition(target = {States.CLOSED, States.IDLE})
public void closed(ExtendedState extendedState) {
    Object cd = extendedState.getVariables().get(Variables.CD);
    if (cd != null) {
        cdStatus = ((Cd)cd).getName();
    } else {
        cdStatus = "No CD";
    }
    trackStatus = "";
}
```

`@OnTransition` (which we used in the preceding examples) can only be
used with strings that are matched from enumerations. `@StatesOnTransition`
lets you create your own type-safe annotations that use real enumerations.

The following example shows how this state machine actually works:

```
sm>sm start
Entry state IDLE
Entry state CLOSED
State machine started

sm>cd lcd
No CD

sm>cd library
0: Greatest Hits
  0: Bohemian Rhapsody  05:56
  1: Another One Bites the Dust  03:36
1: Greatest Hits II
  0: A Kind of Magic  04:22
  1: Under Pressure  04:08

sm>cd eject
Exit state CLOSED
Entry state OPEN

sm>cd load 0
Loading cd Greatest Hits

sm>cd play
Exit state OPEN
Entry state CLOSED
Exit state CLOSED
Exit state IDLE
Entry state BUSY
Entry state PLAYING

sm>cd lcd
Greatest Hits Bohemian Rhapsody 00:03

sm>cd forward

sm>cd lcd
Greatest Hits Another One Bites the Dust 00:04

sm>cd stop
Exit state PLAYING
Exit state BUSY
Entry state IDLE
Entry state CLOSED

sm>cd lcd
Greatest Hits
```

In the preceding run:

* The state machine is started, which causes the machine to be initialized.

* The CD player’s LCD screen status is printed.

* The CD library is printed.

* The CD player’s deck is opened.

* The CD with index 0 is loaded into the deck.

* Play causes the deck to close and playing to start immediately, because a disc
was inserted.

* We print the LCD status and request the next track.

* We stop playing.

## Tasks

The Tasks sample demonstrates parallel task handling within
regions and adds error handling to either
automatically or manually fix task problems before continuing back
to a state where the tasks can be run again.
The following image shows the Tasks state machine:

statechart5

At a high level, in this state machine:

* We always try to get into the `READY` state so that we can use the
`RUN` event to execute tasks.

* The `TASKS` state, which is composed of three independent regions, has been
put in the middle of the `FORK` and `JOIN` states, which causes the regions to
go into their initial states and to be joined by their end states.

* From the `JOIN` state, we automatically go into a `CHOICE` state, which checks
for the existence of error flags in the extended state variables. Tasks can set
these flags, and doing so gives the `CHOICE` state the ability to go into the `ERROR`
state, where errors can be handled either automatically or manually.

* The `AUTOMATIC` state in `ERROR` can try to fix an error automatically and goes
back to `READY` if it succeeds. If the error is something that
cannot be handled automatically, user intervention is needed and the
machine is put into the `MANUAL` state by the `FALLBACK` event.
The following listing shows the enumeration that defines the possible states:

States

```
public enum States {
    READY,
    FORK, JOIN, CHOICE,
    TASKS, T1, T1E, T2, T2E, T3, T3E,
    ERROR, AUTOMATIC, MANUAL
}
```

The following listing shows the enumeration that defines the events:

Events

```
public enum Events {
    RUN, FALLBACK, CONTINUE, FIX;
}
```

The following listing configures the possible states:

Configuration - states

```
@Override
public void configure(StateMachineStateConfigurer<States, Events> states)
        throws Exception {
    states
        .withStates()
            .initial(States.READY)
            .fork(States.FORK)
            .state(States.TASKS)
            .join(States.JOIN)
            .choice(States.CHOICE)
            .state(States.ERROR)
            .and()
        .withStates()
            .parent(States.TASKS)
            .initial(States.T1)
            .end(States.T1E)
            .and()
        .withStates()
            .parent(States.TASKS)
            .initial(States.T2)
            .end(States.T2E)
            .and()
        .withStates()
            .parent(States.TASKS)
            .initial(States.T3)
            .end(States.T3E)
            .and()
        .withStates()
            .parent(States.ERROR)
            .initial(States.AUTOMATIC)
            .state(States.AUTOMATIC, automaticAction(), null)
            .state(States.MANUAL);
}
```

The following listing configures the possible transitions:

Configuration - transitions

```
@Override
public void configure(StateMachineTransitionConfigurer<States, Events> transitions)
        throws Exception {
    transitions
        .withExternal()
            .source(States.READY).target(States.FORK)
            .event(Events.RUN)
            .and()
        .withFork()
            .source(States.FORK).target(States.TASKS)
            .and()
        .withExternal()
            .source(States.T1).target(States.T1E)
            .and()
        .withExternal()
            .source(States.T2).target(States.T2E)
            .and()
        .withExternal()
            .source(States.T3).target(States.T3E)
            .and()
        .withJoin()
            .source(States.TASKS).target(States.JOIN)
            .and()
        .withExternal()
            .source(States.JOIN).target(States.CHOICE)
            .and()
        .withChoice()
            .source(States.CHOICE)
            .first(States.ERROR, tasksChoiceGuard())
            .last(States.READY)
            .and()
        .withExternal()
            .source(States.ERROR).target(States.READY)
            .event(Events.CONTINUE)
            .and()
        .withExternal()
            .source(States.AUTOMATIC).target(States.MANUAL)
            .event(Events.FALLBACK)
            .and()
        .withInternal()
            .source(States.MANUAL)
            .action(fixAction())
            .event(Events.FIX);
}
```

The following guard routes the choice entry into the `ERROR` state, so it needs to
return `TRUE` when an error has happened. To detect this, the guard checks whether
all of the extended state variables (`T1`, `T2`, and `T3`) are `TRUE` and returns the
negated result:

```
@Bean
public Guard<States, Events> tasksChoiceGuard() {
    return new Guard<States, Events>() {

        @Override
        public boolean evaluate(StateContext<States, Events> context) {
            Map<Object, Object> variables = context.getExtendedState().getVariables();
            return !(ObjectUtils.nullSafeEquals(variables.get("T1"), true)
                    && ObjectUtils.nullSafeEquals(variables.get("T2"), true)
                    && ObjectUtils.nullSafeEquals(variables.get("T3"), true));
        }
    };
}
```

The following actions send events to the state machine to request
the next step, which is either to fall back or to continue back to ready.
```
@Bean
public Action<States, Events> automaticAction() {
    return new Action<States, Events>() {

        @Override
        public void execute(StateContext<States, Events> context) {
            Map<Object, Object> variables = context.getExtendedState().getVariables();
            if (ObjectUtils.nullSafeEquals(variables.get("T1"), true)
                    && ObjectUtils.nullSafeEquals(variables.get("T2"), true)
                    && ObjectUtils.nullSafeEquals(variables.get("T3"), true)) {
                context.getStateMachine()
                    .sendEvent(Mono.just(MessageBuilder
                        .withPayload(Events.CONTINUE).build()))
                    .subscribe();
            } else {
                context.getStateMachine()
                    .sendEvent(Mono.just(MessageBuilder
                        .withPayload(Events.FALLBACK).build()))
                    .subscribe();
            }
        }
    };
}

@Bean
public Action<States, Events> fixAction() {
    return new Action<States, Events>() {

        @Override
        public void execute(StateContext<States, Events> context) {
            Map<Object, Object> variables = context.getExtendedState().getVariables();
            variables.put("T1", true);
            variables.put("T2", true);
            variables.put("T3", true);
            context.getStateMachine()
                .sendEvent(Mono.just(MessageBuilder
                    .withPayload(Events.CONTINUE).build()))
                .subscribe();
        }
    };
}
```

By default, region execution is synchronous, meaning that the regions would be processed
sequentially. In this sample, we want all task regions to be processed in
parallel. This can be accomplished by defining a `RegionExecutionPolicy`:

```
@Override
public void configure(StateMachineConfigurationConfigurer<States, Events> config)
        throws Exception {
    config
        .withConfiguration()
            .regionExecutionPolicy(RegionExecutionPolicy.PARALLEL);
}
```

The following example shows how this state machine actually works:

```
sm>sm start
State machine started
Entry state READY

sm>tasks run
Exit state READY
Entry state TASKS
run task on T2
run task on T1
run task on T3
run task on T2 done
run task on T1 done
run task on T3 done
Entry state T2
Entry state T1
Entry state T3
Exit state T2
Exit state T1
Exit state T3
Entry state T3E
Entry state T1E
Entry state T2E
Exit state TASKS
Entry state READY
```

In the preceding listing, we can see that the tasks run in parallel.
In the next listing, we introduce errors:

```
sm>tasks list
Tasks {T1=true, T3=true, T2=true}

sm>tasks fail T1

sm>tasks list
Tasks {T1=false, T3=true, T2=true}

sm>tasks run
Entry state TASKS
run task on T1
run task on T3
run task on T2
run task on T1 done
run task on T3 done
run task on T2 done
Entry state T1
Entry state T3
Entry state T2
Entry state T1E
Entry state T2E
Entry state T3E
Exit state TASKS
Entry state JOIN
Exit state JOIN
Entry state ERROR
Entry state AUTOMATIC
Exit state AUTOMATIC
Exit state ERROR
Entry state READY
```

In the preceding listing, when we simulate a failure for task `T1`, it is fixed
automatically.
In the next listing, we introduce more errors:

```
sm>tasks list
Tasks {T1=true, T3=true, T2=true}

sm>tasks fail T2

sm>tasks run
Entry state TASKS
run task on T2
run task on T1
run task on T3
run task on T2 done
run task on T1 done
run task on T3 done
Entry state T2
Entry state T1
Entry state T3
Entry state T1E
Entry state T2E
Entry state T3E
Exit state TASKS
Entry state JOIN
Exit state JOIN
Entry state ERROR
Entry state AUTOMATIC
Exit state AUTOMATIC
Entry state MANUAL

sm>tasks fix
Exit state MANUAL
Exit state ERROR
Entry state READY
```

In the preceding example, when we simulate a failure for either task `T2` or `T3`, the state
machine goes to the `MANUAL` state, where the problem needs to be fixed manually
before the machine can go back to the `READY` state.
## Washer

The washer sample demonstrates how to use a history state to recover a
running state configuration in a simulated power-off situation.

Anyone who has ever used a washing machine knows that, if you somehow pause
the program, it continues from the same state when unpaused.
You can implement this kind of behavior in a state machine by using
a history pseudostate.
The following image shows our state machine for a washer:

statechart6

The following listing shows the enumeration that defines the possible states:

States

```
public enum States {
    RUNNING, HISTORY, END,
    WASHING, RINSING, DRYING,
    POWEROFF
}
```

The following listing shows the enumeration that defines the events:

Events

```
public enum Events {
    RINSE, DRY, STOP,
    RESTOREPOWER, CUTPOWER
}
```

The following listing configures the possible states:

Configuration - states

```
@Override
public void configure(StateMachineStateConfigurer<States, Events> states)
        throws Exception {
    states
        .withStates()
            .initial(States.RUNNING)
            .state(States.POWEROFF)
            .end(States.END)
            .and()
        .withStates()
            .parent(States.RUNNING)
            .initial(States.WASHING)
            .state(States.RINSING)
            .state(States.DRYING)
            .history(States.HISTORY, History.SHALLOW);
}
```

The following listing configures the possible transitions:

Configuration - transitions

```
@Override
public void configure(StateMachineTransitionConfigurer<States, Events> transitions)
        throws Exception {
    transitions
        .withExternal()
            .source(States.WASHING).target(States.RINSING)
            .event(Events.RINSE)
            .and()
        .withExternal()
            .source(States.RINSING).target(States.DRYING)
            .event(Events.DRY)
            .and()
        .withExternal()
            .source(States.RUNNING).target(States.POWEROFF)
            .event(Events.CUTPOWER)
            .and()
        .withExternal()
            .source(States.POWEROFF).target(States.HISTORY)
            .event(Events.RESTOREPOWER)
            .and()
        .withExternal()
            .source(States.RUNNING).target(States.END)
            .event(Events.STOP);
}
```

The following example shows how this state machine actually works:

```
sm>sm start
Entry state RUNNING
Entry state WASHING
State machine started

sm>sm event RINSE
Exit state WASHING
Entry state RINSING
Event RINSE send

sm>sm event DRY
Exit state RINSING
Entry state DRYING
Event DRY send

sm>sm event CUTPOWER
Exit state DRYING
Exit state RUNNING
Entry state POWEROFF
Event CUTPOWER send

sm>sm event RESTOREPOWER
Exit state POWEROFF
Entry state RUNNING
Entry state WASHING
Entry state DRYING
Event RESTOREPOWER send
```

In the preceding run:

* The state machine is started, which causes the machine to be initialized.

* The state machine goes to the `RINSING` state.

* The state machine goes to the `DRYING` state.

* The power is cut and the state machine goes to the `POWEROFF` state.

* The state is restored through the `HISTORY` state, which takes the state machine back
to its previous known state.

## Persist

Persist is a sample that uses the [Persist](#statemachine-recipes-persist) recipe to
demonstrate how database entry update logic can be controlled by a
state machine.
The following image shows the state machine logic and configuration:

statechart10

The following listing shows the state machine configuration:

StateMachine Config

```
@Configuration
@EnableStateMachine
static class StateMachineConfig
        extends StateMachineConfigurerAdapter<String, String> {

    @Override
    public void configure(StateMachineStateConfigurer<String, String> states)
            throws Exception {
        states
            .withStates()
                .initial("PLACED")
                .state("PROCESSING")
                .state("SENT")
                .state("DELIVERED");
    }

    @Override
    public void configure(StateMachineTransitionConfigurer<String, String> transitions)
            throws Exception {
        transitions
            .withExternal()
                .source("PLACED").target("PROCESSING")
                .event("PROCESS")
                .and()
            .withExternal()
                .source("PROCESSING").target("SENT")
                .event("SEND")
                .and()
            .withExternal()
                .source("SENT").target("DELIVERED")
                .event("DELIVER");
    }

}
```

The following configuration creates a `PersistStateMachineHandler`:

Handler Config

```
@Configuration
static class PersistHandlerConfig {

    @Autowired
    private StateMachine<String, String> stateMachine;

    @Bean
    public Persist persist() {
        return new Persist(persistStateMachineHandler());
    }

    @Bean
    public PersistStateMachineHandler persistStateMachineHandler() {
        return new PersistStateMachineHandler(stateMachine);
    }

}
```

The following listing shows the `Order` class used with this sample:

Order Class

```
public static class Order {
    int id;
    String state;

    public Order(int id, String state) {
        this.id = id;
        this.state = state;
    }

    @Override
    public String toString() {
        return "Order [id=" + id + ", state=" + state + "]";
    }

}
```

The following example shows the state machine’s output:

```
sm>persist db
Order [id=1, state=PLACED]
Order [id=2, state=PROCESSING]
Order [id=3, state=SENT]
Order [id=4, state=DELIVERED]

sm>persist process 1
Exit state PLACED
Entry state PROCESSING

sm>persist db
Order [id=2, state=PROCESSING]
Order [id=3, state=SENT]
Order [id=4, state=DELIVERED]
Order [id=1, state=PROCESSING]

sm>persist deliver 3
Exit state SENT
Entry state DELIVERED

sm>persist db
Order [id=2, state=PROCESSING]
Order [id=4, state=DELIVERED]
Order [id=1, state=PROCESSING]
Order [id=3, state=DELIVERED]
```

In the preceding run, the state machine:

* Listed rows from an existing embedded database, which is already
populated with sample data.

* Was requested to update order `1` into the `PROCESSING` state.

* Listed the database entries again, showing that the state has changed from
`PLACED` to `PROCESSING`.

* Updated order `3` to move its state from `SENT` to `DELIVERED`.

| |You may wonder where the database is, because there are literally no
signs of it in the sample code. The sample is based on Spring Boot and,
because the necessary classes are on the classpath, an embedded `HSQL` instance
is created automatically.

Spring Boot even creates an instance of `JdbcTemplate`, which you
can autowire, as we did in `Persist.java`, shown in the following listing:

```
@Autowired
private JdbcTemplate jdbcTemplate;
```|
|---|---|

Next, we need to handle state changes. The following listing shows how we do so:

```
public void change(int order, String event) {
    Order o = jdbcTemplate.queryForObject("select id, state from orders where id = ?",
            new RowMapper<Order>() {
                public Order mapRow(ResultSet rs, int rowNum) throws SQLException {
                    return new Order(rs.getInt("id"), rs.getString("state"));
                }
            }, new Object[] { order });
    handler.handleEventWithStateReactively(MessageBuilder
            .withPayload(event).setHeader("order", order).build(), o.state)
        .subscribe();
}
```

Finally, we use a `PersistStateChangeListener` to update the database, as the
following listing shows:

```
private class LocalPersistStateChangeListener implements PersistStateChangeListener {

    @Override
    public void onPersist(State<String, String> state, Message<String> message,
            Transition<String, String> transition, StateMachine<String, String> stateMachine) {
        if (message != null && message.getHeaders().containsKey("order")) {
            Integer order = message.getHeaders().get("order", Integer.class);
            jdbcTemplate.update("update orders set state = ? where id = ?", state.getId(), order);
        }
    }
}
```

## Zookeeper

Zookeeper is a distributed version of the [Turnstile](#statemachine-examples-turnstile) sample.

| |This sample needs an external `Zookeeper` instance that is accessible from `localhost` and has the default port and settings.|
|---|---|

The configuration of this sample is almost the same as in the `turnstile` sample. We
add only the configuration for the distributed state machine, where we
configure a `StateMachineEnsemble`, as the following listing shows:

```
@Override
public void configure(StateMachineConfigurationConfigurer<States, Events> config) throws Exception {
    config
        .withDistributed()
            .ensemble(stateMachineEnsemble());
}
```

The actual `StateMachineEnsemble` needs to be created as a bean, together
with the `CuratorFramework` client, as the following example shows:

```
@Bean
public StateMachineEnsemble<States, Events> stateMachineEnsemble() throws Exception {
    return new ZookeeperStateMachineEnsemble<States, Events>(curatorClient(), "/foo");
}

@Bean
public CuratorFramework curatorClient() throws Exception {
    CuratorFramework client = CuratorFrameworkFactory.builder().defaultData(new byte[0])
            .retryPolicy(new ExponentialBackoffRetry(1000, 3))
            .connectString("localhost:2181").build();
    client.start();
    return client;
}
```

For the next example, we need two different shell instances: we
create one instance, see what happens, and only then start the second one.
The following command starts a shell instance (remember to start only one instance for now):

```
@n1:~# java -jar spring-statemachine-samples-zookeeper-3.0.1.jar
```

When the state machine is started, its initial state is `LOCKED`. We then
send a `COIN` event to transition into the `UNLOCKED` state.
The following example shows what happens:

Shell1

```
sm>sm start
Entry state LOCKED
State machine started

sm>sm event COIN
Exit state LOCKED
Entry state UNLOCKED
Event COIN send

sm>sm state
UNLOCKED
```

Now you can open the second shell instance and start its state machine
by using the same command that you used to start the first one. You should see
that the distributed state (`UNLOCKED`) is entered instead of the default
initial state (`LOCKED`).

The following example shows the state machine and its output:

Shell2

```
sm>sm start
State machine started

sm>sm state
UNLOCKED
```

Then, from either shell (we use the second instance in the next example), send a
`PUSH` event to transition from the `UNLOCKED` state to the `LOCKED` state.
The following example shows the state machine command and its output:

Shell2

```
sm>sm event PUSH
Exit state UNLOCKED
Entry state LOCKED
Event PUSH send
```

In the other shell (the first shell, if you ran the preceding command in the second shell),
you should see the state change automatically,
based on the distributed state kept in Zookeeper.
The following example shows the state machine command and its output:

Shell1

```
sm>Exit state UNLOCKED
Entry state LOCKED
```

## Web

Web is a distributed state machine example that uses a Zookeeper-backed state machine to handle
distributed state. See [Zookeeper](#statemachine-examples-zookeeper).

| |This example is meant to be run in multiple browser sessions against multiple different hosts.|
|---|---|

This sample uses a modified state machine structure from the [Showcase](#statemachine-examples-showcase) sample to work with a distributed state
machine. The following image shows the state machine logic:

statechart11

| |Due to the nature of this sample, an instance of a `Zookeeper` state machine is expected to be available on localhost for every individual sample instance.|
|---|---|

This demonstration starts three different sample instances.
If you run the instances on the same host, you need to
distinguish the port each one uses by adding `--server.port=` to the command.
Otherwise, the default port for each host is `8080`.

In this sample run, we have three hosts: `n1`, `n2`, and `n3`. Each one
has a local Zookeeper instance running and a state machine sample running
on port `8080`.

In three different terminals, start the three state machines by running
the following command:

```
# java -jar spring-statemachine-samples-web-3.0.1.jar
```

When all instances are running, you should see that they all show similar
information when you access them with a browser. The states should be `S0`, `S1`, and `S11`,
the extended state variable named `foo` should have a value of `0`, and the main state is `S11`.

sm dist n1 1

When you press the `Event C` button in any of the browser windows, the
distributed state is changed to `S211`, which is the target state
denoted by the transition associated with an event of type `C`.
The following image shows the change:

sm dist n2 2

Now we can press the `Event H` button and see that the
internal transition runs on all state machines to change
the value of the extended state variable named `foo` from `0` to `1`. This change is
first done on the state machine that receives the event and is then propagated
to the other state machines. You should see only the variable named `foo` change
from `0` to `1`.

sm dist n3 3

Finally, we can send `Event K`, which takes the state
machine back to state `S11`. You should see this happen in
all of the browsers. The following image shows the result in one browser:

sm dist n1 4

## Scope

Scope is a state machine example that uses session scope to provide an
individual state machine instance for every user.
The following image shows the states and events within the Scope state machine:

statechart12

This simple state machine has three states: `S0`, `S1`, and `S2`.
Transitions between them are controlled by three events: `A`, `B`, and `C`.

To start the state machine, run the following command in a terminal:

```
# java -jar spring-statemachine-samples-scope-3.0.1.jar
```

When the instance is running, you can open a browser and play with the state
machine. If you open the same page in a different browser (for example, one in
Chrome and one in Firefox), you get a new state machine
instance for each user session.
The following image shows the state machine in a browser:

sm scope 1
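The sample ships its own configuration, but the session-scoping idea can be sketched as follows. This is a minimal illustration rather than the sample's literal code: the `States` and `Events` enums and the exact transition mapping between `S0`, `S1`, and `S2` are assumptions here; the point is that declaring the machine bean with `session` scope and a class proxy gives every HTTP session its own instance:

```
// A minimal sketch: a session-scoped state machine bean, so each
// user session gets its own proxied StateMachine instance.
@Bean
@Scope(value = "session", proxyMode = ScopedProxyMode.TARGET_CLASS)
public StateMachine<States, Events> stateMachine() throws Exception {
    Builder<States, Events> builder = StateMachineBuilder.builder();

    builder.configureStates()
        .withStates()
            .initial(States.S0)
            .states(EnumSet.allOf(States.class));

    // Illustrative transitions only; the sample defines its own mapping.
    builder.configureTransitions()
        .withExternal()
            .source(States.S0).target(States.S1).event(Events.A)
            .and()
        .withExternal()
            .source(States.S1).target(States.S2).event(Events.B)
            .and()
        .withExternal()
            .source(States.S2).target(States.S0).event(Events.C);

    return builder.build();
}
```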
## Security

Security is a state machine example that uses most of the possible combinations of
ways to secure a state machine. It secures event sending, transitions,
and actions.
The following image shows the state machine’s states and events:

statechart13

To start the state machine, run the following command:

```
# java -jar spring-statemachine-samples-secure-3.0.1.jar
```

We secure event sending by requiring that users have the `USER` role.
Spring Security ensures that no other users can send events to this
state machine.
The following listing secures event sending:

```
@Override
public void configure(StateMachineConfigurationConfigurer<States, Events> config)
        throws Exception {
    config
        .withConfiguration()
            .autoStartup(true)
            .and()
        .withSecurity()
            .enabled(true)
            .event("hasRole('USER')");
}
```

In this sample, we define two users:

* A user named `user`, who has the `USER` role

* A user named `admin`, who has two roles: `USER` and `ADMIN`

The password for both users is `password`.
The following listing configures the two users:

```
@EnableWebSecurity
@EnableGlobalMethodSecurity(securedEnabled = true)
static class SecurityConfig extends WebSecurityConfigurerAdapter {

    @Autowired
    public void configureGlobal(AuthenticationManagerBuilder auth) throws Exception {
        auth
            .inMemoryAuthentication()
                .withUser("user")
                    .password("password")
                    .roles("USER")
                    .and()
                .withUser("admin")
                    .password("password")
                    .roles("USER", "ADMIN");
    }
}
```

We define various transitions between states, according to the state chart
shown at the beginning of this example. Only a user with an active `ADMIN` role can run
the external transitions between `S2` and `S3`. Similarly, only an `ADMIN` can
run the internal transition in the `S1` state.
The following listing defines the transitions, including their security:

```
@Override
public void configure(StateMachineTransitionConfigurer<States, Events> transitions)
        throws Exception {
    transitions
        .withExternal()
            .source(States.S0).target(States.S1).event(Events.A)
            .and()
        .withExternal()
            .source(States.S1).target(States.S2).event(Events.B)
            .and()
        .withExternal()
            .source(States.S2).target(States.S0).event(Events.C)
            .and()
        .withExternal()
            .source(States.S2).target(States.S3).event(Events.E)
            .secured("ROLE_ADMIN", ComparisonType.ANY)
            .and()
        .withExternal()
            .source(States.S3).target(States.S0).event(Events.C)
            .and()
        .withInternal()
            .source(States.S0).event(Events.D)
            .action(adminAction())
            .and()
        .withInternal()
            .source(States.S1).event(Events.F)
            .action(transitionAction())
            .secured("ROLE_ADMIN", ComparisonType.ANY);
}
```

The following listing uses a method called `adminAction` whose return type is `Action` to
specify that the action is secured with the `ADMIN` role:

```
@Scope(proxyMode = ScopedProxyMode.TARGET_CLASS)
@Bean
public Action<States, Events> adminAction() {
    return new Action<States, Events>() {

        @Secured("ROLE_ADMIN")
        @Override
        public void execute(StateContext<States, Events> context) {
            log.info("Executed only for admin role");
        }
    };
}
```

The following `Action` runs an internal transition in state `S1` when event `F` is sent:

```
@Bean
public Action<States, Events> transitionAction() {
    return new Action<States, Events>() {

        @Override
        public void execute(StateContext<States, Events> context) {
            log.info("Executed only for admin role");
        }
    };
}
```

The transition itself is secured with the
`ADMIN` role, so this transition does not run if the current user
does not have that role.

## Event Service

The event service example shows how you can use state machine concepts as
a processing engine for events. This sample evolved from a question:
can you use Spring Statemachine as a microservice to feed events to
different state machine instances? In fact, Spring Statemachine can feed
events to potentially millions of different state machine instances.

This example uses a `Redis` instance to persist the state machine
instances.

Obviously, keeping a million state machine instances alive in a JVM would be
a bad idea, due to memory constraints.
This leads to +other features of Spring Statemachine that let you persist a`StateMachineContext` and re-use existing instances. + +For this example, we assume that a shopping application +sends different types of `PageView` events to a separate +microservice which then tracks user behavior by using a state +machine. The following image shows the state model, which has a few states +that represent a user navigating a product items list, adding and removing +items from a cart, going to a payment page, and initiating a payment +operation: + +statechart14 + +An actual shopping application would send these events into +this service by (for example) using a rest call. More about this +later. + +| |Remember that the focus here is to have an application that exposes a`REST` API that the user can use to send events that can be processed by a
The following state machine configuration models what we have in the state chart. Various actions update the state machine's `Extended State` to track the number of entries into various states and also how many times the internal transitions for `ADD` and `DEL` are called and whether `PAY` has been executed:

```
@Bean(name = "stateMachineTarget")
@Scope(scopeName="prototype")
public StateMachine stateMachineTarget() throws Exception {
    Builder builder = StateMachineBuilder.builder();

    builder.configureConfiguration()
        .withConfiguration()
            .autoStartup(true);

    builder.configureStates()
        .withStates()
            .initial(States.HOME)
            .states(EnumSet.allOf(States.class));

    builder.configureTransitions()
        .withInternal()
            .source(States.ITEMS).event(Events.ADD)
            .action(addAction())
            .and()
        .withInternal()
            .source(States.CART).event(Events.DEL)
            .action(delAction())
            .and()
        .withInternal()
            .source(States.PAYMENT).event(Events.PAY)
            .action(payAction())
            .and()
        .withExternal()
            .source(States.HOME).target(States.ITEMS)
            .action(pageviewAction())
            .event(Events.VIEW_I)
            .and()
        .withExternal()
            .source(States.CART).target(States.ITEMS)
            .action(pageviewAction())
            .event(Events.VIEW_I)
            .and()
        .withExternal()
            .source(States.ITEMS).target(States.CART)
            .action(pageviewAction())
            .event(Events.VIEW_C)
            .and()
        .withExternal()
            .source(States.PAYMENT).target(States.CART)
            .action(pageviewAction())
            .event(Events.VIEW_C)
            .and()
        .withExternal()
            .source(States.CART).target(States.PAYMENT)
            .action(pageviewAction())
            .event(Events.VIEW_P)
            .and()
        .withExternal()
            .source(States.ITEMS).target(States.HOME)
            .action(resetAction())
            .event(Events.RESET)
            .and()
        .withExternal()
            .source(States.CART).target(States.HOME)
            .action(resetAction())
            .event(Events.RESET)
            .and()
        .withExternal()
            .source(States.PAYMENT).target(States.HOME)
            .action(resetAction())
            .event(Events.RESET);

    return builder.build();
}
```

Do not focus on `stateMachineTarget` or `@Scope` for now, as we explain those later in this section.

We set up a `RedisConnectionFactory` that defaults to localhost and the default port. We use `StateMachinePersist` with a `RepositoryStateMachinePersist` implementation. Finally, we create a `RedisStateMachinePersister` that uses the previously created `StateMachinePersist` bean.

These are then used in a `Controller` that handles `REST` calls, as the following listing shows:

```
@Bean
public RedisConnectionFactory redisConnectionFactory() {
    return new JedisConnectionFactory();
}

@Bean
public StateMachinePersist stateMachinePersist(RedisConnectionFactory connectionFactory) {
    RedisStateMachineContextRepository repository =
        new RedisStateMachineContextRepository(connectionFactory);
    return new RepositoryStateMachinePersist(repository);
}

@Bean
public RedisStateMachinePersister redisStateMachinePersister(
        StateMachinePersist stateMachinePersist) {
    return new RedisStateMachinePersister(stateMachinePersist);
}
```

We create a bean named `stateMachineTarget`. State machine instantiation is a relatively expensive operation, so it is better to try to pool instances instead of instantiating a new instance for every request.
To do so, we first create a `poolTargetSource` that wraps `stateMachineTarget` and pools it with a maximum size of three. We then proxy this `poolTargetSource` with `ProxyFactoryBean` by using a `request` scope. Effectively, this means that every `REST` request gets a pooled state machine instance from a bean factory. Later, we show how these instances are used.
The following listing shows how we create the `ProxyFactoryBean` and set the target source:

```
@Bean
@Scope(value = "request", proxyMode = ScopedProxyMode.TARGET_CLASS)
public ProxyFactoryBean stateMachine() {
    ProxyFactoryBean pfb = new ProxyFactoryBean();
    pfb.setTargetSource(poolTargetSource());
    return pfb;
}
```

The following listing shows how we set the maximum size and set the target bean name:

```
@Bean
public CommonsPool2TargetSource poolTargetSource() {
    CommonsPool2TargetSource pool = new CommonsPool2TargetSource();
    pool.setMaxSize(3);
    pool.setTargetBeanName("stateMachineTarget");
    return pool;
}
```

Now we can get into the actual demo. You need to have a Redis server running on localhost with default settings. Then you need to run the Boot-based sample application by running the following command:

```
# java -jar spring-statemachine-samples-eventservice-3.0.1.jar
```

In a browser, you see something like the following:

sm eventservice 1

In this UI, you can use three users: `joe`, `bob`, and `dave`. Clicking a button shows the current state and the extended state. Enabling a radio button before clicking a button sends a particular event for that user. This arrangement lets you play with the UI.

In our `StateMachineController`, we autowire `StateMachine` and `StateMachinePersister`. `StateMachine` is `request` scoped, so you get a new instance for each request, while `StateMachinePersister` is a normal singleton bean.
The following listing autowires `StateMachine` and `StateMachinePersister`:

```
@Autowired
private StateMachine stateMachine;

@Autowired
private StateMachinePersister stateMachinePersister;
```

In the following listing, `feedAndGetState` is used with a UI to do the same things that an actual `REST` API might do:

```
@RequestMapping("/state")
public String feedAndGetState(@RequestParam(value = "user", required = false) String user,
        @RequestParam(value = "id", required = false) Events id, Model model) throws Exception {
    model.addAttribute("user", user);
    model.addAttribute("allTypes", Events.values());
    model.addAttribute("stateChartModel", stateChartModel);
    // we may get into this page without a user so
    // do nothing with a state machine
    if (StringUtils.hasText(user)) {
        resetStateMachineFromStore(user);
        if (id != null) {
            feedMachine(user, id);
        }
        model.addAttribute("states", stateMachine.getState().getIds());
        model.addAttribute("extendedState", stateMachine.getExtendedState().getVariables());
    }
    return "states";
}
```
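The next listing shows a `REST` method that binds a `Pageview` JSON payload. The `Pageview` class itself is not shown in this guide; a minimal sketch compatible with the accessors used below (our assumption, not the sample's actual source) could look like this:

```
public class Pageview {

    private String user;
    private Events id;

    public String getUser() {
        return user;
    }

    public void setUser(String user) {
        this.user = user;
    }

    public Events getId() {
        return id;
    }

    public void setId(Events id) {
        this.id = id;
    }
}
```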
In the following listing, `feedPageview` is a `REST` method that accepts a POST request with JSON content:

```
@RequestMapping(value = "/feed", method = RequestMethod.POST)
@ResponseStatus(HttpStatus.OK)
public void feedPageview(@RequestBody(required = true) Pageview event) throws Exception {
    Assert.notNull(event.getUser(), "User must be set");
    Assert.notNull(event.getId(), "Id must be set");
    resetStateMachineFromStore(event.getUser());
    feedMachine(event.getUser(), event.getId());
}
```

In the following listing, `feedMachine` sends an event into a `StateMachine` and persists its state by using a `StateMachinePersister`:

```
private void feedMachine(String user, Events id) throws Exception {
    stateMachine
        .sendEvent(Mono.just(MessageBuilder
            .withPayload(id).build()))
        .blockLast();
    stateMachinePersister.persist(stateMachine, "testprefix:" + user);
}
```

The following listing shows a `resetStateMachineFromStore` method that is used to restore a state machine for a particular user:

```
private StateMachine resetStateMachineFromStore(String user) throws Exception {
    return stateMachinePersister.restore(stateMachine, "testprefix:" + user);
}
```

As you would usually send an event by using the UI, you can do the same by using `REST` calls, as the following `curl` command shows:

```
# curl http://localhost:8080/feed -H "Content-Type: application/json" --data '{"user":"joe","id":"VIEW_I"}'
```

At this point, you should have content in Redis with a key of `testprefix:joe`, as the following example shows:

```
$ ./redis-cli
127.0.0.1:6379> KEYS *
1) "testprefix:joe"
```

The next three images show when the state for `joe` has been changed from `HOME` to `ITEMS` and when the `ADD` action has been executed.

The following image shows the `ADD` event being sent:

sm eventservice 2

You are still in the `ITEMS` state, and the internal transition caused the `COUNT` extended state variable to increase to `1`, as the following image shows:

sm eventservice 3

Now you can run the following `curl` REST call a few times (or do it through the UI) and see the `COUNT` variable increase with every call:

```
# curl http://localhost:8080/feed -H "Content-Type: application/json" --data '{"user":"joe","id":"ADD"}'
```

The following image shows the result of these operations:

sm eventservice 4

## Deploy

The deploy example shows how you can use state machine concepts with UML modeling to provide a generic error-handling state. This state machine is a relatively complex example of how you can use various features to provide a centralized error-handling concept.
The following image shows the deploy state machine:

model deployer

| |The preceding state chart was designed by using the Eclipse Papyrus Plugin
(see [Eclipse Modeling Support](#sm-papyrus)) and imported into Spring Statemachine through the resulting UML
model file. Actions and guards defined in a model are resolved
from a Spring Application Context.|
|---|---|

In this state machine scenario, we have two different behaviors (`DEPLOY` and `UNDEPLOY`) that the user tries to execute.

In the preceding state chart:

* In the `DEPLOY` state, the `INSTALL` and `START` states are entered conditionally. We enter `START` directly if a product is already installed, and we have no need to try to `START` if the installation fails.

* In the `UNDEPLOY` state, we enter `STOP` conditionally if the application is already running.

* Conditional choices for `DEPLOY` and `UNDEPLOY` are done through a choice pseudostate within those states, and the choices are selected by guards.

* We use exit point pseudostates to have a more controlled exit from the `DEPLOY` and `UNDEPLOY` states.

* After exiting from `DEPLOY` and `UNDEPLOY`, we go through a junction pseudostate to choose whether to go through an `ERROR` state (if an error was added into an extended state).

* Finally, we go back to the `READY` state to process new requests.

Now we can get to the actual demo. Run the Boot-based sample application by running the following command:

```
# java -jar spring-statemachine-samples-deploy-3.0.1.jar
```

In a browser, you can see something like the following image:

sm deploy 1

| |As we do not have real install, start, or stop functionality, we simulate failures by checking the existence of particular message headers.|
|---|---|

Now you can start to send events to a machine and choose various message headers to drive functionality.
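For example, a client could attach a header when sending an event in order to force a failure branch. The following sketch is ours rather than the sample's code, and the header name `testFail` is hypothetical (check the sample sources for the header names its guards actually inspect):

```
// send DEPLOY with a header that the machine's guards can inspect
stateMachine
    .sendEvent(Mono.just(MessageBuilder
        .withPayload("DEPLOY")
        .setHeader("testFail", true)
        .build()))
    .subscribe();
```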
## Order Shipping

The order shipping example shows how you can use state machine concepts to build a simple order-processing system.

The following image shows a state chart that drives this order shipping sample:

![sm ordershipping 1](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-ordershipping-1.png)

In the preceding state chart:

* The state machine enters the `WAIT_NEW_ORDER` (default) state.

* The event `PLACE_ORDER` transitions into the `RECEIVE_ORDER` state, and the entry action (`entryReceiveOrder`) is executed.

* If the order is `OK`, the state machine goes into two regions, one handling order production and one handling user-level payment. Otherwise, the state machine goes into `CUSTOMER_ERROR`, which is a final state.

* The state machine loops in a lower region to remind the user to pay until `RECEIVE_PAYMENT` is sent successfully to indicate correct payment.

* Both regions go into waiting states (`WAIT_PRODUCT` and `WAIT_ORDER`), where they are joined before the parent orthogonal state (`HANDLE_ORDER`) is exited.

* Finally, the state machine goes through `SHIP_ORDER` to its final state (`ORDER_SHIPPED`).

The following command runs the sample:

```
# java -jar spring-statemachine-samples-ordershipping-3.0.1.jar
```

In a browser, you can see something similar to the following image. You can start by choosing a customer and an order to create a state machine:

![sm ordershipping 2](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-ordershipping-2.png)

The state machine for a particular order is now created, and you can start to play with placing an order and sending a payment. Other settings (such as `makeProdPlan`, `produce`, and `payment`) let you control how the state machine works.
The following image shows the state machine waiting for an order:

![sm ordershipping 3](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-ordershipping-3.png)

Finally, you can see what the machine does by refreshing the page, as the following image shows:

![sm ordershipping 4](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-ordershipping-4.png)

## JPA Configuration

The JPA configuration example shows how you can use state machine concepts with a machine configuration kept in a database. This sample uses an embedded H2 database with an H2 Console (to ease playing with the database).

This sample uses `spring-statemachine-autoconfigure` (which, by default, auto-configures the repositories and entity classes needed for JPA). Thus, you need only `@SpringBootApplication`.
+The following example shows the `Application` class with the `@SpringBootApplication` annotation: + +``` +@SpringBootApplication +public class Application { + + public static void main(String[] args) { + SpringApplication.run(Application.class, args); + } +} +``` + +The following example shows how to create a `RepositoryStateMachineModelFactory`: + +``` +@Configuration +@EnableStateMachineFactory +public static class Config extends StateMachineConfigurerAdapter { + + @Autowired + private StateRepository stateRepository; + + @Autowired + private TransitionRepository transitionRepository; + + @Override + public void configure(StateMachineModelConfigurer model) throws Exception { + model + .withModel() + .factory(modelFactory()); + } + + @Bean + public StateMachineModelFactory modelFactory() { + return new RepositoryStateMachineModelFactory(stateRepository, transitionRepository); + } +} +``` + +You can use the following command to run the sample: + +``` +# java -jar spring-statemachine-samples-datajpa-3.0.1.jar +``` + +Accessing the application at `[http://localhost:8080](http://localhost:8080)` brings up a newly +constructed machine for each request. You can then choose to send +events to a machine. The possible events and machine configuration are +updated from a database with every request. +The following image shows the UI and the initial events that are created when +this state machine starts: + +![sm datajpa 1](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-datajpa-1.png) + +To access the embedded console, you can use the JDBC URL (which is `jdbc:h2:mem:testdb`, if it is +not already set). +The following image shows the H2 console: + +![sm datajpa 2](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-datajpa-2.png) + +From the console, you can see the database tables and modify +them as you wish. +The following image shows the result of a simple query in the UI: + +![sm datajpa 3](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-datajpa-3.png) + +Now that you have gotten this far, you have probably wondered how those default +states and transitions got populated into the database. Spring Data +has a nice trick to auto-populate repositories, and we +used this feature through `Jackson2RepositoryPopulatorFactoryBean`. 
The following example shows how we create such a bean:

```
@Bean
public StateMachineJackson2RepositoryPopulatorFactoryBean jackson2RepositoryPopulatorFactoryBean() {
    StateMachineJackson2RepositoryPopulatorFactoryBean factoryBean = new StateMachineJackson2RepositoryPopulatorFactoryBean();
    factoryBean.setResources(new Resource[]{new ClassPathResource("data.json")});
    return factoryBean;
}
```

The following listing shows the source of the data with which we populate the database:

```
[
    {
        "@id": "10",
        "_class": "org.springframework.statemachine.data.jpa.JpaRepositoryAction",
        "spel": "T(System).out.println('hello exit S1')"
    },
    {
        "@id": "11",
        "_class": "org.springframework.statemachine.data.jpa.JpaRepositoryAction",
        "spel": "T(System).out.println('hello entry S2')"
    },
    {
        "@id": "12",
        "_class": "org.springframework.statemachine.data.jpa.JpaRepositoryAction",
        "spel": "T(System).out.println('hello state S3')"
    },
    {
        "@id": "13",
        "_class": "org.springframework.statemachine.data.jpa.JpaRepositoryAction",
        "spel": "T(System).out.println('hello')"
    },
    {
        "@id": "1",
        "_class": "org.springframework.statemachine.data.jpa.JpaRepositoryState",
        "initial": true,
        "state": "S1",
        "exitActions": ["10"]
    },
    {
        "@id": "2",
        "_class": "org.springframework.statemachine.data.jpa.JpaRepositoryState",
        "initial": false,
        "state": "S2",
        "entryActions": ["11"]
    },
    {
        "@id": "3",
        "_class": "org.springframework.statemachine.data.jpa.JpaRepositoryState",
        "initial": false,
        "state": "S3",
        "stateActions": ["12"]
    },
    {
        "_class": "org.springframework.statemachine.data.jpa.JpaRepositoryTransition",
        "source": "1",
        "target": "2",
        "event": "E1",
        "kind": "EXTERNAL"
    },
    {
        "_class": "org.springframework.statemachine.data.jpa.JpaRepositoryTransition",
        "source": "2",
        "target": "3",
        "event": "E2",
        "actions": ["13"]
    }
]
```

## Data Persist

The data persist sample shows how you can use state machine concepts with a machine persisted in an external repository. This sample uses an embedded H2 database with an H2 Console (to ease playing with the database). Optionally, you can also enable Redis or MongoDB.

This sample uses `spring-statemachine-autoconfigure` (which, by default, auto-configures the repositories and entity classes needed for JPA). Thus, you need only `@SpringBootApplication`.
The following example shows the `Application` class with the `@SpringBootApplication` annotation:

```
@SpringBootApplication
public class Application {

    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }
}
```

The `StateMachineRuntimePersister` interface works on the runtime level of a `StateMachine`. Its implementation, `JpaPersistingStateMachineInterceptor`, is meant to be used with JPA.
+The following listing creates a `StateMachineRuntimePersister` bean: + +``` +@Configuration +@Profile("jpa") +public static class JpaPersisterConfig { + + @Bean + public StateMachineRuntimePersister stateMachineRuntimePersister( + JpaStateMachineRepository jpaStateMachineRepository) { + return new JpaPersistingStateMachineInterceptor<>(jpaStateMachineRepository); + } +} +``` + +The following example shows how you can use a very similar configuration +to create a bean for MongoDB: + +``` +@Configuration +@Profile("mongo") +public static class MongoPersisterConfig { + + @Bean + public StateMachineRuntimePersister stateMachineRuntimePersister( + MongoDbStateMachineRepository jpaStateMachineRepository) { + return new MongoDbPersistingStateMachineInterceptor<>(jpaStateMachineRepository); + } +} +``` + +The following example shows how you can use a very similar configuration +to create a bean for Redis: + +``` +@Configuration +@Profile("redis") +public static class RedisPersisterConfig { + + @Bean + public StateMachineRuntimePersister stateMachineRuntimePersister( + RedisStateMachineRepository jpaStateMachineRepository) { + return new RedisPersistingStateMachineInterceptor<>(jpaStateMachineRepository); + } +} +``` + +You can configure `StateMachine` to use runtime persistence by using the`withPersistence` configuration method. +The following listing shows how to do so: + +``` +@Autowired +private StateMachineRuntimePersister stateMachineRuntimePersister; + +@Override +public void configure(StateMachineConfigurationConfigurer config) + throws Exception { + config + .withPersistence() + .runtimePersister(stateMachineRuntimePersister); +} +``` + +This sample also uses `DefaultStateMachineService`, which makes it +easier to work with multiple machines. +The following listing shows how to create an instance of `DefaultStateMachineService`: + +``` +@Bean +public StateMachineService stateMachineService( + StateMachineFactory stateMachineFactory, + StateMachineRuntimePersister stateMachineRuntimePersister) { + return new DefaultStateMachineService(stateMachineFactory, stateMachineRuntimePersister); +} +``` + +The following listing shows the logic that drives the `StateMachineService` in this sample: + +``` +private synchronized StateMachine getStateMachine(String machineId) throws Exception { + listener.resetMessages(); + if (currentStateMachine == null) { + currentStateMachine = stateMachineService.acquireStateMachine(machineId); + currentStateMachine.addStateListener(listener); + currentStateMachine.startReactively().block(); + } else if (!ObjectUtils.nullSafeEquals(currentStateMachine.getId(), machineId)) { + stateMachineService.releaseStateMachine(currentStateMachine.getId()); + currentStateMachine.stopReactively().block(); + currentStateMachine = stateMachineService.acquireStateMachine(machineId); + currentStateMachine.addStateListener(listener); + currentStateMachine.startReactively().block(); + } + return currentStateMachine; +} +``` + +You can use the following command to run the sample: + +``` +# java -jar spring-statemachine-samples-datapersist-3.0.1.jar +``` + +| |By default, the `jpa` profile is enabled in `application.yml`. If you want to try
other backends, enable either the `mongo` profile or the `redis` profile.
The following commands specify which profile to use (`jpa` is the default,
but we included it for the sake of completeness):

```
# java -jar spring-statemachine-samples-datapersist-3.0.1.jar --spring.profiles.active=jpa
# java -jar spring-statemachine-samples-datapersist-3.0.1.jar --spring.profiles.active=mongo
# java -jar spring-statemachine-samples-datapersist-3.0.1.jar --spring.profiles.active=redis
```|
|---|---|

Accessing the application at [http://localhost:8080](http://localhost:8080) brings up a newly constructed state machine for each request, and you can choose to send events to a machine. The possible events and machine configuration are updated from a database with every request.

The state machines in this sample have a simple configuration with states 'S1' to 'S6' and events 'E1' to 'E6' to transition the state machine between those states. You can use two state machine identifiers (`datajpapersist1` and `datajpapersist2`) to request a particular state machine.
The following image shows the UI that lets you pick a machine and an event and that shows what happens when you do:

![sm datajpapersist 1](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-datajpapersist-1.png)

The sample defaults to using machine 'datajpapersist1' and goes to its initial state 'S1'.
The following image shows the result of using those defaults:

![sm datajpapersist 2](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-datajpapersist-2.png)

If you send events `E1` and `E2` to the `datajpapersist1` state machine, its state is persisted as 'S3'.
The following image shows the result of doing so:

![sm datajpapersist 3](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-datajpapersist-3.png)

If you then request state machine `datajpapersist1` but send no events, the state machine is restored back to its persisted state, `S3`.

## Data Multi Persist

The data multi persist sample is an extension of two other samples: [JPA Configuration](#statemachine-examples-datajpa) and [Data Persist](#statemachine-examples-datapersist). We still keep machine configuration in a database and persist into a database. However, this time, we also have a machine that contains two orthogonal regions, to show how those are persisted independently. This sample also uses an embedded H2 database with an H2 Console (to ease playing with the database).

This sample uses `spring-statemachine-autoconfigure` (which, by default, auto-configures the repositories and entity classes needed for JPA). Thus, you need only `@SpringBootApplication`.
The following example shows the `Application` class with the `@SpringBootApplication` annotation:

```
@SpringBootApplication
public class Application {

    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }
}
```

As in the other data-driven samples, we again create a `StateMachineRuntimePersister`, as the following listing shows:

```
@Bean
public StateMachineRuntimePersister stateMachineRuntimePersister(
        JpaStateMachineRepository jpaStateMachineRepository) {
    return new JpaPersistingStateMachineInterceptor<>(jpaStateMachineRepository);
}
```

A `StateMachineService` bean makes it easier to work with multiple machines.
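Before we define the bean, the following sketch (ours, not from the sample) shows the usage pattern such a service enables: machines are acquired by identifier and released when no longer needed, while the runtime persister keeps the backing database up to date:

```
// acquire (and start) a machine for a given id; its state is restored if one was persisted
StateMachine machine = stateMachineService.acquireStateMachine("datajpamultipersist1");

// ... send events to the machine ...

// release the machine when it is no longer needed
stateMachineService.releaseStateMachine("datajpamultipersist1");
```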
The following listing shows how to create such a bean:

```
@Bean
public StateMachineService stateMachineService(
        StateMachineFactory stateMachineFactory,
        StateMachineRuntimePersister stateMachineRuntimePersister) {
    return new DefaultStateMachineService(stateMachineFactory, stateMachineRuntimePersister);
}
```

We use JSON data to import the configuration.
The following example creates a bean to do so:

```
@Bean
public StateMachineJackson2RepositoryPopulatorFactoryBean jackson2RepositoryPopulatorFactoryBean() {
    StateMachineJackson2RepositoryPopulatorFactoryBean factoryBean = new StateMachineJackson2RepositoryPopulatorFactoryBean();
    factoryBean.setResources(new Resource[] { new ClassPathResource("datajpamultipersist.json") });
    return factoryBean;
}
```

The following listing shows how we get a `RepositoryStateMachineModelFactory`:

```
@Configuration
@EnableStateMachineFactory
public static class Config extends StateMachineConfigurerAdapter {

    @Autowired
    private StateRepository stateRepository;

    @Autowired
    private TransitionRepository transitionRepository;

    @Autowired
    private StateMachineRuntimePersister stateMachineRuntimePersister;

    @Override
    public void configure(StateMachineConfigurationConfigurer config)
            throws Exception {
        config
            .withPersistence()
                .runtimePersister(stateMachineRuntimePersister);
    }

    @Override
    public void configure(StateMachineModelConfigurer model)
            throws Exception {
        model
            .withModel()
                .factory(modelFactory());
    }

    @Bean
    public StateMachineModelFactory modelFactory() {
        return new RepositoryStateMachineModelFactory(stateRepository, transitionRepository);
    }
}
```

You can run the sample by using the following command:

```
# java -jar spring-statemachine-samples-datajpamultipersist-3.0.1.jar
```

Accessing the application at [http://localhost:8080](http://localhost:8080) brings up a newly constructed machine for each request and lets you send events to a machine. The possible events and the state machine configuration are updated from a database for each request. We also print out all state machine contexts and the current root machine, as the following image shows:

![sm datajpamultipersist 1](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-datajpamultipersist-1.png)

The state machine named `datajpamultipersist1` is a simple “flat” machine where states `S1`, `S2`, and `S3` are transitioned by events `E1`, `E2`, and `E3` (respectively). However, the state machine named `datajpamultipersist2` contains two regions (`R1` and `R2`) directly under the root level. That is why this root-level machine really does not have a state of its own. We need that root-level machine to host those regions.

Regions `R1` and `R2` in the `datajpamultipersist2` state machine contain states `S10`, `S11`, and `S12` and states `S20`, `S21`, and `S22` (respectively). Events `E10`, `E11`, and `E12` are used for region `R1`, and events `E20`, `E21`, and `E22` are used for region `R2`. The following image shows what happens when we send events `E10` and `E20` to the `datajpamultipersist2` state machine:

![sm datajpamultipersist 2](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-datajpamultipersist-2.png)

Regions have their own contexts with their own IDs, and the actual ID is postfixed with `#` and the region ID.
As the following image shows, different regions in a database have different contexts:

![sm datajpamultipersist 3](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-datajpamultipersist-3.png)
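As an aside (this snippet is ours, not part of the sample), these per-region contexts can be looked up straight from the repository by their postfixed identifiers, assuming the region IDs match those shown in the UI:

```
// machine ids for region contexts take the form "<machineId>#<regionId>"
Optional<JpaRepositoryStateMachine> region1 =
        jpaStateMachineRepository.findById("datajpamultipersist2#R1");
```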
## Monitoring

The monitoring sample shows how you can use state machine concepts to monitor state machine transitions and actions.
+The following listing configures the state machine that we use for this sample: + +``` +@Configuration +@EnableStateMachine +public static class Config extends StateMachineConfigurerAdapter { + + @Override + public void configure(StateMachineStateConfigurer states) + throws Exception { + states + .withStates() + .initial("S1") + .state("S2", null, (c) -> {System.out.println("hello");}) + .state("S3", (c) -> {System.out.println("hello");}, null); + } + + @Override + public void configure(StateMachineTransitionConfigurer transitions) + throws Exception { + transitions + .withExternal() + .source("S1").target("S2").event("E1") + .action((c) -> {System.out.println("hello");}) + .and() + .withExternal() + .source("S2").target("S3").event("E2"); + } +} +``` + +You can use the following command to run the sample: + +``` +# java -jar spring-statemachine-samples-monitoring-3.0.1.jar +``` + +The following image shows the state machine’s initial state: + +![sm monitoring 1](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-monitoring-1.png) + +The following image shows the state of the state machine after we have +performed some actions: + +![sm monitoring 2](https://docs.spring.io/spring-statemachine/docs/3.0.1/reference/images/sm-monitoring-2.png) + +You can view metrics from Spring Boot by running the following two `curl`commands (shown with their output): + +``` +# curl http://localhost:8080/actuator/metrics/ssm.transition.duration + +{ + "name":"ssm.transition.duration", + "measurements":[ + { + "statistic":"COUNT", + "value":3.0 + }, + { + "statistic":"TOTAL_TIME", + "value":0.007 + }, + { + "statistic":"MAX", + "value":0.004 + } + ], + "availableTags":[ + { + "tag":"transitionName", + "values":[ + "INITIAL_S1", + "EXTERNAL_S1_S2" + ] + } + ] +} +``` + +``` +# curl http://localhost:8080/actuator/metrics/ssm.transition.transit + +{ + "name":"ssm.transition.transit", + "measurements":[ + { + "statistic":"COUNT", + "value":3.0 + } + ], + "availableTags":[ + { + "tag":"transitionName", + "values":[ + "EXTERNAL_S1_S2", + "INITIAL_S1" + ] + } + ] +} +``` + +You can also view tracing from Spring Boot by running the following `curl`command (shown with its output): + +``` +# curl http://localhost:8080/actuator/statemachinetrace + +[ + { + "timestamp":"2018-02-11T06:44:12.723+0000", + "info":{ + "duration":2, + "machine":null, + "transition":"EXTERNAL_S1_S2" + } + }, + { + "timestamp":"2018-02-11T06:44:12.720+0000", + "info":{ + "duration":0, + "machine":null, + "action":"demo.monitoring.StateMachineConfig$Config$$Lambda$576/[email protected]" + } + }, + { + "timestamp":"2018-02-11T06:44:12.714+0000", + "info":{ + "duration":1, + "machine":null, + "transition":"INITIAL_S1" + } + }, + { + "timestamp":"2018-02-11T06:44:09.689+0000", + "info":{ + "duration":4, + "machine":null, + "transition":"INITIAL_S1" + } + } +] +``` + +# FAQ + +This chapter answers the questions that Spring Statemachine users most often ask. + +## State Changes + +How can I automatically transit to the next state? + +You can choose from three approaches: + +* Implement an action and send an appropriate event to a state machine + to trigger a transition into the proper target state. + +* Define a deferred event within a state and, before sending an event, + send another event that is deferred. Doing so causes the next + appropriate state transition when it is more convenient to handle + that event. 
* Implement a triggerless transition, which automatically causes a state transition into the next state when a state is entered and its actions have been completed.

## Extended State

How can I initialize variables on state machine start?

An important concept in a state machine is that nothing really happens unless a trigger causes a state transition, which can then fire actions. However, having said that, Spring Statemachine always has an initial transition when a state machine is started. With this initial transition, you can run a simple action that, within a `StateContext`, can do whatever it likes with extended state variables.

# Appendices

## Appendix A: Support Content

This appendix provides generic information about the classes and material that are used in this reference documentation.

### Classes Used in This Document

The following listings show the classes used throughout this reference guide:

```
public enum States {
    SI,S1,S2,S3,S4,SF
}
```

```
public enum States2 {
    S1,S2,S3,S4,S5,SF,
    S2I,S21,S22,S2F,
    S3I,S31,S32,S3F
}
```

```
public enum States3 {
    S1,S2,SH,
    S2I,S21,S22,S2F
}
```

```
public enum Events {
    E1,E2,E3,E4,EF
}
```

## Appendix B: State Machine Concepts

This appendix provides general information about state machines.

### Quick Example

Assuming we have states named `STATE1` and `STATE2` and events named `EVENT1` and `EVENT2`, you can define the logic of the state machine as the following image shows:

statechart0

The following listings define the state machine in the preceding image:

```
public enum States {
    STATE1, STATE2
}

public enum Events {
    EVENT1, EVENT2
}
```

```
@Configuration
@EnableStateMachine
public class Config1 extends EnumStateMachineConfigurerAdapter {

    @Override
    public void configure(StateMachineStateConfigurer states)
            throws Exception {
        states
            .withStates()
                .initial(States.STATE1)
                .states(EnumSet.allOf(States.class));
    }

    @Override
    public void configure(StateMachineTransitionConfigurer transitions)
            throws Exception {
        transitions
            .withExternal()
                .source(States.STATE1).target(States.STATE2)
                .event(Events.EVENT1)
                .and()
            .withExternal()
                .source(States.STATE2).target(States.STATE1)
                .event(Events.EVENT2);
    }
}
```

```
@WithStateMachine
public class MyBean {

    @OnTransition(target = "STATE1")
    void toState1() {
    }

    @OnTransition(target = "STATE2")
    void toState2() {
    }
}
```

```
public class MyApp {

    @Autowired
    StateMachine stateMachine;

    void doSignals() {
        stateMachine
            .sendEvent(Mono.just(MessageBuilder
                .withPayload(Events.EVENT1).build()))
            .subscribe();
        stateMachine
            .sendEvent(Mono.just(MessageBuilder
                .withPayload(Events.EVENT2).build()))
            .subscribe();
    }
}
```

### Glossary

**State Machine**

The main entity that drives a collection of states, together with regions, transitions, and events.

**State**

A state models a situation during which some invariant condition holds. The state is the main entity of a state machine, where state changes are driven by events.

**Extended State**

An extended state is a special set of variables kept in a state machine to reduce the number of needed states.

**Transition**

A transition is a relationship between a source state and a target state.
It may be part of a compound transition, which takes the state machine from one state configuration to another, representing the complete response of the state machine to an occurrence of an event of a particular type.

**Event**

An entity that is sent to a state machine and then drives various state changes.

**Initial State**

A special state in which the state machine starts. The initial state is always bound to a particular state machine or region. A state machine with multiple regions may have multiple initial states.

**End State**

(Also called a final state.) A special kind of state signifying that the enclosing region is completed. If the enclosing region is directly contained in a state machine and all other regions in the state machine are also completed, the entire state machine is completed.

**History State**

A pseudo state that lets a state machine remember its last active state. Two types of history state exist: *shallow* (which remembers only the top-level state) and *deep* (which remembers active states in sub-machines).

**Choice State**

A pseudo state that allows for making a transition choice based on (for example) event headers or extended state variables.

**Junction State**

A pseudo state that is relatively similar to a choice state but allows multiple incoming transitions, while choice allows only one incoming transition.

**Fork State**

A pseudo state that gives controlled entry into a region.

**Join State**

A pseudo state that gives controlled exit from a region.

**Entry Point**

A pseudo state that allows controlled entry into a submachine.

**Exit Point**

A pseudo state that allows controlled exit from a submachine.

**Region**

A region is an orthogonal part of either a composite state or a state machine. It contains states and transitions.

**Guard**

A boolean expression evaluated dynamically based on the value of extended state variables and event parameters. Guard conditions affect the behavior of a state machine by enabling actions or transitions only when they evaluate to `TRUE` and disabling them when they evaluate to `FALSE`.

**Action**

An action is a behavior run during the triggering of a transition.

### A State Machine Crash Course

This appendix provides a generic crash course to state machine concepts.

#### States

A state is a model in which a state machine can be. It is always easier to describe a state with a real-world example rather than with abstract concepts in generic documentation. To that end, consider a simple example of a keyboard, something most of us use every single day. If you have a full keyboard that has normal keys on the left side and the numeric keypad on the right side, you may have noticed that the numeric keypad may be in two different states, depending on whether numlock is activated. If it is not active, pressing the number pad keys results in navigation by using arrows and so on. If the number pad is active, pressing those keys results in numbers being typed. Essentially, the number pad part of a keyboard can be in two different states.

To relate the state concept to programming, it means that, instead of using flags, nested if/else/break clauses, or other impractical (and sometimes tortuous) logic, you can rely on state, state variables, or another interaction with a state machine.
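To make the keyboard example concrete, here is a minimal sketch of the number pad as a two-state machine, following the same configuration style as the Quick Example above. The enum and class names are ours, invented purely for illustration:

```
public enum KeyStates {
    NUMLOCK_OFF, NUMLOCK_ON
}

public enum KeyEvents {
    NUMLOCK_PRESSED
}
```

```
@Configuration
@EnableStateMachine
public class NumpadConfig extends EnumStateMachineConfigurerAdapter<KeyStates, KeyEvents> {

    @Override
    public void configure(StateMachineStateConfigurer<KeyStates, KeyEvents> states)
            throws Exception {
        states
            .withStates()
                .initial(KeyStates.NUMLOCK_OFF)
                .states(EnumSet.allOf(KeyStates.class));
    }

    @Override
    public void configure(StateMachineTransitionConfigurer<KeyStates, KeyEvents> transitions)
            throws Exception {
        transitions
            .withExternal()
                // pressing numlock toggles between the two states
                .source(KeyStates.NUMLOCK_OFF).target(KeyStates.NUMLOCK_ON)
                .event(KeyEvents.NUMLOCK_PRESSED)
                .and()
            .withExternal()
                .source(KeyStates.NUMLOCK_ON).target(KeyStates.NUMLOCK_OFF)
                .event(KeyEvents.NUMLOCK_PRESSED);
    }
}
```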
#### Pseudo States

A pseudostate is a special type of state that usually introduces higher-level logic into a state machine by giving a state a special meaning (such as being the initial state). A state machine can then internally react to these states by doing various actions that are available in UML state machine concepts.

##### Initial

The **Initial pseudostate** is always needed for every single state machine, whether you have a simple one-level state machine or a more complex state machine composed of submachines or regions. The initial state defines where a state machine should go when it starts. Without it, a state machine is ill-formed.

##### End

The **Terminate pseudostate** (which is also called the “end state”) indicates that a particular state machine has reached its final state. Effectively, this means that a state machine no longer processes any events and does not transit to any other state. However, in the case where submachines are regions, a state machine can restart from its terminal state.

##### Choice

You can use the **Choice pseudostate** to choose a dynamic conditional branch of a transition from this state. The dynamic condition is evaluated by guards so that one branch is selected. Usually, a simple if/elseif/else structure is used to make sure that one branch is selected. Otherwise, the state machine might end up in a deadlock, and the configuration is ill-formed.

##### Junction

The **Junction pseudostate** is functionally similar to choice, as both are implemented with if/elseif/else structures. The only real difference is that junction allows multiple incoming transitions, while choice allows only one. This difference is largely academic but does matter in some cases, such as when a state machine design is used with a real UI modeling framework.

##### History

You can use the **History pseudostate** to remember the last active state configuration. After a state machine has exited, you can use a history state to restore a previously known configuration. There are two types of history states available: `SHALLOW` (which remembers only the active state of a state machine itself) and `DEEP` (which also remembers nested states).

A history state could be implemented externally by listening to state machine events, but this would soon make for very difficult logic, especially if a state machine contains complex nested structures. Letting the state machine itself handle the recording of history states makes things much simpler. The user need only create a transition into a history state, and the state machine handles the needed logic to go back to its last known recorded state.

In cases where a transition terminates on a history state when the state has not been previously entered (in other words, no prior history exists) or it had reached its end state, a transition can force the state machine to a specific substate by using the default history mechanism. This transition originates in the history state and terminates on a specific vertex (the default history state) of the region that contains the history state. This transition is taken only if its execution leads to the history state and the state had never before been active. Otherwise, the normal history entry into the region is executed. If no default history transition is defined, the standard default entry of the region is performed.
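In Spring Statemachine configuration terms, a history state is declared alongside the substates it records. The following is a minimal sketch of ours (not from the samples), reusing the `States3` and `Events` enums from Appendix A and treating `SH` as the history vertex inside `S2`:

```
@Override
public void configure(StateMachineStateConfigurer<States3, Events> states)
        throws Exception {
    states
        .withStates()
            .initial(States3.S1)
            .state(States3.S2)
            .and()
            .withStates()
                .parent(States3.S2)
                .initial(States3.S2I)
                .state(States3.S21)
                .state(States3.S22)
                // SH records the last active substate of S2
                .history(States3.SH, History.DEEP);
}
```

A transition that targets `States3.SH` then re-enters whichever substate of `S2` was last active.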
##### Fork

You can use the **Fork pseudostate** to do an explicit entry into one or more regions.
The following image shows how a fork works:

statechart7

The target state can be a parent state that hosts regions, which simply means that regions are activated by entering their initial states. You can also add targets directly to any state in a region, which allows more controlled entry into a state.

##### Join

The **Join pseudostate** merges together several transitions that originate from different regions. It is generally used to wait and block until all participating regions get into their join target states.
The following image shows how a join works:

statechart8

The source state can be a parent state that hosts regions, which means that join states are the terminal states of the participating regions. You can also define source states to be any state in a region, which allows controlled exit from regions.

##### Entry Point

An **Entry Point pseudostate** represents an entry point for a state machine or a composite state that provides encapsulation of the insides of the state or state machine. In each region of the state machine or composite state that owns the entry point, there is at most a single transition from the entry point to a vertex within that region.

##### Exit Point

An **Exit Point pseudostate** is an exit point of a state machine or composite state that provides encapsulation of the insides of the state or state machine. Transitions that terminate on an exit point within any region of the composite state (or a state machine referenced by a submachine state) imply exiting this composite state or submachine state (with execution of its associated exit behavior).

#### Guard Conditions

Guard conditions are expressions that evaluate to either `TRUE` or `FALSE`, based on extended state variables and event parameters. Guards are used with actions and transitions to dynamically choose whether a particular action or transition should be run. The various aspects of guards, event parameters, and extended state variables exist to make state machine design much simpler.

#### Events

An event is the most-used trigger behavior to drive a state machine. There are other ways to trigger behavior in a state machine (such as a timer), but events are the ones that really let users interact with a state machine. Events are also called “signals”. They basically indicate something that can possibly alter a state machine state.

#### Transitions

A transition is a relationship between a source state and a target state. A switch from one state to another is a state transition caused by a trigger.

##### Internal Transition

An internal transition is used when an action needs to be run without causing a state transition. In an internal transition, the source state and the target state are always the same, and it is identical to a self-transition in the absence of state entry and exit actions.

##### External versus Local Transitions

In most cases, external and local transitions are functionally equivalent, except in cases where the transition happens between super and sub states. Local transitions do not cause exit and entry to a source state if the target state is a substate of the source state. Conversely, local transitions do not cause exit and entry to a target state if the target is a superstate of the source state.
The following image shows the difference between local and external transitions with very simplistic super and sub states:

statechart4
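The same difference can be expressed in configuration. The following is a minimal sketch of ours (not from the samples), assuming `S21` and `S22` are substates of `S2`, as in the `States2` enum from Appendix A:

```
@Override
public void configure(StateMachineTransitionConfigurer<States2, Events> transitions)
        throws Exception {
    transitions
        .withExternal()
            // external: exits and re-enters S2, so S2's exit and entry actions run
            .source(States2.S2).target(States2.S21).event(Events.E1)
            .and()
        .withLocal()
            // local: stays within S2, so S2's exit and entry actions do not run
            .source(States2.S2).target(States2.S22).event(Events.E2);
}
```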
#### Triggers

A trigger begins a transition. Triggers can be driven by either events or timers.

#### Actions

Actions really glue state machine state changes to a user's own code. A state machine can run an action on various changes and steps in a state machine, such as entering or exiting a state or doing a state transition.

Actions usually have access to a state context, which gives running code a choice to interact with a state machine in various ways. The state context exposes the whole state machine so that a user can access extended state variables, event headers (if a transition is based on an event), or an actual transition (where it is possible to see more details about where this state change is coming from and where it is going).

#### Hierarchical State Machines

The concept of a hierarchical state machine is used to simplify state design when particular states must exist together.

Hierarchical states are really an innovation in UML state machines over traditional state machines, such as Mealy or Moore machines. Hierarchical states let you define some level of abstraction (parallel to how a Java developer might define a class structure with abstract classes). For example, with a nested state machine, you can define transitions on multiple levels of states (possibly with different conditions). A state machine always tries to see if the current state is able to handle an event, together with transition guard conditions. If these conditions do not evaluate to `TRUE`, the state machine merely sees what the super state can handle.

#### Regions

Regions (which are also called orthogonal regions) are usually viewed as exclusive OR (XOR) operations applied to states. The concept of a region in terms of a state machine is usually a little difficult to understand, but things get a little simpler with a simple example.

Some of us have a full-size keyboard with the main keys on the left side and numeric keys on the right side. You have probably noticed that both sides really have their own state, which you see if you press a “numlock” key (which alters only the behavior of the number pad itself). If you do not have a full-size keyboard, you can buy an external USB number pad. Given that the left and right sides of a keyboard can each exist without the other, they must have totally different states, which means they are operating on different state machines. In state machine terms, the main part of a keyboard is one region and the number pad is another region.

It would be a little inconvenient to handle two different state machines as totally separate entities, because they still work together in some fashion. This independence lets orthogonal regions combine multiple simultaneous states within a single state in a state machine.

## Appendix C: Distributed State Machine Technical Paper

This appendix provides more detailed technical documentation about using a Zookeeper instance with Spring Statemachine.

### Abstract

Introducing a “distributed state” on top of a single state machine instance running on a single JVM is a difficult and complex topic.
## Appendix C: Distributed State Machine Technical Paper

This appendix provides more detailed technical documentation about
using a Zookeeper instance with Spring Statemachine.

### Abstract

Introducing a “distributed state” on top of a single state machine
instance running on a single JVM is a difficult and complex topic.
The concept of a “Distributed State Machine” introduces a few relatively complex
problems on top of a simple state machine, due to its run-to-completion
model and, more generally, because of its single-thread execution model,
though orthogonal regions can be run in parallel. One other natural
problem is that state machine transition execution is driven by triggers,
which are either `event` or `timer` based.

Spring State Machine tries to solve the problem of spanning
a generic “State Machine” across a JVM boundary by supporting distributed
state machines. Here we show that you can use generic
“State Machine” concepts across multiple JVMs and Spring
Application Contexts.

We found that, if the `Distributed State Machine` abstraction is carefully chosen
and the backing distributed state repository guarantees `CP` readiness, it is
possible to create a consistent state machine that can share
distributed state with other state machines in an ensemble.

Our results demonstrate that distributed state changes are consistent if the backing
repository is “CP” (discussed [later](#state-machine-technical-paper-introduction)).
We anticipate that our distributed state machine can provide
a foundation for applications that need to work with shared distributed
state. This model aims to provide good methods for cloud applications
to have much easier ways to communicate with each other without having
to explicitly build these distributed state concepts.

### Introduction

Spring State Machine is not forced to use a single-threaded execution
model because, once multiple regions are used, regions can be executed in
parallel if the necessary configuration is applied. This is an important
topic because, once a user wants to have parallel state machine
execution, it makes state changes faster for independent regions.

When state changes are no longer driven by a trigger in a local JVM or a
local state machine instance, transition logic needs to be controlled
externally in an arbitrary persistent storage. This storage needs to
have a way to notify participating state machines when distributed
state is changed.

[CAP Theorem](https://en.wikipedia.org/wiki/CAP_theorem) states that
it is impossible for a distributed computer system to simultaneously
provide all three of the following guarantees: consistency,
availability, and partition tolerance.

This means that,
whatever is chosen for a backing persistence storage, it is advisable
to be “CP”. In this context, “CP” means “consistency” and “partition
tolerance”. Naturally, a distributed Spring Statemachine does not care
about its “CAP” level but, in reality, “consistency” and
“partition tolerance” are more important than “availability”. This is
the exact reason why (for example) Zookeeper uses “CP” storage.

All tests presented in this article are accomplished by running custom
Jepsen tests in the following environment:

* A cluster having nodes n1, n2, n3, n4, and n5.

* Each node has a `Zookeeper` instance that constructs an ensemble with
  all other nodes.

* Each node has a [Web](#statemachine-examples-web) sample installed,
  to connect to a local `Zookeeper` node.

* Every state machine instance communicates only with a local `Zookeeper`
  instance. While connecting a machine to multiple instances
  is possible, it is not used here.

* All state machine instances, when started, create a `StateMachineEnsemble`
  by using a Zookeeper ensemble.

* Each sample contains a custom REST API, which Jepsen uses to send
  events and check particular state machine statuses.

All Jepsen tests for `Spring Distributed Statemachine` are available from
[Jepsen Tests](https://github.com/spring-projects/spring-statemachine/tree/master/jepsen/spring-statemachine-jepsen).
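For orientation, wiring a machine into a Zookeeper-backed ensemble, as each sample instance does, looks roughly like the following sketch. The `curatorClient()` bean and the `/zkpath` base znode path are illustrative assumptions, not taken from the samples:

```
@Override
public void configure(StateMachineConfigurationConfigurer<String, String> config)
        throws Exception {
    config
        .withDistributed()
            // wrap this machine's state handling in the shared ensemble
            .ensemble(stateMachineEnsemble());
}

@Bean
public StateMachineEnsemble<String, String> stateMachineEnsemble() throws Exception {
    // ZookeeperStateMachineEnsemble needs a Curator client and a base znode path
    return new ZookeeperStateMachineEnsemble<>(curatorClient(), "/zkpath");
}
```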
### Generic Concepts

One design decision of the `Distributed State Machine` was not to make each
individual state machine instance aware that it is part of a
“distributed ensemble”. Because the main functions and features of a
`StateMachine` can be accessed through its interface, it makes sense to
wrap this instance in a `DistributedStateMachine`, which
intercepts all state machine communication and collaborates with an
ensemble to orchestrate distributed state changes.

One other important concept is to be able to persist enough
information from a state machine to reset a state machine
from an arbitrary state into a new deserialized state. This is naturally
needed when a new state machine instance joins an ensemble
and needs to synchronize its own internal state with the distributed
state. Together with the concepts of distributed states and state
persisting, it is possible to create a distributed state machine.
Currently, the only backing repository of a `Distributed State Machine` is
implemented by using Zookeeper.

As mentioned in [Using Distributed States](#sm-distributed), distributed states are enabled by
wrapping an instance of a `StateMachine` in a `DistributedStateMachine`. The specific
`StateMachineEnsemble` implementation is `ZookeeperStateMachineEnsemble`, which provides
integration with Zookeeper.

### The Role of `ZookeeperStateMachinePersist`

We wanted to have a generic interface (`StateMachinePersist`) that
can persist `StateMachineContext` into arbitrary storage, and
`ZookeeperStateMachinePersist` implements this interface for `Zookeeper`.

### The Role of `ZookeeperStateMachineEnsemble`

While a distributed state machine uses one set of serialized
contexts to update its own state, with Zookeeper, we have a
conceptual problem around how to listen to these context changes. We
can serialize a context into a Zookeeper `znode` and eventually
listen for when the `znode` data is modified. However, `Zookeeper` does not
guarantee that you get a notification for every data change,
because a registered `watcher` for a `znode` is disabled once it fires,
and the user needs to re-register that `watcher`. During this short time,
the `znode` data can be changed, thus resulting in missed events. It is
actually very easy to miss these events by changing data from
multiple threads in a concurrent manner.

To overcome this issue, we keep individual context changes
in multiple `znodes`, and we use a simple integer counter to mark
which `znode` is the current active one. Doing so lets us replay missed
events. We do not want to create more and more znodes and then later
delete old ones. Instead, we use the simple concept of a circular
set of znodes. This lets us use a predefined set of znodes where
the current node can be determined with a simple integer counter. We already have
this counter by tracking the main `znode` data version (which, in
`Zookeeper`, is an integer).

The size of the circular buffer is mandated to be a power of two, to avoid
trouble when the integer counter overflows. For this reason, we need not
handle any special cases.
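A small illustrative sketch (a hypothetical helper, not taken from the ensemble implementation itself) of why a power-of-two size needs no special cases:

```
// Maps an ever-increasing, possibly overflowing integer counter onto a
// fixed circular set of znodes.
static int currentZnodeIndex(int dataVersion, int circularBufferSize) {
    // circularBufferSize must be a power of two, e.g. 32. Bit-masking keeps
    // the result in range even after the signed counter overflows and wraps
    // to negative values, which a plain modulo operation would not.
    return dataVersion & (circularBufferSize - 1);
}
```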
### Distributed Tolerance

To show how various distributed actions against a state
machine work in real life, we use a set of Jepsen tests to
simulate various conditions that might happen in a real distributed
cluster. These include a “brain split” on a network level, parallel
events with multiple “distributed state machines”, and changes in
“extended state variables”. Jepsen tests are based on the sample
[Web](#statemachine-examples-web), where this sample instance runs on
multiple hosts together with a Zookeeper instance on every node
where the state machine is run. Essentially, every state machine sample
connects to a local Zookeeper instance, which lets us use Jepsen
to simulate network conditions.

The plotted graphs shown later in this chapter contain states and events that
directly map to a state chart, which you can find in
[Web](#statemachine-examples-web).

#### Isolated Events

Sending an isolated single event into exactly one state machine in an
ensemble is the simplest testing scenario and demonstrates that a
state change in one state machine is properly propagated into other
state machines in an ensemble.

In this test, we demonstrate that a state change in one machine
eventually causes a consistent state change in other machines.
The following image shows the events and state changes for a test state machine:

sm tech isolated events

In the preceding image:

* All machines report state `S21`.

* Event `I` is sent to node `n1` and all nodes report a state change
  from `S21` to `S22`.

* Event `C` is sent to node `n2` and all nodes report a state change
  from `S22` to `S211`.

* Event `I` is sent to node `n5` and all nodes report a state change
  from `S211` to `S212`.

* Event `K` is sent to node `n3` and all nodes report a state change
  from `S212` to `S21`.

* We cycle events `I`, `C`, `I`, and `K` one more time, through random nodes.

#### Parallel Events

One logical problem with multiple distributed state machines is that, if the
same event is sent into multiple state machines at exactly the same
time, only one of those events causes a distributed state
transition. This is a somewhat expected scenario, because the first state
machine (for this event) that is able to change the distributed state
controls the distributed transition logic. Effectively, all other
machines that receive this same event silently discard the event,
because the distributed state is no longer in a state where the particular
event can be processed.

In the test shown in the following image, we demonstrate that a state change caused by a
parallel event throughout an ensemble eventually causes a
consistent state change in all machines:

sm tech parallel events

In the preceding image, we use the same event flow that we used in the previous sample
([Isolated Events](#sm-tech-isolated-events)), with the difference that events are always
sent to all nodes.

#### Concurrent Extended State Variable Changes

Extended state machine variables are not guaranteed to be atomic at
any given time but, after a distributed state change, all state machines
in an ensemble should have a synchronized extended state.

In this test, we demonstrate that a change in extended state
variables in one distributed state machine eventually becomes
consistent in all the distributed state machines.
The following image shows this test:

sm tech isolated events with variable

In the preceding image:

* Event `J` is sent to node `n5` with event variable `testVariable` having
  value `v1`. All nodes then report having a variable named `testVariable`
  with a value of `v1`.

* Event `J` is repeated with variables `v2` through `v8`, doing the same checks.

#### Partition Tolerance

We need to always assume that, sooner or later, things in a cluster
go bad, whether it is a crash of a Zookeeper instance, a state
machine crash, or a network problem such as a “brain split”. (A brain split is a
situation where existing cluster members are isolated so that only
parts of the hosts are able to see each other.) The usual scenario is that a
brain split creates minority and majority partitions of an
ensemble such that hosts in the minority cannot participate in the ensemble
until the network status has been healed.

In the following tests, we demonstrate that various types of brain split in
an ensemble eventually cause a fully synchronized state of all the
distributed state machines.

There are two scenarios that have a straight brain split in a
network where `Zookeeper` and `Statemachine` instances are
split in half (assuming each `Statemachine` connects to a
local `Zookeeper` instance):

* If the current Zookeeper leader is kept in the majority, all clients
  connected to the majority keep functioning properly.

* If the current Zookeeper leader is left in the minority, all clients
  disconnect from it and try to connect back until the previous
  minority members have successfully joined back into the existing majority
  ensemble.

| |In our current Jepsen tests, we cannot separate Zookeeper split-brain scenarios between the leader being left in the majority or in the minority, so we need to run the tests multiple times to accomplish this situation.|
|---|---|

| |In the following plots, we have mapped a state machine error state into an `error` to indicate that the state machine is in an error state instead of a normal state. Please remember this when interpreting chart states.|
|---|---|

In this first test, we show that, when an existing Zookeeper leader was
kept in the majority, three out of five machines continue as is.
The following image shows this test:

sm tech partition half 1

In the preceding image:

* The first event, `C`, is sent to all machines, leading to a state change to `S211`.

* Jepsen nemesis causes a brain split, which causes partitions
  of `n1/n2/n5` and `n3/n4`. Nodes `n3/n4` are left in the minority, and
  nodes `n1/n2/n5` construct a new healthy majority. Nodes in the
  majority keep functioning without problems, but nodes in the minority
  go into error states.

* Jepsen heals the network and, after some time, nodes `n3/n4` join
  back into the ensemble and synchronize their distributed status.

* Finally, event `K1` is sent to all state machines to ensure that the ensemble
  is working properly. This state change leads back to state `S21`.

In the second test, we show that, when the existing Zookeeper leader was
kept in the minority, all machines error out.
The following image shows the second test:

sm tech partition half 2

In the preceding image:

* The first event, `C`, is sent to all machines, leading to a state change to `S211`.

* Jepsen nemesis causes a brain split, which causes partitions
  such that the existing `Zookeeper` leader is kept in the minority and all
  instances are disconnected from the ensemble.

* Jepsen heals the network and, after some time, all nodes join
  back into the ensemble and synchronize their distributed status.

* Finally, event `K1` is sent to all state machines to ensure that the ensemble
  works properly. This state change leads back to state `S21`.

#### Crash and Join Tolerance

In this test, we demonstrate that killing an existing state machine
and then joining a new instance back into an ensemble keeps the
distributed state healthy and the newly joined state machines synchronize
their states properly.
The following image shows the crash and join tolerance test:

sm tech stop start

| |In this test, states are not checked between the first `X` and the last `X`. Thus, the graph shows a flat line in between. The states are checked exactly where the state change happens between `S21` and `S211`.|
|---|---|

In the preceding image:

* All state machines are transitioned from the initial state (`S21`) into
  state `S211` so that we can test proper state synchronization during the join.

* `X` marks when a specific node has been crashed and started.

* At the same time, we request states from all machines and plot the result.

* Finally, we do a simple transition back to `S21` from `S211` to make
  sure that all state machines still function properly.

## Developer Documentation

This appendix provides generic information for developers who may
want to contribute, or for other people who want to understand how the state
machine works or understand its internal concepts.

### StateMachine Config Model

`StateMachineModel` and other related SPI classes are an abstraction
between various configuration and factory classes. They also make it
easier for others to integrate with and build state machines.

As the following listing shows, you can instantiate a state machine by building a model
with configuration data classes and then asking a factory to build a
state machine:

```
// setup configuration data
ConfigurationData<String, String> configurationData = new ConfigurationData<>();

// setup states data
Collection<StateData<String, String>> stateData = new ArrayList<>();
stateData.add(new StateData<String, String>("S1", true));
stateData.add(new StateData<String, String>("S2"));
StatesData<String, String> statesData = new StatesData<>(stateData);

// setup transitions data
Collection<TransitionData<String, String>> transitionData = new ArrayList<>();
transitionData.add(new TransitionData<String, String>("S1", "S2", "E1"));
TransitionsData<String, String> transitionsData = new TransitionsData<>(transitionData);

// setup model
StateMachineModel<String, String> stateMachineModel = new DefaultStateMachineModel<>(configurationData, statesData,
        transitionsData);

// instantiate machine via factory
ObjectStateMachineFactory<String, String> factory = new ObjectStateMachineFactory<>(stateMachineModel);
StateMachine<String, String> stateMachine = factory.getStateMachine();
```

## Appendix D: Reactor Migration Guide

The main task for `3.x` has been to move and change as much as possible,
both internally and externally, from imperative code into the reactive world.
This means that some of the main interfaces have gained new reactive methods and
that most of the internal execution logic (where applicable) has been moved over
to be handled by Reactor. Essentially, this means that the thread-handling model
is considerably different compared to `2.x`. The following sections
go through all these changes.

### Communicating with a Machine

We’ve added new reactive methods to `StateMachine` while still keeping the old blocking event
methods in place.

```
Flux<StateMachineEventResult<S, E>> sendEvent(Mono<Message<E>> event);

Flux<StateMachineEventResult<S, E>> sendEvents(Flux<Message<E>> events);

Mono<List<StateMachineEventResult<S, E>>> sendEventCollect(Mono<Message<E>> event);
```

We now work solely with Spring’s `Message` and Reactor’s `Mono` and `Flux` classes.
You can send a `Mono` of a `Message` and receive back a `Flux` of `StateMachineEventResult`.
Remember that nothing happens until you subscribe to this `Flux`. For more about
this returned value, see [StateMachineEventResult](#sm-triggers-statemachineeventresult).
The `sendEventCollect` method is just syntactic sugar to pass in a `Mono` and get back
a `Mono` that wraps the results as a list.
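Each emitted `StateMachineEventResult` reports how the machine handled the message. The following hedged sketch (assuming the 3.x `getResultType()` accessor) prints whether an event was accepted, denied, or deferred:

```
Message<String> message = MessageBuilder.withPayload("EVENT").build();
machine.sendEvent(Mono.just(message))
    // each result reports whether the machine ACCEPTED, DENIED, or DEFERRED the event
    .doOnNext(result -> System.out.println(result.getResultType()))
    .subscribe();
```

The simplest form, without inspecting results, looks like this: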
```
Message<String> message = MessageBuilder.withPayload("EVENT").build();
machine.sendEvent(Mono.just(message)).subscribe();
```

You can also send a `Flux` of messages instead of a single `Mono` message:

```
machine.sendEvents(Flux.just(message)).subscribe();
```

All of the Reactor methods are at your disposal. For example, to avoid blocking and
do something when event handling is completed, you could do something like this:

```
Mono<Message<String>> mono = Mono.just(MessageBuilder.withPayload("EVENT").build());
machine.sendEvent(mono)
    .doOnComplete(() -> {
        System.out.println("Event handling complete");
    })
    .subscribe();
```

The old API methods returning a `boolean` for the accepted status are still in place
but are deprecated and will be removed in future releases.

```
boolean accepted = machine.sendEvent("EVENT");
```

### TaskExecutor and TaskScheduler

State machine execution with `TaskExecutor` and state action scheduling with `TaskScheduler`
have been fully replaced in favor of Reactor execution and scheduling.

Essentially, execution outside of a main thread is needed in two places: firstly with
*State Actions*, which need to be cancellable, and secondly with *Regions*, which should
always be executed independently. Currently, we have chosen to use Reactor's
`Schedulers.parallel()` for these, which should give relatively good results as it
tries to automatically use the available number of CPU cores on a system.

### Reactive Examples

While most of the examples are still the same, we have overhauled some of them and
created some new ones:

* [Turnstile Reactive](#statemachine-examples-turnstilereactive)
\ No newline at end of file
diff --git a/docs/en/spring-vault/README.md b/docs/en/spring-vault/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/docs/en/spring-vault/spring-vault.md b/docs/en/spring-vault/spring-vault.md
new file mode 100644
index 0000000000000000000000000000000000000000..b3d49514bfbc48a5724d46e19ccf8141a76966b2
--- /dev/null
+++ b/docs/en/spring-vault/spring-vault.md
@@ -0,0 +1,2381 @@
# Spring Vault - Reference Documentation

# Preface

The Spring Vault project applies core Spring concepts to the development of solutions using HashiCorp Vault. We provide a "template" as a high-level abstraction for storing and querying documents. You will notice similarities to the REST support in the Spring Framework.

This document is the reference guide for Spring Vault. It explains Vault concepts, semantics, and syntax.

This part of the reference documentation explains the core functionality offered by Spring Vault.

[Vault support](#vault.core) introduces the Vault module feature set.

## 1. Document Structure

This section provides a basic introduction to Spring and Vault.
It contains details about following the development and how to get support.

The rest of the document covers Spring Vault features and assumes
the user is familiar with [HashiCorp Vault](https://www.vaultproject.io) as well as Spring concepts.

## 2. Knowing Spring

Spring Vault uses the Spring Framework’s [core](https://docs.spring.io/spring/docs/5.3.4/spring-framework-reference/core.html) functionality, such as the [IoC](https://docs.spring.io/spring/docs/5.3.4/spring-framework-reference/core.html) container. While it is not important to know the Spring APIs, understanding the concepts behind them is.
At a minimum, the idea behind IoC should be familiar, for whatever IoC container you choose to use.

The core functionality of the Vault support can be used directly, with no need to invoke the IoC services of the Spring container. This is much like `RestTemplate`, which can be used 'standalone' without any other services of the Spring container. To leverage all the features of Spring Vault, such as the session support, you will need to configure some parts of the library by using Spring.

To learn more about Spring, you can refer to the comprehensive (and sometimes disarming) documentation that explains the Spring Framework in detail. There are a lot of articles, blog entries, and books on the matter - take a look at the Spring Framework [home page](https://spring.io/docs) for more information.

## 3. Knowing Vault

Security and working with secrets is a concern of every developer working with databases, user credentials, or API keys. Vault steps in by providing secure storage combined with access control, revocation, key rolling, and auditing. In short: Vault is a service for securely accessing and storing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more.

The jumping-off ground for learning about Vault is [www.vaultproject.io](https://www.vaultproject.io). Here is a list of useful resources:

* The manual introduces Vault and contains links to getting started guides, reference documentation, and tutorials.

* The online shell provides a convenient way to interact with a Vault instance in combination with the online tutorial.

* [HashiCorp Vault Introduction](https://www.vaultproject.io/intro/index.html)

* [HashiCorp Vault Documentation](https://www.vaultproject.io/docs/index.html)

Spring Vault provides client-side support for accessing, storing, and revoking secrets.
With [HashiCorp’s Vault](https://www.vaultproject.io) you have a central place to
manage external secret data for applications across all environments.
Vault can manage static and dynamic secrets, such as application data and
username/password pairs for remote applications/resources, and it can provide credentials
for external services such as MySQL, PostgreSQL, Apache Cassandra, Consul, AWS, and more.

## 4. Requirements

Spring Vault 2.x binaries require JDK level 8.0 and above and [Spring Framework](https://spring.io/docs) 5.3.4 and above.

In terms of Vault, it requires [Vault](https://www.vaultproject.io/) at least 0.6.

## 5. Additional Help Resources

Learning a new framework is not always straightforward. In this section, we try to provide what we think is an easy-to-follow guide for starting with the Spring Vault module. However, if you encounter issues or are just looking for advice, feel free to use one of the links below:

### 5.1. Support

There are a few support options available:

#### 5.1.1. Community Forum

Post questions regarding Spring Vault on [Stackoverflow](https://stackoverflow.com/questions/tagged/spring-vault) to share information and help each other. Note that registration is needed **only** for posting.

#### 5.1.2. Professional Support

Professional, from-the-source support, with guaranteed response time, is available from [Pivotal Software, Inc.](https://pivotal.io/), the company behind Spring Vault and Spring.

### 5.2. Following Development
For information on the Spring Vault source code repository, nightly builds, and snapshot artifacts, please see the [Spring Vault homepage](https://projects.spring.io/spring-vault/). You can help make Spring Vault best serve the needs of the Spring community by interacting with developers through the community on [Stackoverflow](https://stackoverflow.com/questions/tagged/spring-vault). If you encounter a bug or want to suggest an improvement, please create a ticket on the Spring Vault issue [tracker](https://github.com/spring-projects/spring-vault/issues). To stay up to date with the latest news and announcements in the Spring ecosystem, subscribe to the Spring Community [Portal](https://spring.io). Lastly, you can follow the Spring [blog](https://spring.io/blog) or the project team on Twitter ([SpringCentral](https://twitter.com/springcentral)).

## 6. New & Noteworthy

### 6.1. What’s new in Spring Vault 2.3

* Support for PEM-encoded certificates for keystore and truststore usage.

* `ReactiveVaultEndpointProvider` for non-blocking lookup of `VaultEndpoint`.

* `VaultKeyValueMetadataOperations` for Key-Value metadata interaction.

* Support for the `transform` backend (Enterprise feature).

* Documentation of [how to use Vault secret backends](#vault.core.secret-engines).

* Login credentials for Kubernetes and PCF authentication are reloaded for each login attempt.

* `SecretLeaseContainer` publishes `SecretLeaseRotatedEvent` instead of `SecretLeaseExpiredEvent` and `SecretLeaseCreatedEvent` on successful secret rotation.

* `AbstractVaultConfiguration.threadPoolTaskScheduler()` bean type changed to `TaskSchedulerWrapper` instead of `ThreadPoolTaskScheduler`.

### 6.2. What’s new in Spring Vault 2.2

* Support for Key-Value v2 (versioned backend) secrets through `@VaultPropertySource`.

* SpEL support in `@Secret`.

* Support for Jetty as a reactive HttpClient.

* `LifecycleAwareSessionManager` and `ReactiveLifecycleAwareSessionManager` now emit `AuthenticationEvent`s.

* [PCF authentication](#vault.authentication.pcf).

* Deprecation of `AppIdAuthentication`.
  Use `AppRoleAuthentication` instead, as recommended by HashiCorp Vault.

* `CubbyholeAuthentication` and wrapped `AppRoleAuthentication` now use `sys/wrapping/unwrap` endpoints by default.

* Kotlin Coroutines support for `ReactiveVaultOperations`.

### 6.3. What’s new in Spring Vault 2.1

* [GCP Compute](#vault.authentication.gcpgce), [GCP IAM](#vault.authentication.gcpiam), and [Azure](#vault.authentication.azuremsi) authentication.

* Template API support for versioned and unversioned Key/Value backends and for Vault wrapping operations.

* Support for full pull mode in reactive AppRole authentication.

* Improved exception hierarchy for Vault login failures.

### 6.4. What’s new in Spring Vault 2.0

* Authentication steps DSL to [compose authentication flows](#vault.authentication.steps).

* [Reactive Vault client](#vault.core.reactive.template) via `ReactiveVaultOperations`.

* [Vault repository support](#vault.repositories) based on Spring Data KeyValue.

* Transit batch encrypt and decrypt support.

* Policy management for policies stored as JSON.

* Support for CSR signing, certificate revocation, and CRL retrieval.

* [Kubernetes authentication](#vault.authentication.kubernetes).

* RoleId/SecretId unwrapping for [AppRole authentication](#vault.authentication.approle).
* [Spring Security integration](#vault.misc.spring-security) with transit backend-based `BytesKeyGenerator` and `BytesEncryptor`.

### 6.5. What’s new in Spring Vault 1.1.0

* [AWS IAM authentication](#vault.authentication.awsiam).

* Configuration of encryption/decryption versions for transit keys.

* Pull mode for [AppRole authentication](#vault.authentication.approle).

* Transit batch encrypt and decrypt support.

* TTL-based generic secret rotation.

### 6.6. What’s new in Spring Vault 1.0

* Initial Vault support.

# Reference documentation

## 7. Vault support

The Vault support contains a wide range of features, which are summarized below:

* Spring configuration support using Java-based `@Configuration` classes.

* A `VaultTemplate` helper class that increases productivity when performing common
  Vault operations, including integrated object mapping between Vault responses and POJOs.

For most tasks, you will find yourself using `VaultTemplate`, which leverages the
rich communication functionality. `VaultTemplate` is the place to look for
accessing functionality such as reading data from Vault or issuing
administrative commands. `VaultTemplate` also provides callback methods so that it is easy for you to
get hold of low-level API artifacts, such as `RestTemplate`, to communicate
directly with Vault.

### 7.1. Dependencies

The easiest way to find compatible versions of Spring Vault dependencies is by relying on the Spring Vault BOM we ship with the compatible versions defined.
In a Maven project, you would declare this dependency in the `<dependencyManagement>` section of your `pom.xml`:

Example 1. Using the Spring Vault BOM

```
<dependencyManagement>
  <dependencies>
    <dependency>
      <groupId>org.springframework.vault</groupId>
      <artifactId>spring-vault-dependencies</artifactId>
      <version>2.3.1</version>
      <scope>import</scope>
      <type>pom</type>
    </dependency>
  </dependencies>
</dependencyManagement>
```

The current version is `2.3.1`.
The version name follows this pattern: `${version}` for GA and service releases and `${version}-${release}` for snapshots and milestones. `release` can be one of the following:

* `SNAPSHOT` - current snapshots

* `M1`, `M2`, etc. - milestones

* `RC1`, `RC2`, etc. - release candidates

Example 2. Declaring a dependency to Spring Vault

```
<dependencies>
  <dependency>
    <groupId>org.springframework.vault</groupId>
    <artifactId>spring-vault-core</artifactId>
  </dependency>
</dependencies>
```

### 7.2. Spring Framework

The current version of Spring Vault requires Spring Framework version
5.3.4 or better.
The modules might also work with an older bugfix version of that minor version.
However, using the most recent version within that generation is highly recommended.

## 8. Getting Started

Spring Vault support requires Vault 0.6 or higher and Java SE 6 or higher.
An easy way to bootstrap a working environment is to create a
Spring-based project in [STS](https://spring.io/tools/sts).

First you need to set up a running Vault server.
Refer to the [Vault intro](https://www.vaultproject.io/intro/) for an explanation of how to start a Vault instance.

To create a Spring project in STS, go to File → New →
Spring Template Project → Simple Spring Utility Project →
press Yes when prompted. Then enter a project and a package name, such as `org.spring.vault.example`.

Then add the following to the `pom.xml` dependencies section:

Example 3. Adding the Spring Vault dependency

```
<dependencies>
  <dependency>
    <groupId>org.springframework.vault</groupId>
    <artifactId>spring-vault-core</artifactId>
    <version>2.3.1</version>
  </dependency>
</dependencies>
```

If you are using a milestone or release candidate, you will also need to add the location of the Spring
Milestone repository to your Maven `pom.xml`, at the same level as your `<dependencies>` element:
```
<repositories>
  <repository>
    <id>spring-milestone</id>
    <name>Spring Maven MILESTONE Repository</name>
    <url>https://repo.spring.io/libs-milestone</url>
  </repository>
</repositories>
```

The repository is also [browseable here](https://repo.spring.io/milestone/org/springframework/vault/).

If you are using a SNAPSHOT, you will also need to add the location of the Spring
Snapshot repository to your Maven `pom.xml`, at the same level as your `<dependencies>` element:

```
<repositories>
  <repository>
    <id>spring-snapshot</id>
    <name>Spring Maven SNAPSHOT Repository</name>
    <url>https://repo.spring.io/libs-snapshot</url>
  </repository>
</repositories>
```

The repository is also [browseable here](https://repo.spring.io/snapshot/org/springframework/vault/).

Create a simple `Secrets` class to persist:

Example 4. Mapped data object

```
package org.spring.vault.example;

public class Secrets {

    String username;
    String password;

    public String getUsername() {
        return username;
    }

    public String getPassword() {
        return password;
    }
}
```

And a main application to run:

Example 5. Example application using Spring Vault

```
package org.springframework.vault.example;

import org.springframework.vault.authentication.TokenAuthentication;
import org.springframework.vault.client.VaultEndpoint;
import org.springframework.vault.core.VaultTemplate;
import org.springframework.vault.support.VaultResponseSupport;

public class VaultApp {

    public static void main(String[] args) {

        VaultTemplate vaultTemplate = new VaultTemplate(new VaultEndpoint(),
                new TokenAuthentication("00000000-0000-0000-0000-000000000000"));

        Secrets secrets = new Secrets();
        secrets.username = "hello";
        secrets.password = "world";

        vaultTemplate.write("secret/myapp", secrets);

        VaultResponseSupport<Secrets> response = vaultTemplate.read("secret/myapp", Secrets.class);
        System.out.println(response.getData().getUsername());

        vaultTemplate.delete("secret/myapp");
    }
}
```

Even in this simple example, there are a few things to take notice of:

* You can instantiate the central class of Spring Vault, [`VaultTemplate`](#vault.core.template), by using the `org.springframework.vault.client.VaultEndpoint` object and a `ClientAuthentication`.
  You are not required to spin up a Spring context to use Spring Vault.

* Vault is expected to be configured with a root token of `00000000-0000-0000-0000-000000000000` to run this application.

* The mapper works against standard POJO objects without the need for any
  additional metadata (though you can optionally provide that information).

* Mapping conventions can use field access. Notice that the `Secrets` class has only getters.

* If the constructor argument names match the field names of the stored document,
  they will be used to instantiate the object.
## 9. Introduction to VaultTemplate

The `VaultTemplate` class, located in the package `org.springframework.vault.core`,
is the central class of Spring’s Vault support, providing a rich feature set for
interacting with Vault. The template offers convenience operations to read, write, and
delete data in Vault and provides a mapping between your domain objects and Vault data.

| |Once configured, `VaultTemplate` is thread-safe and can be reused across multiple instances.|
|---|---|

The mapping between Vault documents and domain classes is done by delegating to `RestTemplate`. Spring Web support provides the mapping infrastructure.

The `VaultTemplate` class implements the interface `VaultOperations`.
As much as possible, the methods on `VaultOperations` are named after methods
available in the Vault API, to make the API familiar to existing Vault developers
who are used to the API and CLI. For example, you will find methods such as
"write", "delete", "read", and "revoke".
The design goal was to make it as easy as possible to transition between
the use of the Vault API and `VaultOperations`. A major difference between
the two APIs is that `VaultOperations` can be passed domain objects instead of
JSON Key-Value pairs.

| |The preferred way to reference the operations on a `VaultTemplate` instance is via its interface, `VaultOperations`.|
|---|---|
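As a small, hedged sketch of that advice (reusing the `VaultEndpoint` and `TokenAuthentication` classes shown in the Getting Started example), calling code can hold the interface type while construction stays in one place:

```
// Depend on the interface; only the wiring knows about VaultTemplate.
VaultOperations operations = new VaultTemplate(new VaultEndpoint(),
        new TokenAuthentication("00000000-0000-0000-0000-000000000000"));

// Callers written against VaultOperations are easy to stub in tests.
operations.write("secret/myapp", Collections.singletonMap("key", "value"));
```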
While there are many convenience methods on `VaultTemplate` to help you easily
perform common tasks, if you need to access the Vault API directly to use
functionality not explicitly exposed by `VaultTemplate`, you can use one of
several execute callback methods to access the underlying APIs. The execute callbacks
give you a reference to a `RestOperations` object.
See the section [Execution Callbacks](#vault.core.executioncallback) for more information.

Now let’s look at examples of how to work with Vault in the context of the Spring container.

### 9.1. Registering and configuring Spring Vault beans

Using Spring Vault does not require a Spring context. However, instances of `VaultTemplate` and `SessionManager` registered inside a managed context will participate
in [lifecycle events](https://docs.spring.io/spring/docs/5.3.4/spring-framework-reference/core.html#beans-factory-nature) provided by the Spring IoC container. This is useful to dispose of active Vault sessions upon
application shutdown. You also benefit from reusing the same `VaultTemplate` instance across your application.

Spring Vault comes with a supporting configuration class that provides bean definitions
for use inside a Spring context. Application configuration
classes typically extend from `AbstractVaultConfiguration` and are required to
provide additional details that are environment-specific.

Extending from `AbstractVaultConfiguration` requires you to implement the
`VaultEndpoint vaultEndpoint()` and `ClientAuthentication clientAuthentication()` methods.

Example 6. Registering Spring Vault objects using Java-based bean metadata

```
@Configuration
public class AppConfig extends AbstractVaultConfiguration {

    /**
     * Specify an endpoint for connecting to Vault.
     */
    @Override
    public VaultEndpoint vaultEndpoint() {
        return new VaultEndpoint(); (1)
    }

    /**
     * Configure a client authentication.
     * Please consider a more secure authentication method
     * for production use.
     */
    @Override
    public ClientAuthentication clientAuthentication() {
        return new TokenAuthentication("…"); (2)
    }
}
```

|**1**|Create a new `VaultEndpoint` that points by default to `https://localhost:8200`.|
|-----|---|
|**2**|This sample uses `TokenAuthentication` to get started quickly. See [Authentication Methods](#vault.core.authentication) for details on supported authentication methods.|
Example 7. Registering Spring Vault applying injected properties

```
@Configuration
public class AppConfig extends AbstractVaultConfiguration {

    @Value("${vault.uri}")
    URI vaultUri;

    /**
     * Specify an endpoint that was injected as URI.
     */
    @Override
    public VaultEndpoint vaultEndpoint() {
        return VaultEndpoint.from(vaultUri); (1)
    }

    /**
     * Configure a Client Certificate authentication.
     * {@link RestOperations} can be obtained from {@link #restOperations()}.
     */
    @Override
    public ClientAuthentication clientAuthentication() {
        return new ClientCertificateAuthentication(restOperations()); (2)
    }
}
```

|**1**|`VaultEndpoint` can be constructed using various factory methods, such as `from(URI uri)` or `VaultEndpoint.create(String host, int port)`.|
|-----|---|
|**2**|Dependencies for `ClientAuthentication` methods can be obtained either from `AbstractVaultConfiguration` or provided by your configuration.|

| |Creating a custom configuration class might be cumbersome in some cases. Take a look at `EnvironmentVaultConfiguration`, which allows configuration by using properties from existing property sources and Spring’s `Environment`. Read more in [Using `EnvironmentVaultConfiguration`](#vault.core.environment-vault-configuration).|
|---|---|

### 9.2. Session Management

Spring Vault requires a `ClientAuthentication` to log in and access Vault.
See [Authentication Methods](#vault.core.authentication) for details regarding authentication.
A Vault login should not occur on each authenticated Vault interaction but
must be reused throughout a session. This aspect is handled by a `SessionManager`
implementation. A `SessionManager` decides how often it obtains a token and handles
revocation and renewal. Spring Vault comes with two implementations:

* `SimpleSessionManager`: Just obtains tokens from the supplied `ClientAuthentication`,
  without refresh and revocation.

* `LifecycleAwareSessionManager`: This `SessionManager` schedules token
  renewal if a token is renewable and revokes a login token on disposal.
  Renewal is scheduled with an `AsyncTaskExecutor`. `LifecycleAwareSessionManager`
  is configured by default if you use `AbstractVaultConfiguration`.

### 9.3. Using `EnvironmentVaultConfiguration`

Spring Vault includes `EnvironmentVaultConfiguration` to configure the Vault client from Spring’s `Environment` and a set of predefined
property keys. `EnvironmentVaultConfiguration` supports frequently applied configurations. Other configurations are supported by deriving from the most appropriate configuration class. Include `EnvironmentVaultConfiguration` with `@Import(EnvironmentVaultConfiguration.class)` in existing
Java-based configuration classes and supply configuration properties through any of Spring’s `PropertySource`s.

Example 8. Using EnvironmentVaultConfiguration with a property file

Java-based configuration class

```
@PropertySource("vault.properties")
@Import(EnvironmentVaultConfiguration.class)
public class MyConfiguration{
}
```

vault.properties

```
vault.uri=https://localhost:8200
vault.token=00000000-0000-0000-0000-000000000000
```

**Property keys**

* Vault URI: `vault.uri`

* SSL Configuration

  * Keystore resource: `vault.ssl.key-store` (optional)

  * Keystore password: `vault.ssl.key-store-password` (optional)

  * Keystore type: `vault.ssl.key-store-type` (optional, typically `jks`, also supports `pem`)

  * Truststore resource: `vault.ssl.trust-store` (optional)

  * Truststore password: `vault.ssl.trust-store-password` (optional)

  * Truststore type: `vault.ssl.trust-store-type` (optional, typically `jks`, also supports `pem`)

* Authentication method: `vault.authentication` (defaults to `TOKEN`; supported authentication methods are: `TOKEN`, `APPID`, `APPROLE`, `AWS_EC2`, `AZURE`, `CERT`, `CUBBYHOLE`, `KUBERNETES`)

**Authentication-specific property keys**

**[Token authentication](#vault.authentication.token)**

* Vault Token: `vault.token`

**[AppId authentication](#vault.authentication.appid)**

* AppId path: `vault.app-id.app-id-path` (defaults to `app-id`)

* AppId: `vault.app-id.app-id`

* UserId: `vault.app-id.user-id`. `MAC_ADDRESS` and `IP_ADDRESS` use the `MacAddressUserId` and `IpAddressUserId` user id mechanisms, respectively.
  Any other value is used with `StaticUserId`.
**[AppRole authentication](#vault.authentication.approle)**

* AppRole path: `vault.app-role.app-role-path` (defaults to `approle`)

* RoleId: `vault.app-role.role-id`

* SecretId: `vault.app-role.secret-id` (optional)

**[AWS-EC2 authentication](#vault.authentication.awsec2)**

* AWS EC2 path: `vault.aws-ec2.aws-ec2-path` (defaults to `aws-ec2`)

* Role: `vault.aws-ec2.role`

* RoleId: `vault.aws-ec2.role-id` (**deprecated:** use `vault.aws-ec2.role` instead)

* Identity Document URL: `vault.aws-ec2.identity-document` (defaults to `http://169.254.169.254/latest/dynamic/instance-identity/pkcs7`)

**[Azure (MSI) authentication](#vault.authentication.azuremsi)**

* Azure MSI path: `vault.azure-msi.azure-path` (defaults to `azure`)

* Role: `vault.azure-msi.role`

* Metadata Service URL: `vault.azure-msi.metadata-service` (defaults to `http://169.254.169.254/metadata/instance?api-version=2017-08-01`)

* Identity Token Service URL: `vault.azure-msi.identity-token-service` (defaults to `http://169.254.169.254/metadata/identity/oauth2/token?resource=https://vault.hashicorp.com&api-version=2018-02-01`)

**[TLS certificate authentication](#vault.authentication.clientcert)**

No configuration options.

**[Cubbyhole authentication](#vault.authentication.cubbyhole)**

* Initial Vault Token: `vault.token`

**[Kubernetes authentication](#vault.authentication.kubernetes)**

* Kubernetes path: `vault.kubernetes.kubernetes-path` (defaults to `kubernetes`)

* Role: `vault.kubernetes.role`

* Path to service account token file: `vault.kubernetes.service-account-token-file` (defaults to `/var/run/secrets/kubernetes.io/serviceaccount/token`)

### 9.4. Execution callbacks

One common design feature of all Spring template classes is that all functionality is routed into one of the template’s execute callback methods.
This helps ensure that exceptions and any resource management that may be required are handled consistently.
While this was of much greater need in the case of JDBC and JMS than with Vault, it still offers a single spot for access and logging to occur.
As such, using the execute callbacks is the preferred way to access the Vault API
to perform uncommon operations that we have not exposed as methods on `VaultTemplate`.

Here is a list of execute callback methods:

* `<T> T` **doWithVault** `(RestOperationsCallback<T> callback)` Executes the given `RestOperationsCallback`, allowing interaction with Vault using `RestOperations` without requiring a session.

* `<T> T` **doWithSession** `(RestOperationsCallback<T> callback)` Executes the given `RestOperationsCallback`, allowing interaction with Vault in an authenticated session.

Here is an example that uses a `RestOperationsCallback` to initialize Vault:

```
vaultOperations.doWithVault(new RestOperationsCallback<VaultInitializationResponse>() {

    @Override
    public VaultInitializationResponse doWithRestOperations(RestOperations restOperations) {

        ResponseEntity<VaultInitializationResponse> exchange = restOperations
                .exchange("/sys/init", HttpMethod.PUT,
                        new HttpEntity<>(request),
                        VaultInitializationResponse.class);

        return exchange.getBody();
    }
});
```
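The session-scoped variant works the same way. Here is a minimal hedged sketch of `doWithSession` written as a lambda against an authenticated endpoint (the `sys/mounts` path is merely an illustrative choice):

```
// Runs against the authenticated session; the callback receives a
// RestOperations that already carries the Vault token.
String mounts = vaultOperations.doWithSession(restOperations ->
        restOperations.getForObject("/sys/mounts", String.class));
```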
## 10. Support for Vault’s Secret Engines

Spring Vault ships with several extensions to support Vault’s various secret engines.

Specifically, Spring Vault ships with extensions for:

* [Key-Value Version 1 ("unversioned secrets")](#vault.core.backends.kv1)

* [Key-Value Version 2 ("versioned secrets")](#vault.core.backends.kv2)

* [PKI (Public Key Infrastructure)](#vault.core.backends.pki)

* [Token Authentication Backend](#vault.core.backends.token)

* Transform (Enterprise feature)

* [Transit Backend](#vault.core.backends.transit)

* System Backend

You can use all other backends through methods on `VaultTemplate` directly (`VaultTemplate.read(…)`, `VaultTemplate.write(…)`).

### 10.1. Key-Value Version 1 ("unversioned secrets")

The `kv` secrets engine is used to store arbitrary secrets within the configured physical storage for Vault.

When running the `kv` secrets engine in a non-versioned way, only the most recently written value for a key is preserved.
The benefit of non-versioned kv is a reduced storage size for each key, since no additional metadata or history is stored.
Additionally, requests going to a backend configured this way are more performant, because there are fewer storage calls and no locking for any given request.

Spring Vault ships with a dedicated Key-Value API to encapsulate differences between the individual Key-Value API implementations. `VaultKeyValueOperations` follows the Vault CLI design.
That is the primary command-line tool for Vault, providing commands such as `vault kv get`, `vault kv put`, and so on.

You can use this API with both Key-Value engine versions by specifying the version and mount path.
The following example uses Key-Value version 1:

```
VaultOperations operations = new VaultTemplate(new VaultEndpoint());
VaultKeyValueOperations keyValueOperations = operations.opsForKeyValue("secret",
        VaultKeyValueOperationsSupport.KeyValueBackend.KV_1);

keyValueOperations.put("elvis", Collections.singletonMap("social-security-number", "409-52-2002"));

VaultResponse read = keyValueOperations.get("elvis");
read.getRequiredData().get("social-security-number");
```

`VaultKeyValueOperations` supports all Key-Value operations, such as `put`, `get`, `delete`, and `list`.

Alternatively, the API can be used through `VaultTemplate` because of its direct mapping and simple use, as keys and responses map directly to input and output keys.
The following example illustrates writing and reading a secret at `elvis`.
The `kv` secrets engine is mounted at `secret`:

```
VaultOperations operations = new VaultTemplate(new VaultEndpoint());

operations.write("secret/elvis", Collections.singletonMap("social-security-number", "409-52-2002"));

VaultResponse read = operations.read("secret/elvis");
read.getRequiredData().get("social-security-number");
```

You can find more details about the [Vault Key-Value version 1 API](https://www.vaultproject.io/api-docs/secret/kv/kv-v1) in the Vault reference documentation.
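To round out the version 1 examples, here is a short hedged sketch of the remaining `list` and `delete` operations, reusing the `operations` and `keyValueOperations` instances from above:

```
// List keys under the kv mount, then remove the secret written earlier.
List<String> keys = operations.list("secret");
keyValueOperations.delete("elvis");
```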
### 10.2. Key-Value Version 2 ("versioned secrets")

You can run the `kv` secrets engine in one of two versions.
This section explains using version 2. When running version 2 of the `kv` backend, a key can retain a configurable number of versions.
You can retrieve the metadata and data of the older versions.
Additionally, you can use check-and-set operations to avoid unintentionally overwriting data.

As with [Key-Value Version 1 ("unversioned secrets")](#vault.core.backends.kv1), Spring Vault ships with a dedicated Key-Value API to encapsulate differences between the individual Key-Value API implementations. `VaultKeyValueOperations` follows the Vault CLI design.
That is the primary command-line tool for Vault, providing commands such as `vault kv get`, `vault kv put`, and so on.

You can use this API with both Key-Value engine versions by specifying the version and mount path.
The following example uses Key-Value version 2:

```
VaultOperations operations = new VaultTemplate(new VaultEndpoint());
VaultKeyValueOperations keyValueOperations = operations.opsForKeyValue("secret",
        VaultKeyValueOperationsSupport.KeyValueBackend.KV_2);

keyValueOperations.put("elvis", Collections.singletonMap("social-security-number", "409-52-2002"));

VaultResponse read = keyValueOperations.get("elvis");
read.getRequiredData().get("social-security-number");
```

`VaultKeyValueOperations` supports all Key-Value operations, such as `put`, `get`, `delete`, and `list`.

You can also interact with the specifics of the versioned key-value API. This is useful if you want to obtain a specific secret version or you need access to the metadata.

```
VaultOperations operations = new VaultTemplate(new VaultEndpoint());
VaultVersionedKeyValueOperations versionedOperations = operations.opsForVersionedKeyValue("secret");

Versioned.Metadata metadata = versionedOperations.put("elvis", (1)
        Collections.singletonMap("social-security-number", "409-52-2002"));

Version version = metadata.getVersion(); (2)

Versioned<Map<String, Object>> ssn = versionedOperations.get("elvis", Version.from(42)); (3)

Versioned<SocialSecurityNumber> mappedSsn = versionedOperations.get("elvis", (4)
        Version.from(42), SocialSecurityNumber.class);

Versioned<Map<String, String>> versioned = Versioned.create(Collections (5)
        .singletonMap("social-security-number", "409-52-2002"),
        Version.from(42));

versionedOperations.put("elvis", versioned);
```

|**1**|Store secrets at `elvis`, which is available under the `secret/` mount.|
|-----|---|
|**2**|Storing data in the versioned backend returns metadata, such as the version number.|
|**3**|The versioned Key-Value API allows retrieval of specific versions identified by the version number.|
|**4**|Versioned key-value secrets can be mapped into value objects.|
|**5**|When updating versioned secrets using CAS, the input must refer to the previously obtained version.|

While using the `kv` v2 secrets engine through `VaultTemplate` is possible, it is not the most convenient approach, since the API offers a different approach to context paths and how input/output is represented.
Specifically, interaction with the actual secrets requires wrapping and unwrapping of the data section and introducing a `data/` path segment between the mount and the secrets key.

```
VaultOperations operations = new VaultTemplate(new VaultEndpoint());

operations.write("secret/data/elvis", Collections.singletonMap("data",
        Collections.singletonMap("social-security-number", "409-52-2002")));

VaultResponse read = operations.read("secret/data/elvis");
Map<String, Object> data = (Map<String, Object>) read.getRequiredData().get("data");
data.get("social-security-number");
```

You can find more details about the [Vault Key-Value version 2 API](https://www.vaultproject.io/api-docs/secret/kv/kv-v2) in the Vault reference documentation.
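Building on footnote 5, a check-and-set update typically re-reads the current version before writing. The following is a hedged sketch of that pattern, reusing `versionedOperations` from above:

```
// Read the current version, modify a copy of the data, and write it back
// referencing the version we read, so that concurrent writers are detected.
Versioned<Map<String, Object>> current = versionedOperations.get("elvis");
Map<String, Object> data = new HashMap<>(current.getRequiredData());
data.put("social-security-number", "409-52-2003");
versionedOperations.put("elvis", Versioned.create(data, current.getVersion()));
```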
PKI (Public Key Infrastructure) + +The `pki` secrets engine represents a backend for certificates by implementing certificate authority operations. + +The PKI secrets engine generates dynamic X.509 certificates. +With this secrets engine, services can get certificates without going through the usual manual process of generating a private key and CSR, submitting to a CA, and waiting for a verification and signing process to complete. +Vault’s built-in authentication and authorization mechanisms provide the verification functionality. + +Spring Vault supports issuing, signing, revoking certificates, and CRL retrieval through `VaultPkiOperations`. +All other PKI functionality can be used through `VaultOperations`. + +The following examples explain briefly the use of how to issue and revoke certificates: + +``` +VaultOperations operations = new VaultTemplate(new VaultEndpoint()); +VaultPkiOperations pkiOperations = operations.opsForPki("pki"); + +VaultCertificateRequest request = VaultCertificateRequest.builder() (1) + .ttl(Duration.ofHours(48)) + .altNames(Arrays.asList("prod.dc-1.example.com", "prod.dc-2.example.com")) + .withIpSubjectAltName("1.2.3.4") + .commonName("hello.example.com") + .build(); + +VaultCertificateResponse response = pkiOperations.issueCertificate("production", request); (2) +CertificateBundle certificateBundle = response.getRequiredData(); + +KeyStore keyStore = certificateBundle.createKeyStore("my-keystore"); (3) + +KeySpec privateKey = certificateBundle.getPrivateKeySpec(); (4) +X509Certificate certificate = certificateBundle.getX509Certificate(); +X509Certificate caCertificate = certificateBundle.getX509IssuerCertificate(); + +pkiOperations.revoke(certificateBundle.getSerialNumber()); (5) +``` + +|**1**| Construct a certificate request by using the `VaultCertificateRequest` builder. | +|-----|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| Request a certificate from Vault.
Vault acts as certificate authority and responds with a signed X.509 certificate.
The actual response is a `CertificateBundle`. |
+|**3**| You can obtain the generated certificates directly as a Java `KeyStore` that contains the public and private keys as well as the issuer certificate. `KeyStore` has a wide range of uses, which makes this format suitable for configuring, for example, an HTTP client, a database driver, or an SSL-secured HTTP server. |
+|**4**| `CertificateBundle` allows accessing the private key and the public and issuer certificates directly through the Java Cryptography Extension API. |
+|**5**| Once a certificate is no longer in use (or it has been compromised), you can revoke it through its serial number. Vault includes the revoked certificate in its CRL. |
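+
+A `KeyStore` obtained this way plugs into standard JSSE plumbing. The following is a minimal sketch that wires it into an `SSLContext` for client-side TLS; the empty key password is an assumption of this sketch and may differ in your setup:
+
+```
+// Hypothetical JSSE wiring for the KeyStore created above.
+KeyManagerFactory keyManagerFactory = KeyManagerFactory
+        .getInstance(KeyManagerFactory.getDefaultAlgorithm());
+keyManagerFactory.init(keyStore, new char[0]); // assumed (empty) key password
+
+SSLContext sslContext = SSLContext.getInstance("TLS");
+sslContext.init(keyManagerFactory.getKeyManagers(), null, null);
+```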
+
+You can find more details about the [Vault PKI secrets API](https://www.vaultproject.io/api-docs/secret/pki) in the Vault reference documentation.
+
+### 10.4. Token Authentication Backend
+
+This backend is an authentication backend that does not interact with actual secrets.
+Rather, it provides access to token management.
+You can read more about [Token-based authentication](#vault.authentication.token) in the [authentication methods chapter](#vault.core.authentication).
+
+The `token` authentication method is built-in and automatically available at `/auth/token`.
+It lets users authenticate with a token, as well as create new tokens, revoke secrets by token, and more.
+
+When any other auth method returns an identity, Vault core invokes the token method to create a new unique token for that identity.
+
+You can also use the token store to bypass any other auth method. You can create tokens directly, as well as perform a variety of other operations on tokens, such as renewal and revocation.
+
+Spring Vault uses this backend to renew and revoke the session tokens supplied by the configured [authentication method](#vault.core.authentication).
+
+The following example shows how to request, renew, and revoke a Vault token from within your application:
+
+```
+VaultOperations operations = new VaultTemplate(new VaultEndpoint());
+VaultTokenOperations tokenOperations = operations.opsForToken();
+
+VaultTokenResponse tokenResponse = tokenOperations.create(); (1)
+VaultToken justAToken = tokenResponse.getToken();
+
+VaultTokenRequest tokenRequest = VaultTokenRequest.builder().withPolicy("policy-for-myapp")
+    .displayName("Access tokens for myapp")
+    .renewable()
+    .ttl(Duration.ofHours(1))
+    .build();
+
+VaultTokenResponse appTokenResponse = tokenOperations.create(tokenRequest); (2)
+VaultToken appToken = appTokenResponse.getToken();
+
+tokenOperations.renew(appToken); (3)
+
+tokenOperations.revoke(appToken); (4)
+```
+
+|**1**| Create a token by applying role defaults. |
+|-----|---------------------------------------------|
+|**2**|Using the builder API, you can define fine-grained settings for the token to request.
Requesting a token returns a `VaultToken`, which is used as a value object for Vault tokens.|
+|**3**| You can renew tokens through the Token API. Typically, that is done by `SessionManager` to keep the Vault session token alive. |
+|**4**| Tokens can be revoked if needed through the Token API. Typically, that is done by `SessionManager` when the Vault session is disposed. |
+
+You can find more details about the [Vault Token Auth Method API](https://www.vaultproject.io/api-docs/auth/token) in the Vault reference documentation.
+
+### 10.5. Transit Backend
+
+The transit secrets engine handles cryptographic functions on data in transit.
+Vault does not store the data sent to this secrets engine.
+It can also be seen as "cryptography as a service" or "encryption as a service".
+The transit secrets engine can also sign and verify data, generate hashes and HMACs of data, and act as a source of random bytes.
+
+The primary use case for transit is to encrypt data from applications while still storing that encrypted data in some primary data store.
+This relieves the burden of proper encryption and decryption from application developers and pushes the burden onto the operators of Vault.
+
+Spring Vault supports a wide range of Transit operations:
+
+* Key creation
+
+* Key reconfiguration
+
+* Encryption/Decryption/Rewrapping
+
+* HMAC computation
+
+* Signing and signature verification
+
+All operations within `transit` are centered around keys.
+The Transit engine supports the versioning of keys and [a variety of key types](https://www.vaultproject.io/docs/secrets/transit).
+Note that the key type may impose a limitation on which operations can be used.
+
+The following example shows how to create a key and how to encrypt and decrypt data:
+
+```
+VaultOperations operations = new VaultTemplate(new VaultEndpoint());
+VaultTransitOperations transitOperations = operations.opsForTransit("transit");
+
+transitOperations.createKey("my-aes-key", VaultTransitKeyCreationRequest.ofKeyType("aes128-gcm96")); (1)
+
+String ciphertext = transitOperations.encrypt("my-aes-key", "plaintext to encrypt"); (2)
+
+String plaintext = transitOperations.decrypt("my-aes-key", ciphertext); (3)
+```
+
+|**1**| First, we need a key to begin with.
Each key requires the type to be specified. `aes128-gcm96` supports encryption, decryption, key derivation, and convergent encryption, of which we need encryption and decryption for this example. | +|-----|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**|Next, we encrypt a `String` that contains the plain text that should be encrypted.
The input `String` uses the default `Charset` to encode the string into its binary representation.
The `encrypt` method returns Base64-encoded ciphertext, typically starting with `vault:`.| +|**3**| To decrypt ciphertext into plain text, call the `decrypt` method.
It decrypts the ciphertext and returns a `String` that is decoded using the default charset. |
+
+The preceding example uses simple strings for cryptographic operations.
+While it is a simple approach, it bears the risk of charset misconfiguration and is not binary-safe.
+Binary safety is required when the plain text uses a binary representation for data such as images, compressed data, or binary data structures.
+
+To encrypt and decrypt binary data, use the `Plaintext` and `Ciphertext` value objects that can hold binary values:
+
+```
+byte[] plaintext = "plaintext to encrypt".getBytes();
+
+Ciphertext ciphertext = transitOperations.encrypt("my-aes-key", Plaintext.of(plaintext)); (1)
+
+Plaintext decryptedPlaintext = transitOperations.decrypt("my-aes-key", ciphertext); (2)
+```
+
+|**1**|Assuming a key `my-aes-key` is already in place, we’re encrypting the `Plaintext` object.
In return, the `encrypt` method returns a `Ciphertext` object.|
+|-----|--------------------------------------------------------------|
+|**2**| The `Ciphertext` object can be used directly for decryption and returns a `Plaintext` object. |
+
+`Plaintext` and `Ciphertext` come with a contextual object, `VaultTransitContext`.
+It is used to supply a nonce value for [convergent encryption](https://www.vaultproject.io/docs/secrets/transit#convergent-encryption) and a context value for key derivation.
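+
+The following is a minimal sketch of passing such a context, assuming a key `my-derived-key` that was created with key derivation enabled (the key name and context value are illustrative):
+
+```
+byte[] plaintext = "plaintext to encrypt".getBytes();
+
+// Context value used for key derivation.
+VaultTransitContext context = VaultTransitContext.fromContext("my-context".getBytes());
+
+Ciphertext ciphertext = transitOperations.encrypt("my-derived-key",
+        Plaintext.of(plaintext).with(context));
+```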
+
+Transit allows for signing plain text and verifying the signature for a given plain text.
+Sign operations require an asymmetric key, typically using Elliptic Curve Cryptography or RSA.
+
+| |Signatures use the public/private key split to ensure authenticity.
The signer uses its private key to create the signature; otherwise, anybody would be able to sign messages in your name.
The verifier uses the public key part to verify the signature. The actual signature is typically a hash value.

Internally, the hash gets computed and encrypted using the private key to create the final signature. The verification decrypts the signature, computes its own hash of the plain text, and compares both hash values to check whether the signature is valid.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+```
+byte[] plaintext = "plaintext to sign".getBytes();
+
+transitOperations.createKey("my-ed25519-key", VaultTransitKeyCreationRequest.ofKeyType("ed25519")); (1)
+
+Signature signature = transitOperations.sign("my-ed25519-key", Plaintext.of(plaintext)); (2)
+
+boolean valid = transitOperations.verify("my-ed25519-key", Plaintext.of(plaintext), signature); (3)
+```
+
+|**1**| Signing requires an asymmetric key. You can use any Elliptic Curve Cryptography or RSA key type. Once the key is created, you have all the prerequisites in place to create a signature. |
+|-----|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|**2**| The signature gets created for a plain text message. The returned `Signature` contains an ASCII-safe string that uses Base64 characters. |
+|**3**| Verification requires a `Signature` object and the plain text message. The return value indicates whether the signature is valid. |
+
+You can find more details about the [Vault Transit Backend](https://www.vaultproject.io/api/secret/transit) in the Vault reference documentation.
+
+## 11. Introduction to ReactiveVaultTemplate
+
+This section covers basic information on the reactive programming support using Spring Vault.
+
+### 11.1. What is Reactive Programming?
+
+In plain terms, reactive programming is about non-blocking applications that are
+asynchronous and event-driven and require a small number of threads to scale vertically
+(that is, within the JVM) rather than horizontally (that is, through clustering).
+
+A key aspect of reactive applications is the concept of backpressure, which is a mechanism
+that ensures producers do not overwhelm consumers. For example, in a pipeline of reactive
+components extending from the database to the HTTP response, when the HTTP connection is
+too slow, the data repository can also slow down or stop completely until network capacity frees up.
+
+### 11.2. Reactive Vault Client
+
+Spring Vault’s reactive client support is built on top of [composable authentication steps](#vault.authentication.steps) and Spring’s functional `WebClient` via Reactor Netty or Jetty, both of which feature a fully non-blocking, event-driven HTTP client.
+
+It exposes `VaultTokenSupplier` as a supplier of `VaultToken` to authenticate HTTP requests
+and `ReactiveVaultOperations` as the primary entry point. The core configuration of `VaultEndpoint`, `ClientOptions`, and [SSL](#vault.client-ssl) is reused across the
+various client implementations.
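+
+As a brief sketch (assuming a configured `ReactiveVaultOperations` instance, as shown in the following sections), reading a secret is a non-blocking call that returns a Reactor `Mono`:
+
+```
+Mono<VaultResponse> secret = reactiveVaultOperations.read("secret/my-application");
+
+secret.map(VaultResponse::getRequiredData)
+        .subscribe(data -> System.out.println(data.get("password")));
+```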
+
+The class `ReactiveVaultTemplate`, located in the package `org.springframework.vault.core`,
+is the central class of Spring’s reactive Vault support, providing a rich feature set for
+interacting with Vault. The template offers convenience operations to read, write and
+delete data in Vault and provides a mapping between your domain objects and Vault data.
+
+| |Once configured, `ReactiveVaultTemplate` is thread-safe and can be reused across
multiple instances.|
+|---|--------------------------------------------------------------------------------|
+
+The mapping between Vault documents and domain classes is done by delegating to `WebClient` and its codecs.
+
+The `ReactiveVaultTemplate` class implements the interface `ReactiveVaultOperations`.
+As much as possible, the methods on `ReactiveVaultOperations` are named after methods
+available on the Vault API to make the API familiar to existing Vault developers
+who are used to the API and CLI. For example, you will find methods such as
+"write", "delete", and "read".
+The design goal was to make it as easy as possible to transition between
+the use of the Vault API and `ReactiveVaultOperations`. A major difference between
+the two APIs is that `ReactiveVaultOperations` can be passed domain objects instead of
+JSON Key-Value pairs.
+
+| |The preferred way to reference the operations on a `ReactiveVaultTemplate` instance
is via its interface `ReactiveVaultOperations`.|
+|---|-------------------------------------------------------------------------------------|
+
+For functionality not explicitly exposed by `ReactiveVaultTemplate`, you can use one of
+several execute callback methods to access underlying APIs. The execute callbacks
+give you a reference to a `WebClient` object.
+See the section [Execution Callbacks](#vault.core.reactive.executioncallback) for more information.
+
+Now let’s look at an example of how to work with Vault in the context of the Spring container.
+
+### 11.3. Registering and configuring Spring Vault beans
+
+Using Spring Vault does not require a Spring Context. However, instances of `ReactiveVaultTemplate` and `VaultTokenSupplier` registered inside a managed context participate
+in [lifecycle events](https://docs.spring.io/spring/docs/5.3.4/spring-framework-reference/core.html#beans-factory-nature) provided by the Spring IoC container. This is useful to dispose of active Vault sessions upon
+application shutdown. You also benefit from reusing the same `ReactiveVaultTemplate` instance across your application.
+
+Spring Vault comes with a supporting configuration class that provides bean definitions
+for use inside a Spring context. Application configuration
+classes typically extend from `AbstractReactiveVaultConfiguration` and are required to
+provide additional details that are environment specific.
+
+Extending from `AbstractReactiveVaultConfiguration` requires you to implement the
+`VaultEndpoint vaultEndpoint()` and `ClientAuthentication clientAuthentication()` methods.
+
+Example 9. Registering Spring Vault objects using Java based bean metadata
+
+```
+@Configuration
+public class AppConfig extends AbstractReactiveVaultConfiguration {
+
+    /**
+     * Specify an endpoint for connecting to Vault.
+     */
+    @Override
+    public VaultEndpoint vaultEndpoint() {
+        return new VaultEndpoint(); (1)
+    }
+
+    /**
+     * Configure a client authentication.
+     * Please consider a more secure authentication method
+     * for production use.
+     */
+    @Override
+    public ClientAuthentication clientAuthentication() {
+        return new TokenAuthentication("…"); (2)
+    }
+}
+```
+
+|**1**| Create a new `VaultEndpoint` that points by default to `https://localhost:8200`. |
+|-----|-------------------------------------------------------------------------------------|
+|**2**|This sample uses `TokenAuthentication` to get started quickly. See [Authentication Methods](#vault.core.authentication) for details on supported authentication methods.|
+
+### 11.4. Session Management
+
+Spring Vault requires a token to authenticate Vault requests.
+See [Authentication Methods](#vault.core.authentication) for details regarding authentication.
+The reactive client requires a non-blocking token supplier whose contract is defined
+in `VaultTokenSupplier`. Tokens can be static or obtained through a [declared authentication flow](#vault.authentication.steps).
+Vault login should not occur on each authenticated Vault interaction; instead, the
+session token should be kept across a session. This aspect is handled by a
+session manager implementing `ReactiveSessionManager`, such as `ReactiveLifecycleAwareSessionManager`.
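+
+As a minimal sketch, a static token supplier is a one-liner (`VaultTokenSupplier` is a functional interface returning `Mono<VaultToken>`); real applications should prefer a declared authentication flow so that tokens can be renewed and rotated:
+
+```
+VaultTokenSupplier staticToken = () -> Mono.just(VaultToken.of("my-static-token"));
+```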
+
+### 11.5. Execution callbacks
+
+One common design feature of all Spring template classes is that all functionality
+is routed into one of the template’s execute callback methods. This helps ensure
+that exceptions and any resource management that may be required are performed
+consistently. While this was of much greater need in the case of JDBC and JMS
+than with Vault, it still offers a single spot for access and logging to occur.
+As such, using the execute callback is the preferred way to access the Vault API
+to perform uncommon operations that we’ve not exposed as methods on `ReactiveVaultTemplate`.
+
+Here is a list of execute callback methods:
+
+* `<V, T extends Publisher<V>> T` **doWithVault** `(Function<WebClient, ? extends T> clientCallback)` Composes a reactive
+  sequence from the given `WebClient`, allowing interaction with Vault without a session context.
+
+* `<V, T extends Publisher<V>> T` **doWithSession** `(Function<WebClient, ? extends T> clientCallback)` Composes a reactive
+  sequence from the given `WebClient`, allowing interaction with Vault in an authenticated session.
+
+Here is an example that uses the callback to initialize Vault:
+
+```
+reactiveVaultOperations.doWithVault(webClient -> {
+
+    return webClient.put()
+        .uri("/sys/init")
+        .bodyValue(request)
+        .retrieve()
+        .toEntity(VaultInitializationResponse.class);
+});
+```
+
+## 12. Vault Property Source Support
+
+Vault can be used in many different ways. One specific use case is using
+Vault to store encrypted properties. Spring Vault supports Vault as a property
+source to obtain configuration properties using Spring’s [PropertySource abstraction](https://docs.spring.io/spring/docs/5.3.4/spring-framework-reference/core.html#beans-property-source-abstraction).
+
+| |You can reference properties stored inside Vault in other property sources or use value injection with `@Value(…)`. Special attention is required when bootstrapping beans that require data stored inside Vault: a `VaultPropertySource` must be initialized at that time to retrieve properties from Vault.|
+|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
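+
+For example, once a `VaultPropertySource` is registered (as shown in the next section), a Vault-backed property can be injected like any other property. This is a sketch; the property name is illustrative:
+
+```
+@Component
+class DatabaseConfiguration {
+
+    // Resolved from the registered VaultPropertySource.
+    @Value("${database.password}")
+    private String databasePassword;
+}
+```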
+
+| |Spring Boot/Spring Cloud users can benefit from [Spring Cloud Vault](https://github.com/spring-cloud/spring-cloud-vault-config)'s configuration integration that initializes various property sources during application startup.|
+|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+### 12.1. Registering `VaultPropertySource`
+
+Spring Vault provides a `VaultPropertySource` to be used with Vault to obtain
+properties. It uses the nested `data` element to expose properties stored and
+encrypted in Vault.
+
+```
+ConfigurableApplicationContext ctx = new GenericApplicationContext();
+MutablePropertySources sources = ctx.getEnvironment().getPropertySources();
+sources.addFirst(new VaultPropertySource(vaultTemplate, "secret/my-application"));
+```
+
+In the code above, `VaultPropertySource` has been added with the highest precedence
+in the search. If it contains a `foo` property, it is detected and returned
+ahead of any `foo` property in any other `PropertySource`.
+`MutablePropertySources` exposes a number of methods that allow for precise
+manipulation of the set of property sources.
+
+### 12.2. @VaultPropertySource
+
+The `@VaultPropertySource` annotation provides a convenient and declarative
+mechanism for adding a `PropertySource` to Spring’s `Environment`, to be used in conjunction with `@Configuration` classes.
+
+`@VaultPropertySource` takes a Vault path such as `secret/my-application` and exposes the data stored at the node in a `PropertySource`.
+`@VaultPropertySource` supports lease renewal for secrets associated with a lease
+(i.e., credentials from the `mysql` backend) and credential rotation upon terminal
+lease expiration. Lease renewal is disabled by default.
+
+Example 10. Properties stored in Vault
+
+```
+{
+  // …
+
+  "data": {
+    "database": {
+      "password": ...
+    },
+    "user.name": ...,
+  }
+
+  // …
+}
+```
+
+Example 11. Declaring a `@VaultPropertySource`
+
+```
+@Configuration
+@VaultPropertySource("secret/my-application")
+public class AppConfig {
+
+    @Autowired Environment env;
+
+    @Bean
+    public TestBean testBean() {
+        TestBean testBean = new TestBean();
+        testBean.setUser(env.getProperty("user.name"));
+        testBean.setPassword(env.getProperty("database.password"));
+        return testBean;
+    }
+}
+```
+
+Example 12. Declaring a `@VaultPropertySource` with credential rotation and prefix
+
+```
+@Configuration
+@VaultPropertySource(value = "aws/creds/s3-access",
+    propertyNamePrefix = "aws.",
+    renewal = Renewal.ROTATE)
+public class AppConfig {
+    // provides aws.access_key and aws.secret_key properties
+}
+```
+
+| |Secrets obtained from `generic` secret backends are associated with a TTL (`refresh_interval`) but not a lease Id. Spring Vault’s `PropertySource` rotates generic secrets when they reach their TTL.|
+|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+| |You can use `@VaultPropertySource` to obtain the newest secret version from the versioned Key-Value backend.
Make sure to not include the `data/` segment in the path.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Any `${…​}` placeholders present in a `@VaultPropertySource` path are resolved against the set of property sources already registered against the environment, as the following example shows: + +Example 13. Declaring a `@VaultPropertySource` path using placeholders + +``` +@Configuration +@VaultPropertySource(value = "aws/creds/${my.placeholder:fallback/value}", + propertyNamePrefix = "aws.", + renewal = Renewal.ROTATE) +public class AppConfig { +} +``` + +Assuming that `my.placeholder` is present in one of the property sources already registered (for example, system properties or environment variables), the placeholder is resolved to the corresponding value. +If not, then `fallback/value` is used as a default. +If no default is specified and a property cannot be resolved, an `IllegalArgumentException` is thrown. + +In certain situations, it may not be possible or practical to tightly control +property source ordering when using `@VaultPropertySource` annotations. +For example, if the `@Configuration` classes above were registered via +component-scanning, the ordering is difficult to predict. +In such cases - and if overriding is important - it is recommended that the +user fall back to using the programmatic PropertySource API. +See [`ConfigurableEnvironment`](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/core/env/ConfigurableEnvironment.html) and[`MutablePropertySources`](https://docs.spring.io/spring/docs/current/javadoc-api/org/springframework/core/env/MutablePropertySources.html) for details. + +## 13. Vault Repositories + +Working with `VaultTemplate` and responses mapped to Java classes allows basic data operations like read, write +and delete. Vault repositories apply Spring Data’s repository concept on top of Vault. +A Vault repository exposes basic CRUD functionality and supports query derivation with predicates constraining +the Id property, paging and sorting. + +| |Read more about Spring Data Repositories in the [Spring Data Commons reference documentation](https://docs.spring.io/spring-data/commons/docs/current/reference/html/#repositories). The reference documentation will give you an introduction to Spring Data repositories.| +|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +### 13.1. Usage + +To access domain entities stored in Vault you can leverage repository support that eases implementing those quite significantly. + +Example 14. Sample Credentials Entity + +``` +@Secret +public class Credentials { + + @Id String id; + String password; + String socialSecurityNumber; + Address address; +} +``` + +We have a pretty simple domain object here. Note that it has a property named `id` annotated with`org.springframework.data.annotation.Id` and a `@Secret` annotation on its type. +Those two are responsible for creating the actual key used to persist the object as JSON inside Vault. + +| |Properties annotated with `@Id` as well as those named `id` are considered as the identifier properties.
Those with the annotation are favored over others.|
+|---|---------------------------------------------------------------------------------------------------------------------------------------------|
+
+The next step is to declare a repository interface that uses the domain object.
+
+Example 15. Basic Repository Interface for `Credentials` entities
+
+```
+public interface CredentialsRepository extends CrudRepository<Credentials, String> {
+
+}
+```
+
+As our repository extends `CrudRepository`, it provides basic CRUD and query methods. Vault repositories
+require Spring Data components. Make sure to include the `spring-data-commons` and `spring-data-keyvalue` artifacts on your class path.
+
+The easiest way to achieve this is by setting up dependency management and then adding the artifacts to the dependencies section of your `pom.xml`:
+
+Example 16. Using the Spring Data BOM
+
+```
+<dependencyManagement>
+    <dependencies>
+        <dependency>
+            <groupId>org.springframework.data</groupId>
+            <artifactId>spring-data-bom</artifactId>
+            <version>2020.0.2</version>
+            <scope>import</scope>
+            <type>pom</type>
+        </dependency>
+    </dependencies>
+</dependencyManagement>
+
+<dependencies>
+    <dependency>
+        <groupId>org.springframework.vault</groupId>
+        <artifactId>spring-vault-core</artifactId>
+        <version>2.3.1</version>
+    </dependency>
+
+    <dependency>
+        <groupId>org.springframework.data</groupId>
+        <artifactId>spring-data-keyvalue</artifactId>
+    </dependency>
+</dependencies>
+```
+
+What we need in between, to glue things together, is the corresponding Spring configuration.
+
+Example 17. JavaConfig for Vault Repositories
+
+```
+@Configuration
+@EnableVaultRepositories
+public class ApplicationConfig {
+
+    @Bean
+    public VaultTemplate vaultTemplate() {
+        return new VaultTemplate(…);
+    }
+}
+```
+
+Given the setup above, we can go on and inject `CredentialsRepository` into our components.
+
+Example 18. Access to `Credentials` Entities
+
+```
+@Autowired CredentialsRepository repo;
+
+public void basicCrudOperations() {
+
+    Credentials creds = new Credentials("heisenberg", "327215", "AAA-GG-SSSS");
+    creds.setAddress(new Address("308 Negra Arroyo Lane", "Albuquerque", "New Mexico", "87104"));
+
+    repo.save(creds); (1)
+
+    repo.findById(creds.getId()); (2)
+
+    repo.count(); (3)
+
+    repo.delete(creds); (4)
+}
+```
+
+|**1**|Stores the properties of `Credentials` inside Vault with a key pattern `keyspace/id`,
in this case `credentials/heisenberg`, in the generic secret backend.|
+|-----|-----------------------------------------------------------------------------------------------------------------------------------|
+|**2**| Uses the provided id to retrieve the object stored at `keyspace/id`. |
+|**3**| Counts the total number of entities available within the keyspace *credentials* defined by `@Secret` on `Credentials`. |
+|**4**| Removes the key for the given object from Vault. |
+
+### 13.2. Object to Vault JSON Mapping
+
+Vault repositories store objects in Vault using JSON as the interchange format. Object mapping between JSON and
+the entity is done by `VaultConverter`. The converter reads and writes the `SecretDocument` that contains the body
+of a `VaultResponse`. `VaultResponse`s are read from Vault, and the body is deserialized by
+Jackson into a `Map` of `String` and `Object`.
+The default `VaultConverter` implementation reads the `Map` with nested values, `List` and `Map` objects and
+converts these to entities, and vice versa.
+
+Given the `Credentials` type from the previous sections, the default mapping is as follows:
+
+```
+{
+  "_class": "org.example.Credentials", (1)
+  "password": "327215", (2)
+  "socialSecurityNumber": "AAA-GG-SSSS",
+  "address": { (3)
+    "street": "308 Negra Arroyo Lane",
+    "city": "Albuquerque",
+    "state": "New Mexico",
+    "zip": "87104"
+  }
+}
+```
+
+|**1**|The `_class` attribute is included on the root level as well as on any nested interface or abstract types.|
+|-----|--------------------------------------------------------------------------------------------------------------|
+|**2**| Simple property values are mapped by path. |
+|**3**| Properties of complex types are mapped as nested objects. |
+
+| |The `@Id` property must be mapped to `String`.|
+|---|----------------------------------------------|
+
+| Type | Sample | Mapped Value |
+|------|--------|--------------|
+|Simple Type (e.g. `String`)| String firstname = "Walter"; | firstname = "Walter" |
+|Complex Type (e.g. `Address`)| Address address = new Address("308 Negra Arroyo Lane"); | address: { "street": "308 Negra Arroyo Lane" } |
+|List of Simple Type| List<String> nicknames = asList("walt", "heisenberg"); | nicknames: ["walt", "heisenberg"] |
+|Map of Simple Type| Map<String, Integer> atts = asMap("age", 51) | atts: {"age": 51} |
+|List of Complex Type| List<Address> addresses = asList(new Address("308… | address: [{ "street": "308 Negra Arroyo Lane" }, …] |
+
+You can customize the mapping behavior by registering a `Converter` in `VaultCustomConversions`.
+Such converters can take care of converting from and to a type such as `LocalDate` as well as `SecretDocument`:
+the former is suitable for converting simple properties, while the latter converts complex types to their JSON
+representation and offers full control over the resulting `SecretDocument`. Writing objects to Vault
+deletes the content and re-creates the whole entry, so data that is not mapped is lost.
+
+### 13.3. Queries and Query Methods
+
+Query methods allow automatic derivation of simple queries from the method name. Vault has no query engine but
+requires direct access of HTTP context paths. Vault query methods translate Vault’s API possibilities to queries.
+A query method execution lists the children under a context path, applies filtering to the Id, optionally limits the
+Id stream with offset/limit, and applies sorting after fetching the results.
+
+Example 19. Sample Repository Query Method
+
+```
+public interface CredentialsRepository extends CrudRepository<Credentials, String> {
+
+    List<Credentials> findByIdStartsWith(String prefix);
+}
+```
+
+| |Query methods for Vault repositories support only queries with predicates on the `@Id` property.|
+|---|------------------------------------------------------------------------------------------------|
+
+Here’s an overview of the keywords supported for Vault.
+
+| Keyword | Sample |
+|---------|--------|
+| `After`, `GreaterThan` | `findByIdGreaterThan(String id)` |
+| `GreaterThanEqual` | `findByIdGreaterThanEqual(String id)` |
+| `Before`, `LessThan` | `findByIdLessThan(String id)` |
+| `LessThanEqual` | `findByIdLessThanEqual(String id)` |
+| `Between` | `findByIdBetween(String from, String to)` |
+| `In` | `findByIdIn(Collection<String> ids)` |
+| `NotIn` | `findByIdNotIn(Collection<String> ids)` |
+| `Like`, `StartingWith`, `EndingWith` | `findByIdLike(String id)` |
+| `NotLike`, `IsNotLike` | `findByIdNotLike(String id)` |
+| `Containing` | `findByFirstnameContaining(String id)` |
+| `NotContaining` | `findByFirstnameNotContaining(String name)` |
+| `Regex` | `findByIdRegex(String id)` |
+| `(No keyword)` | `findById(String name)` |
+| `Not` | `findByIdNot(String id)` |
+| `And` | `findByLastnameAndFirstname` |
+| `Or` | `findByLastnameOrFirstname` |
+| `Is`, `Equals` | `findByFirstname`, `findByFirstnameIs`, `findByFirstnameEquals` |
+| `Top`, `First` | `findFirst10ByFirstname`, `findTop5ByFirstname` |
+
+#### 13.3.1. Sorting and Paging
+
+Query methods support sorting and paging by selecting, in memory, a sublist (offset/limit) of the Ids retrieved from
+a Vault context path. Unlike query method predicates, sorting is not limited to a particular field.
+Unpaged sorting is applied after Id filtering, and all resulting secrets are fetched from Vault. This way,
+a query method fetches only the secrets that are also returned as part of the result.
+
+Using paging and sorting requires secret fetching before filtering the Ids, which impacts performance.
+Sorting and paging guarantee that the same result is returned even if the natural order of the Ids returned by Vault changes:
+all Ids are fetched from Vault first, then sorting is applied, and afterwards filtering and offset/limiting.
+
+Example 20. Paging and Sorting Repository
+
+```
+public interface CredentialsRepository extends PagingAndSortingRepository<Credentials, String> {
+
+    List<Credentials> findTop10ByIdStartsWithOrderBySocialSecurityNumberDesc(String prefix);
+
+    List<Credentials> findByIdStartsWith(String prefix, Pageable pageRequest);
+}
+```
+
+## 14. Client support
+
+Spring Vault supports various HTTP clients to access Vault’s HTTP API. Spring Vault uses
+[`RestTemplate`](https://docs.spring.io/spring/docs/5.3.4/spring-framework-reference/integration.html#rest-resttemplate) as the primary interface for accessing Vault.
+Dedicated client support originates from [customized SSL configuration](#vault.client-ssl) that is scoped only to Spring Vault’s client components.
+
+Spring Vault supports the following imperative HTTP clients:
+
+* Java’s builtin `HttpURLConnection` (default client)
+
+* Apache Http Components
+
+* Netty
+
+* OkHttp 3
+
+Spring Vault’s reactive integration supports the following reactive HTTP clients:
+
+* Reactor Netty
+
+* Jetty
+
+Using a specific client requires the corresponding dependency to be available on the classpath
+so Spring Vault can use the available client for communicating with Vault.
+
+### 14.1. Java’s builtin `HttpURLConnection`
+
+Java’s builtin `HttpURLConnection` is available out-of-the-box without additional
+configuration. Using `HttpURLConnection` comes with a limitation regarding SSL configuration:
+Spring Vault won’t apply [customized SSL configuration](#vault.client-ssl), as that would
+require a deep reconfiguration of the JVM and would affect all
+components relying on the default SSL context. Configuring SSL settings for `HttpURLConnection` requires you to provide these settings as system properties. See [Customizing JSSE](https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html#InstallationAndCustomization) for further details.
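+
+As a sketch, a JVM-wide truststore can be supplied through the standard JSSE system properties (the path and password are illustrative). These properties must be set before the default SSL context is first used, so they are usually passed as `-D` arguments on the command line rather than set programmatically:
+
+```
+System.setProperty("javax.net.ssl.trustStore", "/etc/ssl/truststore.jks");
+System.setProperty("javax.net.ssl.trustStorePassword", "changeit");
+```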
+
+### 14.2. External Clients
+
+You can use external clients to access Vault’s API. Simply add one of the following
+dependencies to your project. You can omit the version number if you use [Spring Vault’s Dependency BOM](#dependencies).
+
+Example 21. Apache Http Components Dependency
+
+```
+<dependency>
+    <groupId>org.apache.httpcomponents</groupId>
+    <artifactId>httpclient</artifactId>
+</dependency>
+```
+
+| |Apache HttpClient’s [wire logging](https://hc.apache.org/httpcomponents-client-4.5.x/logging.html) can be enabled through the logging configuration. Make sure to not accidentally enable wire logging, as logs may expose traffic (tokens and secrets) between your application and Vault in plain text.|
+|---|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+Example 22. Netty Dependency
+
+```
+<dependency>
+    <groupId>io.netty</groupId>
+    <artifactId>netty-all</artifactId>
+</dependency>
+```
+
+Example 23. Square OkHttp 3
+
+```
+<dependency>
+    <groupId>com.squareup.okhttp3</groupId>
+    <artifactId>okhttp</artifactId>
+</dependency>
+```
+
+Example 24. Reactor Netty
+
+```
+<dependency>
+    <groupId>io.projectreactor.netty</groupId>
+    <artifactId>reactor-netty</artifactId>
+</dependency>
+```
+
+Example 25. Jetty
+
+```
+<dependency>
+    <groupId>org.eclipse.jetty</groupId>
+    <artifactId>jetty-reactive-httpclient</artifactId>
+</dependency>
+```
+
+### 14.3. Vault Client SSL configuration
+
+SSL can be configured using `SslConfiguration` by setting various properties.
+You can either set `javax.net.ssl.trustStore` to configure
+JVM-wide SSL settings or configure `SslConfiguration` to set SSL settings only for Spring Vault.
+
+```
+SslConfiguration sslConfiguration = SslConfiguration.create( (1)
+        new FileSystemResource("client-cert.jks"), "changeit".toCharArray(),
+        new FileSystemResource("truststore.jks"), "changeit".toCharArray());
+
+SslConfiguration.forTrustStore(new FileSystemResource("truststore.jks"), (2)
+        "changeit".toCharArray());
+
+SslConfiguration.forKeyStore(new FileSystemResource("keystore.jks"), (3)
+        "changeit".toCharArray());
+
+SslConfiguration.forKeyStore(new FileSystemResource("keystore.jks"), (4)
+        "changeit".toCharArray(),
+        KeyConfiguration.of("key-password".toCharArray(),
+            "my-key-alias"));
+```
+
+|**1**| Full configuration. |
+|-----|-----------------------------------------------------------------------|
+|**2**| Configuring only trust store settings. |
+|**3**| Configuring only key store settings. |
+|**4**| Configuring key store settings with an additional key configuration. |
+
+Please note that `SslConfiguration` can only be applied when either Apache Http Components or the OkHttp client is on your class path.
+
+The SSL configuration also supports PEM-encoded certificates as an alternative to a Java Key Store.
+
+```
+KeyStoreConfiguration keystore = KeyStoreConfiguration
+    .of(new ClassPathResource("ca.pem")).withStoreType("PEM");
+SslConfiguration configuration = SslConfiguration.forTrustStore(keystore);
+```
+
+PEM files may contain one or more certificates (blocks of `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`).
+Certificates added to the underlying `KeyStore` use the full subject name as the alias.
+
+## 15. Authentication Methods
+
+Different organizations have different requirements for security
+and authentication. Vault reflects that need by shipping multiple authentication
+methods. Spring Vault supports multiple authentication mechanisms.
+
+### 15.1. Externalizing login credentials
+
+Obtaining first-time access to a secured system is known as secure introduction.
+Any client requires ephemeral or permanent credentials to access Vault. Externalizing credentials
+is a good pattern to keep code maintainability high, but it comes at the risk of increased disclosure.
+
+Disclosure of login credentials to any party allows login to Vault and access to secrets that
+are permitted by the underlying role. Picking the appropriate client authentication and
+injecting credentials into the application is subject to risk evaluation.
+
+Spring’s [PropertySource abstraction](https://docs.spring.io/spring/docs/5.3.4/spring-framework-reference/core.html#beans-property-source-abstraction) is a natural fit
+to keep configuration outside the application code. You can use system properties, environment
+variables, or property files to store login credentials. Each approach comes with its own properties.
+Keep in mind that the command line and environment properties can be introspected, given appropriate
+OS access levels.
+
+Example 26. Externalizing `vault.token` to a properties file
+
+```
+@PropertySource("configuration.properties")
+@Configuration
+public class Config extends AbstractVaultConfiguration {
+
+    @Override
+    public ClientAuthentication clientAuthentication() {
+        return new TokenAuthentication(getEnvironment().getProperty("vault.token"));
+    }
+}
+```
+
+| |Spring allows multiple ways to obtain `Environment`. When using `VaultPropertySource`, injection via `@Autowired Environment environment` will not provide the `Environment`, as the environment bean is still under construction and autowiring comes at a later stage.
Your configuration class should rather implement `ApplicationContextAware` and obtain the `Environment` from `ApplicationContext`.| +|---|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +See [`SecurePropertyUsage.java`](https://github.com/spring-projects/spring-vault/blob/master/spring-vault-core/src/test/java/org/springframework/vault/demo/SecurePropertyUsage.java)for a sample on referencing properties in components and other property sources. + +### 15.2. Token authentication + +Tokens are the core method for authentication within Vault. +Token authentication requires a static token to be provided. + +| |Token authentication is the default authentication method.
If a token is disclosed to an unintended party, it gains access to Vault and
can access secrets for the intended client.| +|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +Typically, Token authentication is used in scenarios in which the token is created and renewed +externally (such as [HashiCorp Vault service broker](https://github.com/hashicorp/vault-service-broker)). +Depending on the actual setup, you may or may not want token renewal and revocation. +See [`LifecycleAwareSessionManager`](#vault.authentication.session) for details about TTL and token revocation. + +``` +@Configuration +class AppConfig extends AbstractVaultConfiguration { + + // … + + @Override + public ClientAuthentication clientAuthentication() { + return new TokenAuthentication("…"); + } + + // … +} +``` + +See also: + +* [Vault Documentation: Tokens](https://www.vaultproject.io/docs/concepts/tokens.html) + +* [Vault Documentation: Using the Token auth backend](https://www.vaultproject.io/docs/auth/token.html) + +### 15.3. AppId authentication + +| |AppId authentication is deprecated by Vault. Use [AppRole authentication](#vault.authentication.approle) instead.| +|---|-----------------------------------------------------------------------------------------------------------------| + +Vault supports [AppId](https://www.vaultproject.io/docs/auth/app-id.html)authentication that consists of two hard to guess tokens. The AppId +defaults to `spring.application.name` that is statically configured. +The second token is the UserId which is a part determined by the application, +usually related to the runtime environment. IP address, Mac address or a +Docker container name are good examples. Spring Vault supports +IP address, Mac address and static UserId’s (e.g. supplied via System properties). +The IP and Mac address are represented as Hex-encoded SHA256 hash. + +IP address-based UserId’s use the local host’s IP address. + +``` +@Configuration +class AppConfig extends AbstractVaultConfiguration { + + // … + + @Override + public ClientAuthentication clientAuthentication() { + AppIdAuthenticationOptions options = AppIdAuthenticationOptions.builder() + .appId("myapp") + .userIdMechanism(new IpAddressUserId()) + .build(); + + return new AppIdAuthentication(options, restOperations()); + } + + // … +} +``` + +The corresponding command to generate the IP address UserId from a command line is: + +``` +$ echo -n 192.168.99.1 | sha256sum +``` + +| |Including the line break of `echo` leads to a different hash value
so make sure to include the `-n` flag.| +|---|-------------------------------------------------------------------------------------------------------------| + +Mac address-based UserId’s obtain their network device from the +localhost-bound device. The configuration also allows specifying +a `network-interface` hint to pick the right device. The value of`network-interface` is optional and can be either an interface +name or interface index (0-based). + +``` +@Configuration +class AppConfig extends AbstractVaultConfiguration { + + // … + + @Override + public ClientAuthentication clientAuthentication() { + + AppIdAuthenticationOptions options = AppIdAuthenticationOptions.builder() + .appId("myapp") + .userIdMechanism(new MacAddressUserId()) + .build(); + + return new AppIdAuthentication(options, restOperations()); + } + + // … +} +``` + +The corresponding command to generate the Mac address UserId from a command line is: + +``` +$ echo -n 0AFEDE1234AC | sha256sum +``` + +| |The Mac address is specified uppercase and without colons.
Including the line break of `echo` leads to a different hash value
so make sure to include the `-n` flag.|
+|---|--------------------------------------------------------------------------------------------------------------------------------------------|
+
+#### 15.3.1. Custom UserId
+
+A more advanced approach lets you implement your own `AppIdUserIdMechanism`.
+This class must be on your classpath and must implement
+the `org.springframework.vault.authentication.AppIdUserIdMechanism` interface
+and the `createUserId` method. Spring Vault obtains the UserId
+by calling `createUserId` each time it authenticates using AppId to
+obtain a token.
+
+MyUserIdMechanism.java
+
+```
+public class MyUserIdMechanism implements AppIdUserIdMechanism {
+
+  @Override
+  public String createUserId() {
+
+    String userId = …
+    return userId;
+  }
+}
+```
+
+See also: [Vault Documentation: Using the App ID auth backend](https://www.vaultproject.io/docs/auth/app-id.html)
+
+### 15.4. AppRole authentication
+
+[AppRole](https://www.vaultproject.io/docs/auth/approle.html) allows machine
+authentication, like the deprecated (since Vault 0.6.1) [AppId authentication](#vault.authentication.appid).
+AppRole authentication consists of two hard-to-guess (secret) tokens: the RoleId and the SecretId.
+
+Spring Vault supports AppRole authentication by providing the RoleId, either alone or together with a provided SecretId, and by fetching the RoleId and SecretId from Vault
+(push and pull modes with response unwrapping).
+
+```
+@Configuration
+class AppConfig extends AbstractVaultConfiguration {
+
+    // …
+
+    @Override
+    public ClientAuthentication clientAuthentication() {
+
+        AppRoleAuthenticationOptions options = AppRoleAuthenticationOptions.builder()
+            .roleId(RoleId.provided("…"))
+            .secretId(SecretId.wrapped(VaultToken.of("…")))
+            .build();
+
+        return new AppRoleAuthentication(options, restOperations());
+    }
+
+    // …
+}
+```
+
+Spring Vault also supports full pull mode: if the RoleId and SecretId are not provided,
+Spring Vault retrieves them using the role name and an initial token. The
+initial token may be associated with a TTL and a usage limit.
+
+```
+@Configuration
+class AppConfig extends AbstractVaultConfiguration {
+
+    // …
+
+    @Override
+    public ClientAuthentication clientAuthentication() {
+
+        VaultToken initialToken = VaultToken.of("…");
+        AppRoleAuthenticationOptions options = AppRoleAuthenticationOptions.builder()
+            .appRole("…")
+            .roleId(RoleId.pull(initialToken))
+            .secretId(SecretId.pull(initialToken))
+            .build();
+
+        return new AppRoleAuthentication(options, restOperations());
+    }
+
+    // …
+}
+```
+
+See also: [Vault Documentation: Using the AppRole auth backend](https://www.vaultproject.io/docs/auth/approle.html)
+
+### 15.5. AWS-EC2 authentication
+
+The [aws-ec2](https://www.vaultproject.io/docs/auth/aws-ec2.html) auth backend provides a secure introduction mechanism
+for AWS EC2 instances, allowing automated retrieval of a Vault
+token. Unlike most Vault authentication backends, this backend
+does not require first deploying or provisioning security-sensitive
+credentials (tokens, username/password, client certificates, and so on).
+Instead, it treats AWS as a Trusted Third Party and uses the
+cryptographically signed dynamic metadata information that uniquely
+represents each EC2 instance.
+ +``` +@Configuration +class AppConfig extends AbstractVaultConfiguration { + + // … + + @Override + public ClientAuthentication clientAuthentication() { + return new AwsEc2Authentication(restOperations()); + } + + // … +} +``` + +AWS-EC2 authentication enables nonce by default to follow +the Trust On First Use (TOFU) principle. Any unintended party that +gains access to the PKCS#7 identity metadata can authenticate +against Vault. + +During the first login, Spring Vault generates a nonce +that is stored in the auth backend aside the instance Id. +Re-authentication requires the same nonce to be sent. Any other +party does not have the nonce and can raise an alert in Vault for +further investigation. + +The nonce is kept in memory and is lost during application restart. + +AWS-EC2 authentication roles are optional and default to the AMI. +You can configure the authentication role by setting +it in `AwsEc2AuthenticationOptions`. + +See also: [Vault Documentation: Using the AWS-EC2 auth backend](https://www.vaultproject.io/docs/auth/aws-ec2.html) + +### 15.6. AWS-IAM authentication + +The [aws](https://www.vaultproject.io/docs/auth/aws.html)auth backend allows Vault login by using existing AWS IAM credentials. + +AWS IAM authentication creates a signed HTTP request that is +executed by Vault to get the identity of the signer using AWS STS`GetCallerIdentity` method. AWSv4 signatures require IAM credentials. + +IAM credentials can be obtained from either the runtime environment +or supplied externally. Runtime environments such as AWS-EC2, +Lambda and ECS with assigned IAM principals do not require client-specific +configuration of credentials but can obtain these from its metadata source. + +``` +@Configuration +class AppConfig extends AbstractVaultConfiguration { + + // … + + @Override + public ClientAuthentication clientAuthentication() { + + AwsIamAuthenticationOptions options = AwsIamAuthenticationOptions.builder() + .credentials(new BasicAWSCredentials(…)).build(); + + return new AwsIamAuthentication(options, restOperations()); + } + + // … +} +``` + +Example 27. Using AWS-EC2 instance profile as credentials source + +``` +@Configuration +class AppConfig extends AbstractVaultConfiguration { + + // … + + @Override + public ClientAuthentication clientAuthentication() { + + AwsIamAuthenticationOptions options = AwsIamAuthenticationOptions.builder() + .credentialsProvider(InstanceProfileCredentialsProvider.getInstance()).build(); + + return new AwsIamAuthentication(options, restOperations()); + } + + // … +} +``` + +`AwsIamAuthentication` requires the AWS Java SDK dependency (`com.amazonaws:aws-java-sdk-core`) +as the authentication implementation uses AWS SDK types for credentials and request signing. + +You can configure the authentication via `AwsIamAuthenticationOptions`. + +See also: + +* [Vault Documentation: Using the AWS auth backend](https://www.vaultproject.io/docs/auth/aws.html) + +* [AWS Documentation: STS GetCallerIdentity](https://docs.aws.amazon.com/STS/latest/APIReference/API_GetCallerIdentity.html) + +### 15.7. Azure (MSI) authentication + +The [azure](https://www.vaultproject.io/docs/auth/azure.html)auth backend provides a secure introduction mechanism +for Azure VM instances, allowing automated retrieval of a Vault +token. Unlike most Vault authentication backends, this backend +does not require first-deploying, or provisioning security-sensitive +credentials (tokens, username/password, client certificates, etc.). 
+Instead, it treats Azure as a Trusted Third Party and uses the
+managed service identity and instance metadata information that can be
+bound to a VM instance.
+
+```
+@Configuration
+class AppConfig extends AbstractVaultConfiguration {
+
+    // …
+
+    @Override
+    public ClientAuthentication clientAuthentication() {
+
+        AzureMsiAuthenticationOptions options = AzureMsiAuthenticationOptions.builder()
+            .role(…).build();
+
+        return new AzureMsiAuthentication(options, restOperations());
+    }
+
+    // …
+}
+```
+
+Azure authentication requires details about the VM environment (subscription Id,
+resource group name, and VM name). These details can be configured through `AzureMsiAuthenticationOptionsBuilder`.
+If left unconfigured, `AzureMsiAuthentication` queries Azure’s instance metadata service to
+obtain these details.
+
+See also:
+
+* [Vault Documentation: Using the Azure auth backend](https://www.vaultproject.io/docs/auth/azure.html)
+
+* [Azure Documentation: Managed Service Identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/overview)
+
+### 15.8. GCP-GCE authentication
+
+The [gcp](https://www.vaultproject.io/docs/auth/gcp.html) auth backend allows Vault login by using existing GCP (Google Cloud Platform) IAM and GCE credentials.
+
+GCP GCE (Google Compute Engine) authentication creates a signature in the form of a
+JSON Web Token (JWT) for a service account. A JWT for a Compute Engine instance
+is obtained from the GCE metadata service using [Instance identification](https://cloud.google.com/compute/docs/instances/verifying-instance-identity).
+This API creates a JSON Web Token that can be used to confirm the instance identity.
+
+Unlike most Vault authentication backends, this backend
+does not require first deploying or provisioning security-sensitive
+credentials (tokens, username/password, client certificates, and so on).
+Instead, it treats GCP as a Trusted Third Party and uses the
+cryptographically signed dynamic metadata information that uniquely
+represents each GCP service account.
+
+You can configure the authentication via `GcpComputeAuthenticationOptions`.
+
+```
+@Configuration
+class AppConfig extends AbstractVaultConfiguration {
+
+    // …
+
+    @Override
+    public ClientAuthentication clientAuthentication() {
+
+        GcpComputeAuthenticationOptions options = GcpComputeAuthenticationOptions.builder()
+            .role(…).build();
+
+        return new GcpComputeAuthentication(options, restOperations());
+    }
+
+    // …
+}
+```
+
+See also:
+
+* [Vault Documentation: Using the GCP auth backend](https://www.vaultproject.io/docs/auth/gcp.html)
+
+* [GCP Documentation: Verifying the Identity of Instances](https://cloud.google.com/compute/docs/instances/verifying-instance-identity)
+
+### 15.9. GCP-IAM authentication
+
+The [gcp](https://www.vaultproject.io/docs/auth/gcp.html) auth backend allows Vault login by using existing GCP (Google Cloud Platform) IAM and GCE credentials.
+
+GCP IAM authentication creates a signature in the form of a JSON Web Token (JWT)
+for a service account. A JWT for a service account is obtained by
+calling GCP IAM’s [`projects.serviceAccounts.signJwt`](https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/signJwt) API. The caller authenticates against GCP IAM
+and thereby proves its identity. This Vault backend treats GCP as a Trusted Third Party.
+
+IAM credentials can be obtained either from the runtime environment
+or supplied externally, for example, as JSON. JSON is the preferred form, as it
+carries the project id and service account identifier required for calling `projects.serviceAccounts.signJwt`.
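+
+As a sketch, externally supplied credentials can be loaded from a service-account JSON key file through the Google auth library (the file name is illustrative):
+
+```
+GoogleCredentials credentials = GoogleCredentials
+        .fromStream(new FileInputStream("my-service-account.json"));
+```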
+
+```
+@Configuration
+class AppConfig extends AbstractVaultConfiguration {
+
+    // …
+
+    @Override
+    public ClientAuthentication clientAuthentication() {
+
+        GcpIamAuthenticationOptions options = GcpIamAuthenticationOptions.builder()
+            .role(…).credential(GoogleCredentials.getApplicationDefault()).build();
+
+        return new GcpIamAuthentication(options, restOperations());
+    }
+
+    // …
+}
+```
+
+`GcpIamAuthentication` requires the Google Cloud Java SDK dependencies
+(`com.google.apis:google-api-services-iam` and `com.google.auth:google-auth-library-oauth2-http`),
+as the authentication implementation uses Google APIs for credentials and JWT signing.
+
+You can configure the authentication via `GcpIamAuthenticationOptions`.
+
+| |Google credentials require an OAuth 2 token that maintains the token lifecycle. All API
is synchronous; therefore, `GcpIamAuthentication` does not support `AuthenticationSteps`, which is
required for reactive usage.|
+|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+See also:
+
+* [Vault Documentation: Using the GCP auth backend](https://www.vaultproject.io/docs/auth/gcp.html)
+
+* [GCP Documentation: projects.serviceAccounts.signJwt](https://cloud.google.com/iam/reference/rest/v1/projects.serviceAccounts/signJwt)
+
+### 15.10. PCF authentication
+
+The [pcf](https://www.vaultproject.io/docs/auth/pcf.html) auth backend allows Vault login for PCF instances.
+It leverages [PCF’s App and Container Identity Assurance](https://content.pivotal.io/blog/new-in-pcf-2-1-app-container-identity-assurance-via-automatic-cert-rotation).
+
+PCF authentication uses the instance key and certificate to create a signature that is validated by Vault.
+If the signature matches, and potentially bound organization/space/application Ids match, Vault issues an appropriately scoped token.
+
+Instance credentials are available from the files referenced by the `CF_INSTANCE_CERT` and `CF_INSTANCE_KEY` environment variables.
+
+```
+@Configuration
+class AppConfig extends AbstractVaultConfiguration {
+
+    // …
+
+    @Override
+    public ClientAuthentication clientAuthentication() {
+
+        PcfAuthenticationOptions options = PcfAuthenticationOptions.builder()
+            .role(…).build();
+
+        return new PcfAuthentication(options, restOperations());
+    }
+
+    // …
+}
+```
+
+`PcfAuthenticationOptions` requires the [BouncyCastle](https://www.bouncycastle.org/latest_releases.html) library for creating RSA-PSS signatures.
+
+You can configure the authentication via `PcfAuthenticationOptions`.
+
+See also:
+
+* [Vault Documentation: Using the PCF auth backend](https://www.vaultproject.io/docs/auth/pcf.html)
+
+### 15.11. TLS certificate authentication
+
+The `cert` auth backend allows authentication using SSL/TLS client
+certificates that are either signed by a CA or self-signed.
+
+To enable `cert` authentication, you need to:
+
+1. Use SSL (see [Vault Client SSL configuration](#vault.client-ssl))
+
+2. Configure a Java `KeyStore` that contains the client
+   certificate and the private key
+
+```
+@Configuration
+class AppConfig extends AbstractVaultConfiguration {
+
+    // …
+
+    @Override
+    public ClientAuthentication clientAuthentication() {
+
+        ClientCertificateAuthenticationOptions options = ClientCertificateAuthenticationOptions.builder()
+            .path(…).build();
+
+        return new ClientCertificateAuthentication(options, restOperations());
+    }
+
+    // …
+}
+```
+
+See also: [Vault Documentation: Using the Cert auth backend](https://www.vaultproject.io/docs/auth/cert.html)
+
+### 15.12. Cubbyhole authentication
+
+Cubbyhole authentication uses Vault primitives to provide a secured authentication
+workflow. Cubbyhole authentication uses tokens as its primary login method:
+an ephemeral token is used to obtain a second login `VaultToken` from Vault’s
+Cubbyhole secret backend. The login token is usually longer-lived and used to
+interact with Vault. The login token can be retrieved either from a wrapped
+response or from the `data` section.
+
+**Creating a wrapped token**
+
+| |Response Wrapping for token creation requires Vault 0.6.0 or higher.|
+|---|--------------------------------------------------------------------|
+
+Example 28. Creating and storing tokens
+Example 28. Creating and storing tokens
+
+```
+$ vault token-create -wrap-ttl="10m"
+Key                           Value
+---                           -----
+wrapping_token:               397ccb93-ff6c-b17b-9389-380b01ca2645
+wrapping_token_ttl:           0h10m0s
+wrapping_token_creation_time: 2016-09-18 20:29:48.652957077 +0200 CEST
+wrapped_accessor:             46b6aebb-187f-932a-26d7-4f3d86a68319
+```
+
+Example 29. Wrapped token response usage
+
+```
+@Configuration
+class AppConfig extends AbstractVaultConfiguration {
+
+    // …
+
+    @Override
+    public ClientAuthentication clientAuthentication() {
+
+        CubbyholeAuthenticationOptions options = CubbyholeAuthenticationOptions
+                .builder()
+                .initialToken(VaultToken.of("…"))
+                .wrapped()
+                .build();
+
+        return new CubbyholeAuthentication(options, restOperations());
+    }
+
+    // …
+}
+```
+
+**Using stored tokens**
+
+Example 30. Creating and storing tokens
+
+```
+$ vault token create
+Key             Value
+---             -----
+token           f9e30681-d46a-cdaf-aaa0-2ae0a9ad0819
+token_accessor  4eee9bd9-81bb-06d6-af01-723c54a72148
+token_duration  0s
+token_renewable false
+token_policies  [root]
+
+$ vault token create -use-limit=2 -orphan -no-default-policy -policy=none
+Key             Value
+---             -----
+token           895cb88b-aef4-0e33-ba65-d50007290780
+token_accessor  e84b661c-8aa8-2286-b788-f258f30c8325
+token_duration  0s
+token_renewable false
+token_policies  [none]
+
+$ export VAULT_TOKEN=895cb88b-aef4-0e33-ba65-d50007290780
+$ vault write cubbyhole/token token=f9e30681-d46a-cdaf-aaa0-2ae0a9ad0819
+```
+
+Example 31. Stored token response usage
+
+```
+@Configuration
+class AppConfig extends AbstractVaultConfiguration {
+
+    // …
+
+    @Override
+    public ClientAuthentication clientAuthentication() {
+
+        CubbyholeAuthenticationOptions options = CubbyholeAuthenticationOptions
+                .builder()
+                .initialToken(VaultToken.of("…"))
+                .path("cubbyhole/token")
+                .build();
+
+        return new CubbyholeAuthentication(options, restOperations());
+    }
+
+    // …
+}
+```
+
+**Remaining TTL/Renewability**
+
+Tokens retrieved from Cubbyhole associated with a non-zero TTL start their TTL at the
+time of token creation. That time is not necessarily identical to application
+startup. To compensate for the initial delay, Cubbyhole authentication performs a
+self-lookup for tokens associated with a non-zero TTL to retrieve the remaining TTL.
+Cubbyhole authentication will not self-lookup wrapped tokens without a TTL because a
+zero TTL indicates that no TTL is associated.
+
+Non-wrapped tokens do not provide details regarding renewability and TTL by just
+retrieving the token. A self-lookup will look up renewability and the remaining TTL.
+
+See also:
+
+* [Vault Documentation: Tokens](https://www.vaultproject.io/docs/concepts/tokens.html)
+
+* [Vault Documentation: Cubbyhole Secret Backend](https://www.vaultproject.io/docs/secrets/cubbyhole/index.html)
+
+* [Vault Documentation: Response Wrapping](https://www.vaultproject.io/docs/concepts/response-wrapping.html)
+
+### 15.13. Kubernetes authentication
+
+Since version 0.8.3, Vault supports [Kubernetes](https://www.vaultproject.io/docs/auth/kubernetes.html)-based authentication using Kubernetes tokens.
+
+Using Kubernetes authentication requires a Kubernetes Service Account Token,
+usually mounted at `/var/run/secrets/kubernetes.io/serviceaccount/token`.
+The file contains the token, which is read and sent to Vault.
+Vault verifies its validity using Kubernetes' API during login.
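The options builder shown in the next example accepts a `jwtSupplier(…)` customization. As an illustration (the lambda below is a hypothetical helper, not part of the Spring Vault API), such a supplier could read the mounted token file on demand, assuming the default mount path:

```
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.function.Supplier;

// Hypothetical helper: reads the mounted service account token on demand,
// assuming the default Kubernetes mount path.
Supplier<String> jwtSupplier = () -> {
    try {
        return new String(Files.readAllBytes(
                Paths.get("/var/run/secrets/kubernetes.io/serviceaccount/token"))).trim();
    } catch (java.io.IOException e) {
        throw new IllegalStateException("Cannot read service account token", e);
    }
};
```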
+
+Configuring Kubernetes authentication requires at least the role name to be provided:
+
+```
+@Configuration
+class AppConfig extends AbstractVaultConfiguration {
+
+    // …
+
+    @Override
+    public ClientAuthentication clientAuthentication() {
+
+        KubernetesAuthenticationOptions options = KubernetesAuthenticationOptions.builder()
+                .role(…).jwtSupplier(…).build();
+
+        return new KubernetesAuthentication(options, restOperations());
+    }
+
+    // …
+}
+```
+
+You can configure the authentication via `KubernetesAuthenticationOptions`.
+
+See also:
+
+* [Vault Documentation: Using the Kubernetes auth backend](https://www.vaultproject.io/docs/auth/kubernetes.html)
+
+* [Kubernetes Documentation: Configure Service Accounts for Pods](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)
+
+### 15.14. Authentication Steps
+
+`ClientAuthentication` objects describe the authentication flow and perform the actual
+authentication steps. Pre-composed authentications are easy to use and to configure, with
+a tight binding to synchronous execution.
+
+Composing authentication methods and reusing common steps, such as posting a login
+payload to Vault or retrieving authentication input from an HTTP source, is not intended
+with `ClientAuthentication` objects.
+
+Authentication steps provide reusability of common authentication activities.
+Steps created via `AuthenticationSteps` describe an authentication flow in a functional
+style, leaving the actual authentication execution to specific executors.
+
+Example 32. Stored token authentication flow
+
+```
+AuthenticationSteps.just(VaultToken.of(…)); (1)
+```
+
+|**1**|Creates `AuthenticationSteps` from just a `VaultToken`.|
+|-----|-------------------------------------------------------|
+
+A single-step authentication flow can be created from a single input. Flows declaring
+multiple authentication steps start with a `Supplier` or `HttpRequest` that provides an
+authentication state object, which can be used to map or post to Vault for login.
+
+Example 33. AppRole authentication flow
+
+```
+AuthenticationSteps.fromSupplier( (1)
+
+    () -> getAppRoleLogin(options.getRoleId(), options.getSecretId())) (2)
+
+    .login("auth/{mount}/login", options.getPath()); (3)
+```
+
+|**1**|Start declaring `AuthenticationSteps` accepting a `Supplier`.
The state object type depends on the `Supplier` response type, which can be mapped in a later step.|
+|-----|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|**2**| The actual `Supplier` implementation.
Creating a `Map` in this case. | +|**3**| Perform a Vault login by posting the state object (`Map`) to a Vault endpoint for Vault token creation.
Note that template variables are subject to URL escaping. |
+
+Authentication flows require an executor to perform the actual login. We provide two executors
+for different execution models:
+
+* `AuthenticationStepsExecutor` as a drop-in replacement for synchronous `ClientAuthentication`.
+
+* `AuthenticationStepsOperator` for reactive execution.
+
+Many `ClientAuthentication` implementations come with static factory methods to create `AuthenticationSteps` for their authentication-specific options:
+
+Example 34. Synchronous `AuthenticationSteps` execution
+
+```
+CubbyholeAuthenticationOptions options = …
+RestOperations restOperations = …
+
+AuthenticationSteps steps = CubbyholeAuthentication.createAuthenticationSteps(options);
+
+AuthenticationStepsExecutor executor = new AuthenticationStepsExecutor(steps, restOperations);
+
+VaultToken token = executor.login();
+```
+
+### 15.15. Token Lifecycle
+
+Vault’s tokens can be associated with a time to live. Tokens obtained by an authentication method
+are intended to be used as long as the session is active and should not expire while the application is active.
+
+Spring Vault provides [`LifecycleAwareSessionManager`](https://docs.spring.io/spring-vault/docs/2.3.1/api/org/springframework/vault/authentication/LifecycleAwareSessionManager.html), a session manager that renews the token until it reaches its terminal TTL and then performs another login to obtain the next token associated with the session.
+
+Depending on the authentication method, a login can create two kinds of tokens:
+
+* [`VaultToken`](https://docs.spring.io/spring-vault/docs/2.3.1/api/org/springframework/vault/support/VaultToken.html): Generic token encapsulating the actual token.
+
+* [`LoginToken`](https://docs.spring.io/spring-vault/docs/2.3.1/api/org/springframework/vault/support/LoginToken.html): Token associated with renewability/TTL.
+
+Authentication methods such as [`TokenAuthentication`](https://docs.spring.io/spring-vault/docs/2.3.1/api/org/springframework/vault/authentication/TokenAuthentication.html) just create a `VaultToken`, which does not carry any renewability/TTL details. `LifecycleAwareSessionManager` will run a self-lookup on the token to retrieve renewability and TTL from Vault. `VaultToken` instances are renewed periodically if self-lookup is enabled. Note that `VaultToken` instances are never revoked; only `LoginToken` instances are revoked.
+
+Authentication methods creating `LoginToken` directly (all login-based authentication methods) already provide all necessary details to set up token renewal. Tokens obtained from a login are revoked by `LifecycleAwareSessionManager` if the session manager is shut down.
+
+## 16. Miscellaneous
+
+This chapter covers additional details worth mentioning, such as the Spring Security integration.
+
+### 16.1. Spring Security
+
+Spring Vault integrates with Spring Security by providing implementations for [`BytesKeyGenerator`](https://docs.spring.io/spring-security/site/docs/current/reference/htmlsingle/#spring-security-crypto-keygenerators) and [`BytesEncryptor`](https://docs.spring.io/spring-security/site/docs/current/reference/htmlsingle/#spring-security-crypto-encryption). Both implementations use Vault’s `transit` backend.
+
+Example 35. `VaultBytesKeyGenerator` example
+
+```
+VaultOperations operations = …;
+VaultBytesKeyGenerator generator = new VaultBytesKeyGenerator(operations);
+
+byte[] key = generator.generateKey();
+```
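The generator sources its random bytes from Vault rather than from the local JVM. As a sketch, assuming the constructor variant that accepts a transit path and key length, you could request a key size other than the default:

```
VaultOperations operations = …;

// Sketch: request 16-byte keys from the "transit" backend
// (assumes the (VaultOperations, String, int) constructor variant).
VaultBytesKeyGenerator generator = new VaultBytesKeyGenerator(operations, "transit", 16);

byte[] key = generator.generateKey(); // 16 bytes of Vault-sourced randomness
```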
+
+Example 36. `VaultBytesEncryptor` example
+
+```
+VaultTransitOperations transit = …;
+
+VaultBytesEncryptor encryptor = new VaultBytesEncryptor(transit, "my-key-name");
+
+byte[] ciphertext = encryptor.encrypt(plaintext);
+
+byte[] result = encryptor.decrypt(ciphertext);
+```
+
+Vault encapsulates an entropy source that is decoupled from your JVM, along with server-side key management. This relieves application developers of the burden of proper encryption/decryption and pushes that burden onto the operators of Vault. Operators of Vault commonly include the security team at an organization, which means they can ensure that data is encrypted/decrypted properly. Additionally, since encrypt/decrypt operations must enter the audit log, any decryption event is recorded.
+
+The backend also supports key rotation, which allows a new version of the named key to be generated. All data encrypted with the key will use the newest version of the key; previously encrypted data can be decrypted using old versions of the key. Administrators can control which previous versions of a key are available for decryption, to prevent an attacker who has obtained an old copy of ciphertext from being able to decrypt it.
+
+Vault is, after all, a networked service, and every operation incurs network latency. Components that make heavy use of encryption or random-bytes generation may therefore see a difference in throughput and performance.
\ No newline at end of file
diff --git a/docs/en/spring-web-flow/README.md b/docs/en/spring-web-flow/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/docs/en/spring-web-flow/actions.md b/docs/en/spring-web-flow/actions.md
new file mode 100644
index 0000000000000000000000000000000000000000..0fd7cfad486b8cb562a5d79edc0e383c995a83bf
--- /dev/null
+++ b/docs/en/spring-web-flow/actions.md
@@ -0,0 +1,465 @@
+# 6. Executing actions
+
+## 6.1. Introduction
+
+This chapter shows you how to use the `action-state` element to control the execution of an action at a point within a flow.
+It will also show how to use the `decision-state` element to make a flow routing decision.
+Finally, several examples of invoking actions from the various points possible within a flow will be discussed.
+
+## 6.2. Defining action states
+
+Use the `action-state` element when you wish to invoke an action, then transition to another state based on the action's outcome:
+
+```
+
+
+```
+
+The full example below illustrates an interview flow that uses the action-state above to determine if more answers are needed to complete the interview:
+
+```
+
+
+```
+
+After the execution of each action, the action-state checks the result to see if it matches a declared
+transition to another state. That means that if more than one action is configured, they are executed in
+an ordered chain until one returns a result event that matches a state transition out of the
+action-state, while the rest are ignored. This is a form of the Chain of Responsibility (CoR) pattern.
+
+The result of an action's execution is typically the criteria for a transition out of this state.
+Additional information in the current RequestContext may also be tested as part of custom
+transitional criteria, allowing for sophisticated transition expressions that reason on contextual
+state.
+
+Note also that an action-state, just like any other state, can have one or more on-entry actions
+that are executed as a list from start to end.
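To make the outcome mapping concrete, here is a hypothetical sketch of a backing object for the interview flow above (the class and field names are illustrative, not from the sample application). Per the mapping rules in Section 6.4 below, a `Boolean` action result is translated to `yes`/`no` event ids that select the transition:

```
import java.io.Serializable;

// Hypothetical backing object for the interview flow above.
public class Interview implements Serializable {

    private int answeredQuestions;
    private int totalQuestions;

    // A Boolean return value is mapped to the "yes"/"no" event ids,
    // which the action-state uses to select its transition.
    public Boolean moreAnswersNeeded() {
        return answeredQuestions < totalQuestions;
    }
}
```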
+## 6.3. Defining decision states
+
+Use the `decision-state` element as an alternative to the action-state to make a routing decision using a convenient if/else syntax.
+The example below shows the `moreAnswersNeeded` state above now implemented as a decision state instead of an action-state:
+
+```
+
+
+```
+
+## 6.4. Action outcome event mappings
+
+Actions often invoke methods on plain Java objects.
+When called from action-states and decision-states, these method return values can be used to drive state transitions.
+Since transitions are triggered by events, a method return value must first be mapped to an Event object.
+The following table describes how common return value types are mapped to Event objects:
+
+**Table 6.1. Action method return value to event id mappings**
+
+|Method return type|Mapped Event identifier expression|
+|------------------|----------------------------------|
+| java.lang.String | the String value |
+|java.lang.Boolean | yes (for true), no (for false) |
+| java.lang.Enum | the Enum name |
+| any other type | success |
+
+This is illustrated in the example action state below, which invokes a method that returns a boolean value:
+
+```
+
+
+```
+
+## 6.5. Action implementations
+
+While writing action code as POJO logic is the most common, there are several other action implementation options.
+Sometimes you need to write action code that needs access to the flow context.
+You can always invoke a POJO and pass it the flowRequestContext as an EL variable.
+Alternatively, you may implement the `Action` interface or extend from the `MultiAction` base class.
+These options provide stronger type safety when you have a natural coupling between your action code and Spring Web Flow APIs.
+Examples of each of these approaches are shown below.
+
+### 6.5.1. Invoking a POJO action
+
+```
+
+
+```
+
+```
+public class PojoAction {
+    public String method(RequestContext context) {
+        ...
+    }
+}
+```
+
+### 6.5.2. Invoking a custom Action implementation
+
+```
+
+
+```
+
+```
+public class CustomAction implements Action {
+    public Event execute(RequestContext context) {
+        ...
+    }
+}
+```
+
+### 6.5.3. Invoking a MultiAction implementation
+
+```
+
+
+```
+
+```
+public class CustomMultiAction extends MultiAction {
+    public Event actionMethod1(RequestContext context) {
+        ...
+    }
+
+    public Event actionMethod2(RequestContext context) {
+        ...
+    }
+
+    ...
+}
+```
+
+## 6.6. Action exceptions
+
+Actions often invoke services that encapsulate complex business logic.
+These services may throw business exceptions that the action code should handle.
+
+### 6.6.1. Handling a business exception with a POJO action
+
+The following example invokes an action that catches a business exception, adds an error message to the context, and returns a result event identifier.
+The result is treated as a flow event which the calling flow can then respond to.
+
+```
+
+
+```
+
+```
+public class BookingAction {
+    public String makeBooking(Booking booking, RequestContext context) {
+        try {
+            BookingConfirmation confirmation = bookingService.make(booking);
+            context.getFlowScope().put("confirmation", confirmation);
+            return "success";
+        } catch (RoomNotAvailableException e) {
+            context.getMessageContext().addMessage(new MessageBuilder().error()
+                    .defaultText("No room is available at this hotel").build());
+            return "error";
+        }
+    }
+}
+```
+
+### 6.6.2. Handling a business exception with a MultiAction
+
+The following example is functionally equivalent to the last, but implemented as a MultiAction instead of a POJO action.
+The MultiAction requires its action methods to be of the signature `Event ${methodName}(RequestContext)`, providing stronger type safety, while a POJO action allows for more freedom.
+
+```
+
+
+```
+
+```
+public class BookingAction extends MultiAction {
+    public Event makeBooking(RequestContext context) {
+        try {
+            Booking booking = (Booking) context.getFlowScope().get("booking");
+            BookingConfirmation confirmation = bookingService.make(booking);
+            context.getFlowScope().put("confirmation", confirmation);
+            return success();
+        } catch (RoomNotAvailableException e) {
+            context.getMessageContext().addMessage(new MessageBuilder().error()
+                    .defaultText("No room is available at this hotel").build());
+            return error();
+        }
+    }
+}
+```
+
+### 6.6.3. Using an exception-handler element
+
+While it is generally recommended to catch exceptions in actions and return result
+events that drive standard transitions, it is also possible to add an `exception-handler` sub-element to any state type, with a `bean` attribute referencing a bean of type `FlowExecutionExceptionHandler`. This is an advanced
+option that, if used incorrectly, can leave the flow execution in an invalid state.
+Consider the built-in `TransitionExecutingFlowExecutionExceptionHandler` as an example of a correct implementation.
+
+## 6.7. Other Action execution examples
+
+### 6.7.1. on-start
+
+The following example shows an action that creates a new Booking object by invoking a method on a service:
+
+```
+
+
+```
+
+### 6.7.2. on-entry
+
+The following example shows a state entry action that sets the special `fragments` variable that causes the view-state to render a partial fragment of its view:
+
+```
+
+
+```
+
+### 6.7.3. on-exit
+
+The following example shows a state exit action that releases a lock on a record being edited:
+
+```
+
+
+```
+
+### 6.7.4. on-end
+
+The following example shows the equivalent object locking behavior using flow start and end actions:
+
+```
+
+
+```
+
+### 6.7.5. on-render
+
+The following example shows a render action that loads a list of hotels to display before the view is rendered:
+
+```
+
+
+```
+
+### 6.7.6. on-transition
+
+The following example shows a transition action that adds a subflow outcome event attribute to a collection:
+
+```
+
+
+```
+
+### 6.7.7. Named actions
+
+The following example shows how to execute a chain of actions in an action-state.
+The name of each action becomes a qualifier for the action's result event.
+
+```
+
+
+```
+
+In this example, the flow will transition to `showResults` when `thingTwo` completes successfully.
+
+### 6.7.8. Streaming actions
+
+Sometimes an Action needs to stream a custom response back to the client.
+An example might be a flow that renders a PDF document when handling a print event.
+This can be achieved by having the action stream the content and then record a "Response Complete" status on the ExternalContext.
+The responseComplete flag tells the pausing view-state not to render the response because another object has taken care of it.
+
+```
+
+
+```
+
+```
+public class PrintBoardingPassAction extends AbstractAction {
+    public Event doExecute(RequestContext context) {
+        // stream PDF content here...
+        // - Access HttpServletResponse by calling context.getExternalContext().getNativeResponse();
+        // - Mark response complete by calling context.getExternalContext().recordResponseComplete();
+        return success();
+    }
+}
+```
+
+In this example, when the print event is raised, the flow will call the printBoardingPassAction.
+The action will render the PDF and then mark the response as complete.
+
+### 6.7.9. Handling File Uploads
+
+Another common task is to use Web Flow to handle multipart file uploads in combination with Spring MVC's `MultipartResolver`. Once the resolver is set up correctly [as described here](http://static.springsource.org/spring/docs/2.5.x/reference/mvc.html#mvc-multipart) and the submitting
+HTML form is configured with `enctype="multipart/form-data"`, you can easily handle the file upload in a
+transition action.
+
+| ![[Note]](images/note.png) |Note|
+|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---|
+|The file upload example below is not relevant when using Web Flow with JSF. See [Section 13.8, “Handling File Uploads with JSF”](spring-faces.html#spring-faces-file-upload) for details of how to upload files using JSF.| |
+
+Given a form such as:
+
+```
+
+ Select file:
+
+
+```
+
+and a backing object for handling the upload such as:
+
+```
+package org.springframework.webflow.samples.booking;
+
+import org.springframework.web.multipart.MultipartFile;
+
+public class FileUploadHandler {
+
+    private transient MultipartFile file;
+
+    public void processFile() {
+        //Do something with the MultipartFile here
+    }
+
+    public void setFile(MultipartFile file) {
+        this.file = file;
+    }
+}
+```
+
+you can process the upload using a transition action as in the following example:
+
+```
+
+
+```
+
+The `MultipartFile` will be bound to the `FileUploadHandler` bean as
+part of the normal form binding process so that it will be available to process during the
+execution of the transition action.
\ No newline at end of file
diff --git a/docs/en/spring-web-flow/defining-flows.md b/docs/en/spring-web-flow/defining-flows.md
new file mode 100644
index 0000000000000000000000000000000000000000..c605419d0e916b65aa78fe394aa2baf437256733
--- /dev/null
+++ b/docs/en/spring-web-flow/defining-flows.md
@@ -0,0 +1,490 @@
+# 3. Defining Flows
+
+## 3.1. Introduction
+
+This chapter begins the Users Section.
+It shows how to implement flows using the flow definition language.
+By the end of this chapter you should have a good understanding of language constructs, and be capable of authoring a flow definition.
+
+## 3.2. What is a flow?
+
+A flow encapsulates a reusable sequence of steps that can execute in different contexts.
+Below is a [Garrett Information Architecture](http://www.jjg.net/ia/visvocab/) diagram illustrating a reference to a flow that encapsulates the steps of a hotel booking process:
+
+Site Map illustrating a reference to a flow
+
+## 3.3. What is the makeup of a typical flow?
+
+In Spring Web Flow, a flow consists of a series of steps called "states".
+Entering a state typically results in a view being displayed to the user.
+On that view, user events occur that are handled by the state.
+These events can trigger transitions to other states, which result in view navigations.
+
+The example below shows the structure of the book hotel flow referenced in the previous diagram:
+
+Flow diagram
+
+## 3.4. How are flows authored?
+
+Flows are authored by web application developers using a simple XML-based flow definition language.
+The next steps of this guide will walk you through the elements of this language.
+
+## 3.5. Essential language elements
+
+### 3.5.1. flow
+
+Every flow begins with the following root element:
+
+```
+
+
+```
+
+All states of the flow are defined within this element.
+The first state defined becomes the flow's starting point.
+
+### 3.5.2. view-state
+
+Use the `view-state` element to define a step of the flow that renders a view:
+
+```
+
+
+```
+
+By convention, a view-state maps its id to a view template in the directory where the flow is located.
+For example, the state above might render `/WEB-INF/hotels/booking/enterBookingDetails.xhtml` if the flow itself was located in the `/WEB-INF/hotels/booking` directory.
+
+### 3.5.3. transition
+
+Use the `transition` element to handle events that occur within a state:
+
+```
+
+
+```
+
+These transitions drive view navigations.
+
+### 3.5.4. end-state
+
+Use the `end-state` element to define a flow outcome:
+
+```
+
+
+```
+
+When a flow transitions to an end-state, it terminates and the outcome is returned.
+
+### 3.5.5. Checkpoint: Essential language elements
+
+With the three elements `view-state`, `transition`, and `end-state`, you can quickly express your view navigation logic.
+Teams often do this before adding flow behaviors so they can focus on developing the user interface of the application with end users first.
+Below is a sample flow that implements its view navigation logic using these elements:
+
+```
+
+
+```
+
+## 3.6. Actions
+
+Most flows need to express more than just view navigation logic.
+Typically they also need to invoke business services of the application or other actions.
+
+Within a flow, there are several points where you can execute actions. These points are:
+
+* On flow start
+
+* On state entry
+
+* On view render
+
+* On transition execution
+
+* On state exit
+
+* On flow end
+
+Actions are defined using a concise expression language. Spring Web Flow uses Spring EL by default.
+The next few sections will cover the essential language elements for defining actions.
+
+### 3.6.1. evaluate
+
+The action element you will use most often is the `evaluate` element.
+Use the `evaluate` element to evaluate an expression at a point within your flow.
+With this single tag you can invoke methods on Spring beans or any other flow variable.
+For example:
+
+```
+
+
+```
+
+#### Assigning an evaluate result
+
+If the expression returns a value, that value can be saved in the flow's data model called `flowScope`:
+
+```
+
+
+```
+
+#### Converting an evaluate result
+
+If the expression returns a value that may need to be converted, specify the desired type using the `result-type` attribute:
+
+```
+
+
+```
+
+### 3.6.2. Checkpoint: flow actions
+
+Now review the sample booking flow with actions added:
+
+```
+
+
+```
+
+This flow now creates a Booking object in flow scope when it starts.
+The id of the hotel to book is obtained from a flow input attribute.
+
+## 3.7. Input/Output Mapping
+
+Each flow has a well-defined input/output contract.
+Flows can be passed input attributes when they start, and can return output attributes when they end.
+In this respect, calling a flow is conceptually similar to calling a method with the following signature:
+
+```
+FlowOutcome flowId(Map<String, Object> inputAttributes);
+```
+
+... where a `FlowOutcome` has the following signature:
+
+```
+public interface FlowOutcome {
+    public String getName();
+    public Map<String, Object> getOutputAttributes();
+}
+```
+
+### 3.7.1. input
+
+Use the `input` element to declare a flow input attribute:
+
+```
+
+
+```
+
+Input values are saved in flow scope under the name of the attribute.
+For example, the input above would be saved under the name `hotelId`.
+
+#### Declaring an input type
+
+Use the `type` attribute to declare the input attribute's type:
+
+```
+
+
+```
+
+If an input value does not match the declared type, a type conversion will be attempted.
+
+#### Assigning an input value
+
+Use the `value` attribute to specify an expression to assign the input value to:
+
+```
+
+
+```
+
+If the expression's value type can be determined, that metadata will be used for type coercion if no `type` attribute is specified.
+
+#### Marking an input as required
+
+Use the `required` attribute to enforce that the input is not null or empty:
+
+```
+
+
+```
+
+### 3.7.2. output
+
+Use the `output` element to declare a flow output attribute.
+Output attributes are declared within end-states that represent specific flow outcomes.
+
+```
+
+
+```
+
+Output values are obtained from flow scope under the name of the attribute.
+For example, the output above would be assigned the value of the `bookingId` variable.
+
+#### Specifying the source of an output value
+
+Use the `value` attribute to denote a specific output value expression:
+
+```
+
+
+```
+
+### 3.7.3. Checkpoint: input/output mapping
+
+Now review the sample booking flow with input/output mapping:
+
+```
+
+
+```
+
+The flow now accepts a `hotelId` input attribute and returns a `bookingId` output attribute
+when a new booking is confirmed.
+
+## 3.8. Variables
+
+A flow may declare one or more instance variables.
+These variables are allocated when the flow starts.
+Any `@Autowired` transient references the variable holds are also rewired when the flow resumes.
+
+### 3.8.1. var
+
+Use the `var` element to declare a flow variable:
+
+```
+
+
+```
+
+Make sure your variable's class implements `java.io.Serializable`, as the instance state is saved between flow requests.
+
+## 3.9. Variable Scopes
+
+Web Flow can store variables in one of several scopes:
+
+### 3.9.1. Flow Scope
+
+Flow scope gets allocated when a flow starts and destroyed when the flow ends.
+With the default implementation, any objects stored in flow scope need to be Serializable.
+
+### 3.9.2. View Scope
+
+View scope gets allocated when a `view-state` enters and destroyed when the state exits.
+View scope is *only* referenceable from within a `view-state`. With the
+default implementation, any objects stored in view scope need to be Serializable.
+
+### 3.9.3. Request Scope
+
+Request scope gets allocated when a flow is called and destroyed when the flow returns.
+
+### 3.9.4. Flash Scope
+
+Flash scope gets allocated when a flow starts, cleared after every view render, and destroyed when the
+flow ends. With the default implementation, any objects stored in flash scope need to be Serializable.
+
+### 3.9.5. Conversation Scope
+
+Conversation scope gets allocated when a top-level flow starts and destroyed when the top-level flow ends.
+Conversation scope is shared by a top-level flow and all of its subflows.
With the default
+implementation, conversation scoped objects are stored in the HTTP session and should generally be
+Serializable to account for typical session replication.
+
+The scope to use is often determined contextually, for example depending on where a
+variable is defined -- at the start of the flow definition (flow scope), inside
+a view state (view scope), etc. In other cases, for example in EL expressions and
+Java code, it needs to be specified explicitly. Subsequent sections explain
+how this is done.
+
+## 3.10. Calling subflows
+
+A flow may call another flow as a subflow. The flow will wait until the subflow returns, then respond to the subflow outcome.
+
+### 3.10.1. subflow-state
+
+Use the `subflow-state` element to call another flow as a subflow:
+
+```
+
+
+```
+
+The above example calls the `createGuest` flow, then waits for it to return.
+When the flow returns with a `guestCreated` outcome, the new guest is added to the booking's guest list.
+
+#### Passing a subflow input
+
+Use the `input` element to pass input to the subflow:
+
+```
+
+
+```
+
+#### Mapping subflow output
+
+When a subflow completes, its end-state id is returned to the calling flow as
+the event to use to continue navigation.
+
+The subflow can also create output attributes to which the calling flow can refer
+within an outcome transition as follows:
+
+```
+
+
+```
+
+In the above example, `guest` is the name of an output attribute returned by the `guestCreated` outcome.
+
+### 3.10.2. Checkpoint: calling subflows
+
+Now review the sample booking flow calling a subflow:
+
+```
+
+
+```
+
+The flow now calls a `createGuest` subflow to add a new guest to the guest list.
\ No newline at end of file
diff --git a/docs/en/spring-web-flow/el.md b/docs/en/spring-web-flow/el.md
new file mode 100644
index 0000000000000000000000000000000000000000..d855db56ee0038e026e44bc6a8eb40b61d4e519b
--- /dev/null
+++ b/docs/en/spring-web-flow/el.md
@@ -0,0 +1,320 @@
+# 4. Expression Language (EL)
+
+## 4.1. Introduction
+
+Web Flow uses EL to access its data model and to invoke actions.
+This chapter will familiarize you with EL syntax, configuration, and special EL variables you can reference from your flow definition.
+
+EL is used for many things within a flow, including:
+
+1. Access client data such as declaring flow inputs or referencing request parameters.
+
+2. Access data in Web Flow's `RequestContext` such as `flowScope` or `currentEvent`.
+
+3. Invoke methods on Spring-managed objects through actions.
+
+4. Resolve expressions such as state transition criteria, subflow ids, and view names.
+
+EL is also used to bind form parameters to model objects and, conversely, to render formatted form fields from the properties of a model object.
+That, however, does not apply when using Web Flow with JSF, in which case the standard JSF component lifecycle applies.
+
+### 4.1.1. Expression types
+
+An important concept to understand is that there are two types of expressions in Web Flow: standard expressions and template expressions.
+
+#### Standard Expressions
+
+The first and most common type of expression is the *standard expression*.
+Such expressions are evaluated directly by the EL and need not be enclosed in delimiters like `#{}`.
+For example:
+
+```
+
+
+```
+
+The expression above is a standard expression that invokes the `nextPage` method on the `searchCriteria` variable when evaluated.
+If you attempt to enclose this expression in a special delimiter like `#{}`, you will get an `IllegalArgumentException`.
+In this context, the delimiter is seen as redundant.
+The only acceptable value for the `expression` attribute is a single expression string.
+
+#### Template expressions
+
+The second type of expression is a *template expression*.
+A template expression allows mixing of literal text with one or more standard expressions.
+Each standard expression block is explicitly surrounded with the `#{}` delimiters.
+For example:
+
+```
+
+
+```
+
+The expression above is a template expression.
+The result of evaluation will be a string that concatenates literal text such as `error-` and `.xhtml` with the result of evaluating `externalContext.locale`.
+As you can see, explicit delimiters are necessary here to demarcate standard expression blocks within the template.
+
+| ![[Note]](images/note.png) |Note|
+|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---|
+|See the Web Flow XML schema for a complete listing of those XML attributes that accept standard expressions and those that accept template expressions.
You can also use F2 in Eclipse (or the equivalent shortcut in other IDEs) to access available documentation when typing out specific flow definition attributes.| |
+
+## 4.2. EL Implementations
+
+### 4.2.1. Spring EL
+
+Web Flow uses the [Spring Expression Language](https://docs.spring.io/spring/docs/current/spring-framework-reference/html/expressions.html) (Spring EL).
+Spring EL was created to provide a single, well-supported expression language for use across all the products in the Spring portfolio.
+It is distributed as a separate jar, `org.springframework.expression`, in the Spring Framework.
+
+### 4.2.2. Unified EL
+
+Use of [Unified EL](https://en.wikipedia.org/wiki/Unified_Expression_Language) also implies a dependency on `el-api`, although that is typically *provided* by your web container.
+Although Spring EL is the default and recommended expression language to use,
+it is possible to replace it with Unified EL if you wish to do so.
+You need the following Spring configuration to plug in the `WebFlowELExpressionParser` to the `flow-builder-services`:
+
+```
+
+
+```
+
+Note that if your application is registering custom converters, it is important to ensure that the `WebFlowELExpressionParser` is configured with the conversion service that has those custom converters.
+
+```
+
+
+```
+
+## 4.3. EL portability
+
+In general, you will find Spring EL and Unified EL to have a very similar syntax.
+
+Note, however, that there are some advantages to Spring EL.
+For example, Spring EL is closely integrated with the type conversion of Spring 3, and that allows you to take full advantage of its features.
+Specifically, the automatic detection of generic types as well as the use of formatting annotations is currently supported with Spring EL only.
+
+There are some minor changes to keep in mind when upgrading to Spring EL from Unified EL, as follows:
+
+1. Expressions delineated with `${}` in flow definitions must be changed to `#{}`.
+
+2. Expressions testing the current event `#{currentEvent == 'submit'}` must be changed to `#{currentEvent.id == 'submit'}`.
+
+3. Resolving properties such as `#{currentUser.name}` may cause a `NullPointerException` without any checks such as `#{currentUser != null ? currentUser.name : null}`.
+   A much better alternative, though, is the safe navigation operator `#{currentUser?.name}`.
+
+For more information on Spring EL syntax, please refer to the [Language Reference](http://static.springsource.org/spring/docs/3.0.x/spring-framework-reference/html/expressions.html#expressions-language-ref) section in the Spring Documentation.
+
+## 4.4. Special EL variables
+
+There are several implicit variables you may reference from within a flow.
+These variables are discussed in this section.
+
+Keep in mind this general rule.
+Variables referring to data scopes (flowScope, viewScope, requestScope, etc.) should only be used when assigning a new variable to one of the scopes.
+
+For example, when assigning the result of the call to `bookingService.findHotels(searchCriteria)` to a new variable called "hotels", you must prefix it with a scope variable in order to let Web Flow know where you want it stored:
+
+```
+
+
+```
+
+However, when setting an existing variable such as "searchCriteria" in the example below, you reference the variable directly without prefixing it with any scope variables:
+
+```
+
+
+```
+
+The following is the list of implicit variables you can reference within a flow definition:
+
+### 4.4.1.
flowScope + +Use `flowScope` to assign a flow variable. +Flow scope gets allocated when a flow starts and destroyed when the flow ends. With the default +implementation, any objects stored in flow scope need to be Serializable. + +``` + + +``` + +### 4.4.2. viewScope + +Use `viewScope` to assign a view variable. +View scope gets allocated when a `view-state` enters and destroyed when the state exits. +View scope is *only* referenceable from within a `view-state`. With the +default implementation, any objects stored in view scope need to be Serializable. + +``` + + + + +``` + +### 4.4.3. requestScope + +Use `requestScope` to assign a request variable. +Request scope gets allocated when a flow is called and destroyed when the flow returns. + +``` + + +``` + +### 4.4.4. flashScope + +Use `flashScope` to assign a flash variable. +Flash scope gets allocated when a flow starts, cleared after every view render, and destroyed when the +flow ends. With the default implementation, any objects stored in flash scope need to be Serializable. + +``` + + +``` + +### 4.4.5. conversationScope + +Use `conversationScope` to assign a conversation variable. +Conversation scope gets allocated when a top-level flow starts and destroyed when the top-level flow ends. +Conversation scope is shared by a top-level flow and all of its subflows. With the default +implementation, conversation scoped objects are stored in the HTTP session and should generally be +Serializable to account for typical session replication. + +``` + + +``` + +### 4.4.6. requestParameters + +Use `requestParameters` to access a client request parameter: + +``` + + +``` + +### 4.4.7. currentEvent + +Use `currentEvent` to access attributes of the current `Event`: + +``` + + +``` + +### 4.4.8. currentUser + +Use `currentUser` to access the authenticated `Principal`: + +``` + + +``` + +### 4.4.9. messageContext + +Use `messageContext` to access a context for retrieving and creating flow execution messages, including error and success messages. +See the `MessageContext` Javadocs for more information. + +``` + + +``` + +### 4.4.10. resourceBundle + +Use `resourceBundle` to access a message resource. + +``` + + +``` + +### 4.4.11. flowRequestContext + +Use `flowRequestContext` to access the `RequestContext` API, which is a representation of the current flow request. +See the API Javadocs for more information. + +### 4.4.12. flowExecutionContext + +Use `flowExecutionContext` to access the `FlowExecutionContext` API, which is a representation of the current flow state. +See the API Javadocs for more information. + +### 4.4.13. flowExecutionUrl + +Use `flowExecutionUrl` to access the context-relative URI for the current flow execution view-state. + +### 4.4.14. externalContext + +Use `externalContext` to access the client environment, including user session attributes. +See the `ExternalContext` API JavaDocs for more information. + +``` + + +``` + +## 4.5. Scope searching algorithm + +As mentioned earlier in this section when assigning a variable in one of the flow scopes, referencing that scope is required. +For example: + +``` + + +``` + +When simply accessing a variable in one of the scopes, referencing the scope is optional. +For example: + +``` + + +``` + +When no scope is specified, like in the use of `booking` above, a scope searching algorithm is used. +The algorithm will look in request, flash, view, flow, and conversation scope for the variable. +If no such variable is found, an `EvaluationException` will be thrown. 
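For illustration, the search order can be sketched in Java against the `RequestContext` scope accessors. This is an illustrative sketch only: Web Flow performs this lookup internally through its EL resolvers, and the real algorithm raises an `EvaluationException` rather than the exception used here:

```
import org.springframework.webflow.execution.RequestContext;

// Illustrative sketch of the scope searching order:
// request, flash, view, flow, then conversation scope.
public class ScopeSearchExample {

    public static Object search(RequestContext context, String name) {
        if (context.getRequestScope().contains(name)) {
            return context.getRequestScope().get(name);
        }
        if (context.getFlashScope().contains(name)) {
            return context.getFlashScope().get(name);
        }
        // View scope is only available while a view-state is active.
        if (context.inViewState() && context.getViewScope().contains(name)) {
            return context.getViewScope().get(name);
        }
        if (context.getFlowScope().contains(name)) {
            return context.getFlowScope().get(name);
        }
        if (context.getConversationScope().contains(name)) {
            return context.getConversationScope().get(name);
        }
        throw new IllegalStateException("No variable '" + name + "' found in any scope");
    }
}
```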
\ No newline at end of file diff --git a/docs/en/spring-web-flow/field-mappings.md b/docs/en/spring-web-flow/field-mappings.md new file mode 100644 index 0000000000000000000000000000000000000000..19b85ed569009a57b85c1d83a763a8aac1f0eba5 --- /dev/null +++ b/docs/en/spring-web-flow/field-mappings.md @@ -0,0 +1,134 @@ +# Appendix A. Flow Definition Language 1.0 to 2.0 Mappings + +The flow definition language has changed since the 1.0 release. +This is a listing of the language elements in the 1.0 release, and how they map to elements in the 2.0 release. +While most of the changes are semantic, there are a few structural changes. +Please see the upgrade guide for more details about changes between Web Flow 1.0 and 2.0. + + + +**Table A.1. Mappings** + +| SWF 1.0 | SWF 2.0 | Comments | | | +|--------------------|---------------------|--------------------------------------------------------------------------|------------------------|--------------------------------------------------------------------------| +| *action* | *\** | use \ | | | +| | bean | | \* | | +| | name | | \* | | +| | method | | \* | | +| *action-state* | *action-state* | | | | +| | id | | id | | +| | \* | | parent | | +| *argument* | *\** | use \ | | | +| | expression | | | | +| | parameter-type | | | | +| *attribute* | *attribute* | | | | +| | name | | name | | +| | type | | type | | +| | value | | value | | +| *attribute-mapper* | *\** | input and output elements can be in flows or subflows directly | | | +| | bean | | \* | now subflow-attribute-mapper attribute on subflow-state | +| *bean-action* | *\** | use \ | | | +| | bean | | \* | | +| | name | | \* | | +| | method | | \* | | +| *decision-state* | *decision-state* | | | | +| | id | | id | | +| | \* | | parent | | +| *end-actions* | *on-end* | | | | +| *end-state* | *end-state* | | | | +| | id | | id | | +| | view | | view | | +| | \* | | parent | | +| | \* | | commit | | +| *entry-actions* | *on-entry* | | | | +| *evaluate-action* | *evaluate* | | | | +| | expression | | expression | | +| | name | | \* |use \ \ \| +| | \* | | result | | +| | \* | | result-type | | +|*evaluation-result* | *\** | use \ | | | +| | name | | \* | | +| | scope | | \* | | +|*exception-handler* | *exception-handler* | | | | +| | bean | | bean | | +| *exit-actions* | *on-exit* | | | | +| *flow* | *flow* | | | | +| | \* | | start-state | | +| | \* | | parent | | +| | \* | | abstract | | +|*global-transitions*|*global-transitions* | | | | +| *if* | *if* | | | | +| | test | | test | | +| | then | | then | | +| | else | | else | | +| *import* | *bean-import* | | | | +| | resource | | resource | | +| *inline-flow* | *\** | convert to new top-level flow | | | +| | id | | \* | | +| *input-attribute* | *input* | | | | +| | name | | name | | +| | scope | | \* | prefix name with scope \ | +| | required | | required | | +| | \* | | type | | +| | \* | | value | | +| *input-mapper* | *\** | inputs can be in flows and subflows directly | | | +| *mapping* | *input or output* | | | | +| | source | | name or value | name when in flow element, value when in subflow-state element | +| | target | | name or value | value when in flow element, name when in subflow-state element | +| | target-collection | | \* | no longer supported | +| | from | | \* | detected automatically | +| | to | | type | | +| | required | | required | | +| *method-argument* | *\** | use \ | | | +| *method-result* | *\** | use \ | | | +| | name | | \* | | +| | scope | | \* | | +| *output-attribute* | *output* | | | | +| | name | | name | | +| 
| scope | | \* | prefix name with scope \ | +| | required | | required | | +| | \* | | type | | +| | \* | | value | | +| *output-mapper* | *\** | output can be in flows and subflows directly | | | +| *render-actions* | *on-render* | | | | +| *set* | *set* | | | | +| | attribute | | name | | +| | scope | | \* | prefix name with scope \ | +| | value | | value | | +| | name | | \* | use \ \ \ | +| | \* | | type | | +| *start-actions* | *on-start* | | | | +| *start-state* | *\** |now \, or defaults to the first state in the flow| | | +| | idref | | \* | | +| *subflow-state* | *subflow-state* | | | | +| | id | | id | | +| | flow | | subflow | | +| | \* | | parent | | +| | \* | |subflow-attribute-mapper| | +| *transition* | *transition* | | | | +| | on | | on | | +| | on-exception | | on-exception | | +| | to | | to | | +| | \* | | bind | | +| | \* | | validate | | +| | \* | | history | | +| *value* | *value* | | | | +| *var* | *var* | | | | +| | name | | name | | +| | class | | class | | +| | scope | | \* | always flow scope | +| | bean | | \* | all Spring beans can be resolved with EL | +| *view-state* | *view-state* | | | | +| | id | | id | | +| | view | | view | | +| | \* | | parent | | +| | \* | | redirect | | +| | \* | | popup | | +| | \* | | model | | +| | \* | | history | | +| *\** |*persistence-context*| | | | +| *\** | *render* | | | | +| | \* | | fragments | | +| *\** | *secured* | | | | +| | \* | | attributes | | +| | \* | | match | | diff --git a/docs/en/spring-web-flow/flow-inheritance.md b/docs/en/spring-web-flow/flow-inheritance.md new file mode 100644 index 0000000000000000000000000000000000000000..5289ebffc66f0a7443358284d93196e1b78d488e --- /dev/null +++ b/docs/en/spring-web-flow/flow-inheritance.md @@ -0,0 +1,143 @@ +# 9. Flow Inheritance + +## 9.1. Introduction + +Flow inheritance allows one flow to inherit the configuration of another flow. +Inheritance can occur at both the flow and state levels. +A common use case is for a parent flow to define global transitions and exception handlers, then each child flow can inherit those settings. + +In order for a parent flow to be found, it must be added to the `flow-registry` just like any other flow. + +## 9.2. Is flow inheritance like Java inheritance? + +Flow inheritance is similar to Java inheritance in that elements defined in a parent are exposed via the child, however, there are key differences. + +A child flow cannot override an element from a parent flow. +Similar elements between the parent and child flows will be merged. +Unique elements in the parent flow will be added to the child. + +A child flow can inherit from multiple parent flows. +Java inheritance is limited to a single class. + +## 9.3. Types of Flow Inheritance + +### 9.3.1. Flow level inheritance + +Flow level inheritance is defined by the `parent` attribute on the `flow` element. +The attribute contains a comma separated list of flow identifiers to inherit from. +The child flow will inherit from each parent in the order it is listed adding elements and content to the resulting flow. +The resulting flow from the first merge will be considered the child in the second merge, and so on. + +``` + + +``` + +### 9.3.2. State level inheritance + +State level inheritance is similar to flow level inheritance, except only one state inherits from the parent, instead of the entire flow. + +Unlike flow inheritance, only a single parent is allowed. +Additionally, the identifier of the flow state to inherit from must also be defined. 
+The identifiers for the flow and the state within that flow are separated by a #.
+
+The parent and child states must be of the same type.
+For instance, a view-state cannot inherit from an end-state, only another view-state.
+
+```
+
+
+```
+
+| ![[Note]](images/note.png) |Note|
+|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---|
+|The intent for flow-level inheritance is to define common states to be
added to and shared among multiple flow definitions, while the intent
for state-level inheritance is to extend from and merge with a single
parent state. Flow-level inheritance is a good fit for composition
and multiple inheritance, but at the state level you can still only
inherit from a single parent state.| |
+
+## 9.4. Abstract flows
+
+Often parent flows are not designed to be executed directly.
+In order to protect these flows from running, they can be marked as `abstract`.
+If an abstract flow attempts to run, a `FlowBuilderException` will be thrown.
+
+```
+
+
+```
+
+## 9.5. Inheritance Algorithm
+
+When a child flow inherits from its parent, the parent and child are essentially merged together to create a new flow.
+There are rules for every element in the Web Flow definition language that govern how that particular element is merged.
+
+There are two types of elements: *mergeable* and *non-mergeable*.
+Mergeable elements will always attempt to merge together if the elements are similar.
+Non-mergeable elements in a parent or child flow will always be contained in the resulting flow intact.
+They will not be modified as part of the merge process.
+
+| ![[Note]](images/note.png) |Note|
+|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---|
+|Paths to external resources in the parent flow should be absolute.
Relative paths will break when the two flows are merged unless the parent and child flows are in the same directory.
Once merged, all relative paths in the parent flow will become relative to the child flow.| |
+
+### 9.5.1. Mergeable Elements
+
+If the elements are of the same type and their keyed attributes are identical, the content of the parent element will be merged with the child element.
+The merge algorithm will continue to merge each sub-element of the merging parent and child.
+Otherwise the parent element is added as a new element to the child.
+
+In most cases, elements from a parent flow that are added will be added after elements in the child flow.
+Exceptions to this rule include action elements (evaluate, render and set), which will be added at the beginning.
+This allows the results of parent actions to be used by child actions.
+
+Mergeable elements are:
+
+* action-state: id
+
+* attribute: name
+
+* decision-state: id
+
+* end-state: id
+
+* flow: always merges
+
+* if: test
+
+* on-end: always merges
+
+* on-entry: always merges
+
+* on-exit: always merges
+
+* on-render: always merges
+
+* on-start: always merges
+
+* input: name
+
+* output: name
+
+* secured: attributes
+
+* subflow-state: id
+
+* transition: on and on-exception
+
+* view-state: id
+
+### 9.5.2. Non-mergeable Elements
+
+Non-mergeable elements are:
+
+* bean-import
+
+* evaluate
+
+* exception-handler
+
+* persistence-context
+
+* render
+
+* set
+
+* var
\ No newline at end of file
diff --git a/docs/en/spring-web-flow/flow-managed-persistence.md b/docs/en/spring-web-flow/flow-managed-persistence.md
new file mode 100644
index 0000000000000000000000000000000000000000..ed9e8182341fe0c5adddba35b18bf7a992ff97e8
--- /dev/null
+++ b/docs/en/spring-web-flow/flow-managed-persistence.md
@@ -0,0 +1,75 @@
+# 7. Flow Managed Persistence
+
+## 7.1. Introduction
+
+Most applications access data in some way.
+Many modify data shared by multiple users and therefore require transactional data access properties.
+They often transform relational data sets into domain objects to support application processing.
+Web Flow offers "flow managed persistence" where a flow can create, commit, and close an object persistence context for you.
+Web Flow integrates both Hibernate and JPA object persistence technologies.
+
+Apart from flow-managed persistence, there is the pattern of fully encapsulating `PersistenceContext` management within the service layer of your application.
+In that case, the web layer does not get involved with persistence; instead, it works entirely with detached objects that are passed to and returned by your service layer.
+This chapter will focus on flow-managed persistence, exploring how and when to use this feature.
+
+## 7.2. FlowScoped PersistenceContext
+
+This pattern creates a `PersistenceContext` in `flowScope` on flow startup,
+uses that context for data access during the course of flow execution, and commits changes made to persistent entities at the end.
+This pattern provides isolation of intermediate edits by only committing changes to the database at the end of flow execution.
+This pattern is often used in conjunction with an optimistic locking strategy to protect the integrity of data modified in parallel by multiple users.
+To support saving and restarting the progress of a flow over an extended period of time, a durable store for flow state must be used.
+If a save and restart capability is not required, standard HTTP session-based storage of flow state is sufficient.
+In that case, session expiration or termination before commit could potentially result in changes being lost.
+
+To use the FlowScoped PersistenceContext pattern, first mark your flow as a `persistence-context`:
+
+```
+
+
+```
+
+Then configure the correct `FlowExecutionListener` to apply this pattern to your flow.
+If using Hibernate, register the `HibernateFlowExecutionListener`. If using JPA, register the `JpaFlowExecutionListener`.
+
+```
+
+
+```
+
+To trigger a commit at the end, annotate your end-state with the commit attribute:
+
+```
+
+
+```
+
+That is it. When your flow starts, the listener will handle allocating a new `EntityManager` in `flowScope`.
+Reference this EntityManager at any time from within your flow by using the special `persistenceContext` variable.
+In addition, any data access that occurs using a Spring-managed data access object will use this EntityManager automatically.
+Such data access operations should always execute non-transactionally or in read-only transactions to maintain isolation of intermediate edits.
+
+## 7.3. Flow Managed Persistence And Sub-Flows
+
+A flow managed `PersistenceContext` is automatically extended
+(propagated) to subflows, assuming the subflow also has the `persistence-context` variable. When a subflow re-uses the `PersistenceContext` started by its parent, it ignores
+commit flags when an end state is reached, thereby deferring the final decision (to commit or not) to
+its parent.
\ No newline at end of file
diff --git a/docs/en/spring-web-flow/flow-security.md b/docs/en/spring-web-flow/flow-security.md
new file mode 100644
index 0000000000000000000000000000000000000000..affe3345c673fb45250f8a338b6b31a974699ebe
--- /dev/null
+++ b/docs/en/spring-web-flow/flow-security.md
@@ -0,0 +1,161 @@
+# 8. Securing Flows
+
+## 8.1. Introduction
+
+Security is an important concept for any application.
+End users should not be able to access any portion of a site simply by guessing the URL.
+Areas of a site that are sensitive must ensure that only authorized requests are processed.
+Spring Security is a proven security platform that can integrate with your application at multiple levels.
+This section will focus on securing flow execution.
+
+## 8.2. How do I secure a flow?
+
+Securing flow execution is a three-step process:
+
+* Configure Spring Security with authentication and authorization rules
+
+* Annotate the flow definition with the secured element to define the security rules
+
+* Add the SecurityFlowExecutionListener to process the security rules.
+
+Each of these steps must be completed or else flow security rules will not be applied.
+
+## 8.3. The secured element
+
+The secured element designates that its containing element should apply the authorization check before fully entering.
+This may not occur more than once per stage of the flow execution that is secured.
+
+Three phases of flow execution can be secured: flows, states and transitions.
+In each case, the syntax for the secured element is identical.
+The secured element is located inside the element it is securing.
+For example, to secure a state, the secured element occurs directly inside that state:
+
+```
+
+ ...
+
+```
+
+### 8.3.1. Security attributes
+
+The `attributes` attribute is a comma separated list of Spring Security authorization attributes.
+Often, these are specific security roles.
+The attributes are compared against the user's granted attributes by a Spring Security access decision manager.
+
+```
+
+
+```
+
+By default, a role-based access decision manager is used to determine if the user is allowed access.
This will need to be overridden if your application is not using authorization roles.

### 8.3.2. Matching type

There are two types of matching available: `any` and `all`.
`any` allows access if at least one of the required security attributes has been granted to the user.
`all` allows access only if every one of the required security attributes has been granted to the user.

```
<secured attributes="ROLE_USER, ROLE_ANONYMOUS" match="any" />
```

This attribute is optional.
If not defined, the default value is `any`.

The `match` attribute will only be respected if the default access decision manager is used.

## 8.4. The SecurityFlowExecutionListener

Security rules defined in the flow will not, by themselves, protect the flow execution.
A `SecurityFlowExecutionListener` must also be defined in the webflow configuration and applied to the flow executor.

```
<webflow:flow-executor id="flowExecutor" flow-registry="flowRegistry">
    <webflow:flow-execution-listeners>
        <webflow:listener ref="securityFlowExecutionListener" />
    </webflow:flow-execution-listeners>
</webflow:flow-executor>

<bean id="securityFlowExecutionListener"
      class="org.springframework.webflow.security.SecurityFlowExecutionListener" />
```

If access is denied to a portion of the application, an `AccessDeniedException` will be thrown.
This exception will later be caught by Spring Security and used to prompt the user to authenticate.
It is important that this exception be allowed to travel up the execution stack uninhibited; otherwise, the end user may not be prompted to authenticate.

### 8.4.1. Custom Access Decision Managers

If your application is using authorities that are not role based, you will need to configure a custom `AccessDecisionManager`.
You can override the default decision manager by setting the `accessDecisionManager` property on the security listener.
Please consult the [Spring Security reference documentation](http://static.springframework.org/spring-security/site/reference.html) to learn more about decision managers.

```
<bean id="securityFlowExecutionListener"
      class="org.springframework.webflow.security.SecurityFlowExecutionListener">
    <property name="accessDecisionManager" ref="myCustomAccessDecisionManager" />
</bean>
```

## 8.5. Configuring Spring Security

Spring Security has robust configuration options available.
As every application and environment has its own security requirements, the [Spring Security reference documentation](http://static.springframework.org/spring-security/site/reference.html) is the best place to learn the available options.

Both the `booking-faces` and `booking-mvc` sample applications are configured to use Spring Security.
Configuration is needed at both the Spring and web.xml levels.

### 8.5.1. Spring configuration

The Spring configuration defines `http` specifics (such as protected URLs and login/logout mechanics) and the `authentication-provider`.
For the sample applications, a local authentication provider is configured.

```
<security:http auto-config="true">
    <security:form-login login-page="/spring/login"
                         login-processing-url="/spring/loginProcess"
                         default-target-url="/spring/main"
                         authentication-failure-url="/spring/login?login_error=1" />
    <security:logout logout-url="/spring/logout" logout-success-url="/spring/logout-success" />
</security:http>

<security:authentication-provider>
    <security:user-service>
        <!-- sample users; password hashes elided -->
        <security:user name="keith" password="..." authorities="ROLE_USER,ROLE_SUPERVISOR" />
        <security:user name="erwin" password="..." authorities="ROLE_USER" />
    </security:user-service>
</security:authentication-provider>
```

### 8.5.2. web.xml Configuration

In the `web.xml` file, a `filter` is defined to intercept all requests.
This filter will listen for login/logout requests and process them accordingly.
It will also catch `AccessDeniedException`s and redirect the user to the login page.

```
<filter>
    <filter-name>springSecurityFilterChain</filter-name>
    <filter-class>org.springframework.web.filter.DelegatingFilterProxy</filter-class>
</filter>

<filter-mapping>
    <filter-name>springSecurityFilterChain</filter-name>
    <url-pattern>/*</url-pattern>
</filter-mapping>
```
\ No newline at end of file diff --git a/docs/en/spring-web-flow/introduction.md b/docs/en/spring-web-flow/introduction.md new file mode 100644 index 0000000000000000000000000000000000000000..73cd68925b1f6a4cb16bb4dd6589def160417b78 --- /dev/null +++ b/docs/en/spring-web-flow/introduction.md @@ -0,0 +1,95 @@
# 1. Introduction

## 1.1. What this guide covers

This guide covers all aspects of Spring Web Flow.
It covers implementing flows in end-user applications and working with the feature set.
It also covers extending the framework and the overall architectural model.

## 1.2. What Web Flow requires to run

Java 1.8 or higher.
+ +Spring 5.0 or higher. + +## 1.3. Resources + +You can ask questions and interact on StackOverflow using the designated tags, +see [Spring at StackOverflow](https://spring.io/questions). + +Report bugs and make requests using the[Spring Issue Tracker](https://jira.spring.io). + +Submit pull requests and work with the source code , +see [Web Flow on Github](https://github.com/spring-projects/spring-webflow). + +## 1.4. How to access Web Flow artifacts from Maven Central + +Each jar in the Web Flow distribution is available in the [Maven Central Repository](https://search.maven.org). +This allows you to easily integrate Web Flow into your application if you are already using Maven as the +build system for your web development project. + +To access Web Flow jars from Maven Central, declare the following dependency in your pom: + +``` + + org.springframework.webflow + spring-webflow + x.y.z.RELEASE + + + +``` + +If using JavaServer Faces, declare the following dependency in your pom +(includes transitive dependencies "spring-binding", "spring-webflow"): + +``` + + org.springframework.webflow + spring-faces + x.y.z.RELEASE + + + +``` + +## 1.5. How to access nightly builds and milestone releases + +Nightly snapshots of Web Flow development branches are available using Maven. +These snapshot builds are useful for testing out fixes you depend on in advance of the next release, and provide a convenient way for you to provide feedback about whether a fix meets your needs. + +### 1.5.1. Accessing snapshots and milestones with Maven + +For milestones and snapshots you'll need to use the SpringSource repository. +Add the following repository to your Maven pom.xml: + +``` + + spring + Spring Repository + http://repo.spring.io/snapshot + + +``` + +Then declare the following dependencies: + +``` + + org.springframework.webflow + spring-webflow + x.y.z.BUILD-SNAPSHOT + + +``` + +And if using JSF: + +``` + + org.springframework.webflow + spring-faces + x.y.z.BUILD-SNAPSHOT + + +``` \ No newline at end of file diff --git a/docs/en/spring-web-flow/preface.md b/docs/en/spring-web-flow/preface.md new file mode 100644 index 0000000000000000000000000000000000000000..03d9a5409532557e5d08b1fcdec95191b2e3fd75 --- /dev/null +++ b/docs/en/spring-web-flow/preface.md @@ -0,0 +1,12 @@ +# Preface + +Many web applications require the same sequence of steps to execute in different contexts. +Often these sequences are merely components of a larger task the user is trying to accomplish. +Such a reusable sequence is called a flow. + +Consider a typical shopping cart application. +User registration, login, and cart checkout are all examples of flows that can be invoked from several places in this type of application. + +Spring Web Flow is the module of Spring for implementing flows. +The Web Flow engine plugs into the Spring Web MVC platform and provides declarative flow definition language. +This reference guide shows you how to use and extend Spring Web Flow. diff --git a/docs/en/spring-web-flow/spring-faces.md b/docs/en/spring-web-flow/spring-faces.md new file mode 100644 index 0000000000000000000000000000000000000000..c1cf3bc7d080615f6dddfb0eb95fe53fc5528867 --- /dev/null +++ b/docs/en/spring-web-flow/spring-faces.md @@ -0,0 +1,712 @@ +# 13. JSF Integration + +## 13.1. Introduction + +Spring Web Flow provides a JSF integration that lets you use the JSF UI +Component Model with Spring Web Flow controllers. 
Web Flow also provides +a Spring Security tag library for use in JSF environments, +see [Section 13.9, “Using the Spring Security Facelets Tag Library”](spring-faces.html#spring-faces-security-taglib) for more details. + +Spring Web Flow 2.5 requires JSF 2.2 or higher. + +## 13.2. Configuring web.xml + +The first step is to route requests to the`DispatcherServlet` in the `web.xml` file. In this +example, we map all URLs that begin with `/spring/` to the +servlet. The servlet needs to be configured. An `init-param` is +used in the servlet to pass the `contextConfigLocation`. This +is the location of the Spring configuration for your web +application. + +``` + + Spring MVC Dispatcher Servlet + org.springframework.web.servlet.DispatcherServlet + + contextConfigLocation + /WEB-INF/web-application-config.xml + + 1 + + + + Spring MVC Dispatcher Servlet + /spring/* + + +``` + +In order for JSF to bootstrap correctly, the`FacesServlet` must be configured in `web.xml` as it +normally would even though you generally will not need to route requests +through it at all when using JSF with Spring Web Flow. + +``` + + + Faces Servlet + javax.faces.webapp.FacesServlet + 1 + + + + + Faces Servlet + *.faces + + +``` + +The use of Facelets instead of JSP typically requires this in +web.xml: + +``` +!-- Use JSF view templates saved as *.xhtml, for use with Facelets --> + + javax.faces.DEFAULT_SUFFIX + .xhtml + + +``` + +## 13.3. Configuring Web Flow for use with JSF + +This section explains how to configure Web Flow with JSF. +Both Java and XML style configuration are supported. +The following is sample configuration for Web Flow and JSF in XML: + +``` + + + + + + + + + + + + + + + + + + + + + + + +``` + +The following is an example of the same in Java configuration: + +``` +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.faces.config.*; + +@Configuration +public class WebFlowConfig extends AbstractFacesFlowConfiguration { + + @Bean + public FlowExecutor flowExecutor() { + return getFlowExecutorBuilder(flowRegistry()) + .addFlowExecutionListener(new FlowFacesContextLifecycleListener()) + .build(); + } + + @Bean + public FlowDefinitionRegistry flowRegistry() { + return getFlowDefinitionRegistryBuilder() + .setBasePath("/WEB-INF") + .addFlowLocationPattern("**/*-flow.xml").build(); + +} + +``` + +The main points are the installation of a`FlowFacesContextLifecycleListener` that manages a single +FacesContext for the duration of Web Flow request and the use of the`flow-builder-services` element from the `faces`custom namespace to configure rendering for a JSF environment. + +In a JSF environment you'll also need this Spring MVC related +configuration: + +``` + + + + + + + + + + + +``` + +The `resources` custom namespace element delegates JSF +resource requests to the JSF resource API. The`JsfFlowHandlerAdapter` is a replacement for the`FlowHandlerAdapter` normally used with Web Flow. This adapter +initializes itself with a `JsfAjaxHandler` instead of the`SpringJavaSciprtAjaxHandler`. + +When using Java config, the `AbstractFacesFlowConfiguration`base class automatically registers `JsfResourceRequestHandler`so there is nothing further to do. + +## 13.4. Replacing the JSF Managed Bean Facility + +When using JSF with Spring Web Flow you can completely replace the +JSF managed bean facility with a combination of Web Flow managed variables +and Spring managed beans. 
It gives you a good deal more control over the +lifecycle of your managed objects with well-defined hooks for +initialization and execution of your domain model. Additionally, since you +are presumably already using Spring for your business layer, it reduces +the conceptual overhead of having to maintain two different managed bean +models. + +In doing pure JSF development, you will quickly find that request +scope is not long-lived enough for storing conversational model objects +that drive complex event-driven views. In JSF the usual option is to begin +putting things into session scope, with the extra +burden of needing to clean the objects up before progressing to another +view or functional area of the application. What is really needed is a +managed scope that is somewhere between request and session scope. JSF +provides flash and view scopes that can be accessed programmatically via +UIViewRoot.getViewMap(). Spring Web Flow provides access to flash, view, +flow, and conversation scopes. These scopes are seamlessly integrated +through JSF variable resolvers and work the same in all JSF +applications. + +### 13.4.1. Using Flow Variables + +The easiest and most natural way to declare and manage the model +is through the use of [flow +variables](defining-flows.html#flow-variables). You can declare these variables at the beginning of +the flow: + +``` + + +``` + +and then reference this variable in one of the flow's JSF view +templates through EL: + +``` + + +``` + +Note that you do not need to prefix the variable with its scope +when referencing it from the template (though you can do so if you need +to be more specific). As with standard JSF beans, all available scopes +will be searched for a matching variable, so you could change the scope +of the variable in your flow definition without having to modify the EL +expressions that reference it. + +You can also define view instance variables that are scoped to the +current view and get cleaned up automatically upon transitioning to +another view. This is quite useful with JSF as views are often +constructed to handle multiple in-page events across many requests +before transitioning to another view. + +To define a view instance variable, you can use the`var` element inside a `view-state`definition: + +``` + + + + +``` + +### 13.4.2. Using Scoped Spring Beans + +Though defining autowired flow instance variables provides nice +modularization and readability, occasions may arise where you want to +utilize the other capabilities of the Spring container such as AOP. In +these cases, you can define a bean in your Spring ApplicationContext and +give it a specific web flow scope: + +``` + + +``` + +The major difference with this approach is that the bean will not +be fully initialized until it is first accessed via an EL expression. +This sort of lazy instantiation via EL is quite similar to how JSF +managed beans are typically allocated. + +### 13.4.3. Manipulating The Model + +The need to initialize the model before view rendering (such as by +loading persistent entities from a database) is quite common, but JSF by +itself does not provide any convenient hooks for such initialization. +The flow definition language provides a natural facility for this +through its [Actions](defining-flows.html#flow-actions) . Spring Web +Flow provides some extra conveniences for converting the outcome of an +action into a JSF-specific data structure. 
For example:

```
<on-render>
    <evaluate expression="bookingService.findBookings(currentUser.name)"
              result="viewScope.bookings" result-type="dataModel" />
</on-render>
```

This will take the result of the `bookingService.findBookings` method and wrap it in a custom JSF DataModel so that the list can be used in a standard JSF DataTable component:

```
<h:dataTable id="bookings" styleClass="summary" value="#{bookings}" var="booking"
             rendered="#{bookings.rowCount > 0}">
    <h:column>
        <f:facet name="header">Name</f:facet>
        #{booking.hotel.name}
    </h:column>
    <h:column>
        <f:facet name="header">Confirmation number</f:facet>
        #{booking.id}
    </h:column>
    <h:column>
        <f:facet name="header">Action</f:facet>
        <h:commandLink id="cancel" value="Cancel" action="cancelBooking" />
    </h:column>
</h:dataTable>
```

### 13.4.4. Data Model Implementations

In the example above, `result-type="dataModel"` results in the wrapping of the `List<Booking>` with a custom `DataModel` type.
The custom `DataModel` provides extra conveniences such as being serializable for storage beyond request scope as well as access to the currently selected row in EL expressions.
For example, on postback from a view where the action event was fired by a component within a DataTable, you can take action on the selected row's model instance:

```
<transition on="cancelBooking">
    <evaluate expression="bookingService.cancelBooking(bookings.selectedRow)" />
</transition>
```

Spring Web Flow provides two custom DataModel types: `OneSelectionTrackingListDataModel` and `ManySelectionTrackingListDataModel`.
As the names indicate, they keep track of one or multiple selected rows.
This is done with the help of a `SelectionTrackingActionListener` listener, which responds to JSF action events and invokes the appropriate methods on the `SelectionAware` data models to record the currently clicked row.

To understand how this is configured, keep in mind the `FacesConversionService` registers a `DataModelConverter` against the alias "dataModel" on startup.
When `result-type="dataModel"` is used in a flow definition, it causes the `DataModelConverter` to be used.
The converter then wraps the given List with an instance of `OneSelectionTrackingListDataModel`.
To use the `ManySelectionTrackingListDataModel`, you will need to register your own custom converter.

## 13.5. Handling JSF Events With Spring Web Flow

Spring Web Flow allows you to handle JSF action events in a decoupled way, requiring no direct dependencies in your Java code on JSF APIs.
In fact, these events can often be handled completely in the flow definition language without requiring any custom Java action code at all.
This allows for a more agile development process, since the artifacts being manipulated in wiring up events (JSF view templates and SWF flow definitions) are instantly refreshable without requiring a build and re-deploy of the whole application.

### 13.5.1. Handling JSF In-page Action Events

A simple but common case in JSF is the need to signal an event that causes manipulation of the model in some way and then redisplays the same view to reflect the changed state of the model.
The flow definition language has special support for this in the `transition` element.

A good example of this is a table of paged list results.
Suppose you want to be able to load and display only a portion of a large result list, and allow the user to page through the results.
The initial `view-state` definition to load and display the list would be:

```
<view-state id="reviewHotels">
    <on-render>
        <evaluate expression="bookingService.findHotels(searchCriteria)"
                  result="viewScope.hotels" result-type="dataModel" />
    </on-render>
</view-state>
```

You construct a JSF DataTable that displays the current `hotels` list, and then place a "More Results" link below the table:

```
<h:commandLink id="nextPageLink" value="More Results" action="next" />
```

This commandLink signals a "next" event from its action attribute.
You can then handle the event by adding to the `view-state` definition:

```
<view-state id="reviewHotels">
    <on-render>
        <evaluate expression="bookingService.findHotels(searchCriteria)"
                  result="viewScope.hotels" result-type="dataModel" />
    </on-render>
    <transition on="next">
        <evaluate expression="searchCriteria.nextPage()" />
    </transition>
</view-state>
```

Here you handle the "next" event by incrementing the page count on the searchCriteria instance.
The `on-render` action is then called again with the updated criteria, which causes the next page of results to be loaded into the DataModel.
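The `searchCriteria` object in these snippets is simply a bean in a Web Flow scope that tracks paging state. A hypothetical sketch of such a bean (the class name and fields are assumed for illustration, not part of the Web Flow API):

```
import java.io.Serializable;

// Hypothetical paging bean backing the searchCriteria variable used above.
// Serializable so it can be stored in a Web Flow scope.
public class SearchCriteria implements Serializable {

    private String searchString = "";
    private int page;          // current page; incremented by the "next" event
    private int pageSize = 5;  // number of rows fetched per page

    // invoked by <evaluate expression="searchCriteria.nextPage()"/>
    public void nextPage() {
        this.page++;
    }

    public String getSearchString() { return searchString; }
    public void setSearchString(String searchString) { this.searchString = searchString; }
    public int getPage() { return page; }
    public int getPageSize() { return pageSize; }
}
```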
The same view is re-rendered +since there was no `to` attribute on the`transition` element, and the changes in the model are +reflected in the view. + +### 13.5.2. Handling JSF Action Events + +The next logical level beyond in-page events are events that +require navigation to another view, with some manipulation of the model +along the way. Achieving this with pure JSF would require adding a +navigation rule to faces-config.xml and likely some intermediary Java +code in a JSF managed bean (both tasks requiring a re-deploy). With the +flow defintion language, you can handle such a case concisely in one +place in a quite similar way to how in-page events are handled. + +Continuing on with our use case of manipulating a paged list of +results, suppose we want each row in the displayed DataTable to contain +a link to a detail page for that row instance. You can add a column to +the table containing the following `commandLink`component: + +``` + + +``` + +This raises the "select" event which you can then handle by adding +another `transition` element to the existing`view-state` : + +``` + + + + + + + + + + + + +``` + +Here the "select" event is handled by pushing the currently +selected hotel instance from the DataTable into flow scope, so that it +may be referenced by the "reviewHotel" `view-state` . + +### 13.5.3. Performing Model Validation + +JSF provides useful facilities for validating input at field-level +before changes are applied to the model, but when you need to then +perform more complex validation at the model-level after the updates +have been applied, you are generally left with having to add more custom +code to your JSF action methods in the managed bean. Validation of this +sort is something that is generally a responsibility of the domain model +itself, but it is difficult to get any error messages propagated back to +the view without introducing an undesirable dependency on the JSF API in +your domain layer. + +With Web Flow, you can utilize the generic and low-level`MessageContext` in your business code and any messages added +there will then be available to the `FacesContext` at render +time. + +For example, suppose you have a view where the user enters the +necessary details to complete a hotel booking, and you need to ensure +the Check In and Check Out dates adhere to a given set of business +rules. You can invoke such model-level validation from a`transition` element: + +``` + + + + + + +``` + +Here the "proceed" event is handled by invoking a model-level +validation method on the booking instance, passing the generic`MessageContext` instance so that messages may be recorded. +The messages can then be displayed along with any other JSF messages +with the `h:messages` component, + +### 13.5.4. Handling Ajax Events In JSF + +JSF provides built-in support for sending Ajax requests and +performing partial processing and rendering on the server-side. You can +specify a list of id's for partial rendering through the \ +facelets tag. + +In Spring Web Flow you also have the option to specify the ids to +use for partial rendering on the server side with the render +action: + +``` + + + + + + + + + + +``` + +## 13.6. Embedding a Flow On a Page + +By default when a flow enters a view state, it executes a +client-side redirect before rendering the view. This approach is known as +POST-REDIRECT-GET. It has the advantage of separating the form processing +for one view from the rendering of the next view. 
As a result, the browser Back and Refresh buttons work seamlessly without causing any browser warnings.

Normally the client-side redirect is transparent from a user's perspective.
However, there are situations where POST-REDIRECT-GET may not bring the same benefits.
For example, sometimes it may be useful to embed a flow on a page and drive it via Ajax requests, refreshing only the area of the page where the flow is rendered.
Not only is it unnecessary to use client-side redirects in this case, it is also not the desired behavior with regard to keeping the surrounding content of the page intact.

To indicate a flow should execute in "page embedded" mode, all you need to do is pass an extra flow input attribute called "mode" with a value of "embedded".
Below is an example of a top-level container flow invoking a sub-flow in embedded mode:

```
<subflow-state id="bookHotel" subflow="booking">
    <input name="mode" value="'embedded'" />
</subflow-state>
```

When launched in "page embedded" mode, the sub-flow will not issue flow execution redirects during Ajax requests.

If you'd like to see examples of an embedded flow, please refer to the webflow-primefaces-showcase project.
You can check out the source code locally, build it as you would a Maven project, and import it into Eclipse:

```
cd some-directory
svn co https://src.springframework.org/svn/spring-samples/webflow-primefaces-showcase
cd webflow-primefaces-showcase
mvn package
# import into Eclipse
```

The specific example you need to look at is under the "Advanced Ajax" tab and is called "Top Flow with Embedded Sub-Flow".

## 13.7. Redirect In Same State

By default, Web Flow does a client-side redirect even if it remains in the same view state, as long as the current request is not an Ajax request.
This is quite useful after form validation failures, for example.
If the user hits Refresh or Back, they won't see any browser warnings.
They would see such warnings if Web Flow did not perform the redirect.

This can lead to a problem specific to JSF environments, where the Sun Mojarra listener component caches the FacesContext on the assumption that the same instance is available throughout the JSF lifecycle.
In Web Flow, however, the render phase is temporarily put on hold while a client-side redirect is executed.

The default behavior of Web Flow is desirable, and it is unlikely JSF applications will experience the issue.
This is because Ajax is often enabled by default in JSF component libraries, and Web Flow does not redirect during Ajax requests.
However, if you experience this issue, you can disable client-side redirects within the same view as follows:

```
<view-state id="enterSearchCriteria" redirect-in-same-state="false">
    ...
</view-state>
```

## 13.8. Handling File Uploads with JSF

Most JSF component providers include some form of 'file upload' component.
Generally, when working with these components, JSF must take complete control of parsing multi-part requests, and Spring MVC's `MultipartResolver` cannot be used.

Spring Web Flow has been tested with file upload components from PrimeFaces.
Check the documentation of your JSF component library for other providers to see how to configure file upload.

### 13.8.1. File Uploads with PrimeFaces

PrimeFaces provides a `<p:fileUpload/>` component for uploading files.
In order to use the component, you need to configure the `org.primefaces.webapp.filter.FileUploadFilter` servlet filter.
The filter needs to be configured against Spring MVC's`DispatcherServlet` in your `web.xml`: + +``` + + PrimeFaces FileUpload Filter + org.primefaces.webapp.filter.FileUploadFilter + + + PrimeFaces FileUpload Filter + Spring MVC Dispatcher Servlet + + + + primefaces.UPLOADER + commons + + +``` + +For more details refer to the[PrimeFaces documentation](http://primefaces.org/documentation.html). + +## 13.9. Using the Spring Security Facelets Tag Library + +To use the library you'll need to create a `.taglib.xml`file and register it in `web.xml`. + +Create the file`/WEB-INF/springsecurity.taglib.xml` with the following +content: + +``` + + + + http://www.springframework.org/security/tags + + authorize + org.springframework.faces.security.FaceletsAuthorizeTagHandler + + + areAllGranted + org.springframework.faces.security.FaceletsAuthorizeTagUtils + boolean areAllGranted(java.lang.String) + + + areAnyGranted + org.springframework.faces.security.FaceletsAuthorizeTagUtils + boolean areAnyGranted(java.lang.String) + + + areNotGranted + org.springframework.faces.security.FaceletsAuthorizeTagUtils + boolean areNotGranted(java.lang.String) + + + isAllowed + org.springframework.faces.security.FaceletsAuthorizeTagUtils + boolean isAllowed(java.lang.String, java.lang.String) + + + +``` + +Next, register the above file taglib in web.xml: + +``` + + javax.faces.FACELETS_LIBRARIES + /WEB-INF/springsecurity.taglib.xml + + +``` + +Now you are ready to use the tag library in your views. You can use +the authorize tag to include nested content conditionally: + +``` + + + + + Lorem ipsum dolor sit amet + + + + Lorem ipsum dolor sit amet + + + + Lorem ipsum dolor sit amet + + + + +``` + +You can also use one of several EL functions in the rendered or +other attribute of any JSF component: + +``` + + + + + + + + + + + + + + + + + +``` + +## 13.10. Third-Party Component Library Integration + +The Spring Web Flow JSF integration strives to be compatible with +any third-party JSF component library. By honoring all of the standard +semantics of the JSF specification within the SWF-driven JSF lifecycle, +third-party libraries in general should "just work". The main thing to +remember is that configuration in web.xml will change slightly since Web +Flow requests are not routed through the standard FacesServlet. Typically, +anything that is traditionally mapped to the FacesServlet should be mapped +to the Spring DispatcherServlet instead. (You can also map to both if for +example you are migrating a legacy JSF application page-by-page.). \ No newline at end of file diff --git a/docs/en/spring-web-flow/spring-js.md b/docs/en/spring-web-flow/spring-js.md new file mode 100644 index 0000000000000000000000000000000000000000..eda4b95af3331879ecfca657f6c259ba2fe930f5 --- /dev/null +++ b/docs/en/spring-web-flow/spring-js.md @@ -0,0 +1,267 @@ +# 12. Spring JavaScript Quick Reference + +## 12.1. Introduction + +The *spring-js-resources* module is a legacy module that is no longer recommended for use +but is provided still as an optional module for backwards compatibility. Its original aim is to provide a +client-side programming model for progressively enhancing a web page with behavior and Ajax remoting. + +Use of the Spring JS API is demonstrated in the[samples repository](https://github.com/spring-projects/spring-webflow-samples). + +## 12.2. Serving Javascript Resources + +The Spring Framework provides a mechanism for serving static resources. 
See the [Spring Framework documentation](https://docs.spring.io/spring/docs/current/spring-framework-reference/web.html#mvc-config-static-resources).
With the `<mvc:resources>` element, resource requests (.js, .css) are handled by the `DispatcherServlet`.
Here is example configuration in XML (Java config is also available):

```
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:mvc="http://www.springframework.org/schema/mvc"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xsi:schemaLocation="
           http://www.springframework.org/schema/beans
           http://www.springframework.org/schema/beans/spring-beans.xsd
           http://www.springframework.org/schema/mvc
           http://www.springframework.org/schema/mvc/spring-mvc.xsd">

    ...

    <mvc:resources mapping="/resources/**" location="/META-INF/web-resources/" />

</beans>
```

This maps incoming requests for `/resources` to resources found under `/META-INF/web-resources` on the classpath.
That's where Spring JavaScript resources are bundled.
However, you can modify the location attribute in the above configuration in order to serve resources from any classpath or web application relative location.

Note that the full resource URL depends on how your DispatcherServlet is mapped.
In the mvc-booking sample we've chosen to map it with the default servlet mapping '/':

```
<servlet>
    <servlet-name>DispatcherServlet</servlet-name>
    <servlet-class>org.springframework.web.servlet.DispatcherServlet</servlet-class>
</servlet>

<servlet-mapping>
    <servlet-name>DispatcherServlet</servlet-name>
    <url-pattern>/</url-pattern>
</servlet-mapping>
```

That means the full URL to load `Spring.js` is `/myapp/resources/spring/Spring.js`.
If your `DispatcherServlet` was instead mapped to `/main/*`, then the full URL would be `/myapp/main/resources/spring/Spring.js`.

When using the default servlet mapping, it is also recommended to add this to your Spring MVC configuration, which ensures that any resource requests not handled by your Spring MVC mappings will be delegated back to the Servlet container.

```
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:mvc="http://www.springframework.org/schema/mvc"
       ...>

    ...

    <mvc:default-servlet-handler />

</beans>
```

## 12.3. Including Spring Javascript in a Page

Spring JS is designed such that an implementation of its API can be built for any of the popular Javascript toolkits.
The initial implementation of Spring.js builds on the Dojo toolkit.

Using Spring Javascript in a page requires including the underlying toolkit as normal, the `Spring.js` base interface file, and the `Spring-(library implementation).js` file for the underlying toolkit.
As an example, the following includes obtain the Dojo implementation of Spring.js using the `ResourceServlet`:

```
<script type="text/javascript" src="<c:url value="/resources/dojo/dojo.js" />"></script>
<script type="text/javascript" src="<c:url value="/resources/spring/Spring.js" />"></script>
<script type="text/javascript" src="<c:url value="/resources/spring/Spring-Dojo.js" />"></script>
```

When using the widget system of an underlying library, typically you must also include some CSS resources to obtain the desired look and feel.
For the booking-mvc reference application, Dojo's `tundra.css` is included:

```
<link type="text/css" rel="stylesheet" href="<c:url value="/resources/dijit/themes/tundra/tundra.css" />" />
```

## 12.4. Spring Javascript Decorations

A central concept in Spring Javascript is the notion of applying decorations to existing DOM nodes.
This technique is used to progressively enhance a web page such that the page will still be functional in a less capable browser.
The `addDecoration` method is used to apply decorations.

The following example illustrates enhancing a Spring MVC `<form:input>` tag with rich suggestion behavior:

```
<form:input id="searchString" path="searchString" />
<script type="text/javascript">
    Spring.addDecoration(new Spring.ElementDecoration({
        elementId: "searchString",
        widgetType: "dijit.form.ValidationTextBox",
        widgetAttrs: { promptMessage: "Search hotels by name, address, city, or zip." }
    }));
</script>
```

The `ElementDecoration` is used to apply rich widget behavior to an existing DOM node.
This decoration type does not aim to completely hide the underlying toolkit, so the toolkit's native widget type and attributes are used directly.
This approach allows you to use a common decoration model to integrate any widget from the underlying toolkit in a consistent manner.
See the `booking-mvc` reference application for more examples of applying decorations to do things from suggestions to client-side validation.

When using the `ElementDecoration` to apply widgets that have rich validation behavior, a common need is to prevent the form from being submitted to the server until validation passes.
+This can be done with the `ValidateAllDecoration`: + +``` + + + +``` + +This decorates the "Proceed" button with a special onclick event handler that fires the client side validators and does not allow the form to submit until they pass successfully. + +An `AjaxEventDecoration` applies a client-side event listener that fires a remote Ajax request to the server. It also auto-registers a callback function to link in the response: + +``` +Previous + + +``` + +This decorates the onclick event of the "Previous Results" link with an Ajax call, passing along a special parameter that specifies the fragment to be re-rendered in the response. +Note that this link would still be fully functional if Javascript was unavailable in the client. +(See [Section 12.5, “Handling Ajax Requests”](spring-js.html#spring-js-ajax) for details on how this request is handled on the server.) + +It is also possible to apply more than one decoration to an element. +The following example shows a button being decorated with Ajax and validate-all submit suppression: + +``` + + + +``` + +It is also possible to apply a decoration to multiple elements in a single statement using Dojo's query API. +The following example decorates a set of checkbox elements as Dojo Checkbox widgets: + +``` +
<div id="amenities">
    <input type="checkbox" id="amenities1" name="amenities" value="OCEAN_VIEW"/> Ocean View
    <input type="checkbox" id="amenities2" name="amenities" value="LATE_CHECKOUT"/> Late Checkout
    <input type="checkbox" id="amenities3" name="amenities" value="MINIBAR"/> Minibar

    <script type="text/javascript">
        dojo.query("#amenities input[type='checkbox']").forEach(function(element) {
            Spring.addDecoration(new Spring.ElementDecoration({
                elementId: element.id,
                widgetType: "dijit.form.CheckBox",
                widgetAttrs: { checked: element.checked }
            }));
        });
    </script>
</div>
+ +``` + +## 12.5. Handling Ajax Requests + +Spring Javascript's client-side Ajax response handling is built upon the notion of receiving "fragments" back from the server. +These fragments are just standard HTML that is meant to replace portions of the existing page. +The key piece needed on the server is a way to determine which pieces of a full response need to be pulled out for partial rendering. + +In order to be able to render partial fragments of a full response, the full response must be built using a +templating technology that allows the use of composition for constructing the response, and for the member +parts of the composition to be referenced and rendered individually. +Spring Javascript provides some simple Spring MVC extensions that make use of Tiles to achieve this. +The same technique could theoretically be used with any templating system supporting composition. + +Spring Javascript's Ajax remoting functionality is built upon the notion that the core handling code for an +Ajax request should not differ from a standard browser request, thus no special knowledge of an Ajax request +is needed directly in the code and the same hanlder can be used for both styles of request. + +### 12.5.1. Providing a Library-Specific AjaxHandler + +The key interface for integrating various Ajax libraries with the Ajax-aware behavior of Web Flow (such as not redirecting for a +partial page update) is `org.springframework.js.AjaxHandler`. A `SpringJavascriptAjaxHandler` is configured by default that is able to +detect an Ajax request submitted via the Spring JS client-side API and can respond appropriately in the case where a redirect is required. In +order to integrate a different Ajax library (be it a pure JavaScript library, or a higher-level abstraction such as an Ajax-capable JSF +component library), a custom `AjaxHandler` can be injected into the `FlowHandlerAdapter` or `FlowController`. + +### 12.5.2. Handling Ajax Requests with Spring MVC Controllers + +In order to handle Ajax requests with Spring MVC controllers, all that is needed is the configuration of +the provided Spring MVC extensions in your Spring application context for rendering the partial response +(note that these extensions require the use of Tiles for templating): + +``` + + + + +``` + +This configures the `AjaxUrlBasedViewResolver` which in turn interprets Ajax requests and creates `FlowAjaxTilesView` objects to handle rendering of the appropriate fragments. +Note that `FlowAjaxTilesView` is capable of handling the rendering for both Web Flow and pure Spring MVC requests. +The fragments correspond to individual attributes of a Tiles view definition. For example, take the following Tiles view definition: + +``` + + + + + + + + + +``` + +An Ajax request could specify the "body", "hotelSearchForm" or "bookingsTable" to be rendered as fragments in the request. + +### 12.5.3. Handling Ajax Requests with Spring MVC + Spring Web Flow + +Spring Web Flow handles the optional rendering of fragments directly in the flow definition language through use of the `render` element. +The benefit of this approach is that the selection of fragments is completely decoupled from client-side code, such that no special parameters need to be passed with the request the way they +currently must be with the pure Spring MVC controller approach. 
+For example, if you wanted to render the "hotelSearchForm" fragment from the previous example Tiles view into a rich Javascript popup: + +``` + + + + + + + + + +``` \ No newline at end of file diff --git a/docs/en/spring-web-flow/spring-mvc.md b/docs/en/spring-web-flow/spring-mvc.md new file mode 100644 index 0000000000000000000000000000000000000000..74a9fb5628994b6b7a3c53a9711f461ec8705315 --- /dev/null +++ b/docs/en/spring-web-flow/spring-mvc.md @@ -0,0 +1,403 @@ +# 11. Spring MVC Integration + +## 11.1. Introduction + +This chapter shows how to integrate Web Flow into a Spring MVC web +application. The `booking-mvc` sample application is a good +reference for Spring MVC with Web Flow. This application is a simplified +travel site that allows users to search for and book hotel rooms. + +## 11.2. Configuring web.xml + +The first step to using Spring MVC is to configure the`DispatcherServlet` in `web.xml`. You typically do +this once per web application. + +The example below maps all requests that begin with`/spring/` to the DispatcherServlet. An `init-param`is used to provide the `contextConfigLocation`. This is the +configuration file for the web application. + +``` + + Spring MVC Dispatcher Servlet + org.springframework.web.servlet.DispatcherServlet + + contextConfigLocation + /WEB-INF/web-application-config.xml + + + + + Spring MVC Dispatcher Servlet + /spring/* + +``` + +## 11.3. Dispatching to flows + +The `DispatcherServlet` maps requests for application +resources to handlers. A flow is one type of handler. + +### 11.3.1. Registering the FlowHandlerAdapter + +The first step to dispatching requests to flows is to enable flow +handling within Spring MVC. To this, install the`FlowHandlerAdapter`: + +``` + + + + + +``` + +### 11.3.2. Defining flow mappings + +Once flow handling is enabled, the next step is to map specific +application resources to your flows. The simplest way to do this is to +define a `FlowHandlerMapping`: + +``` + + + + + + +``` + +Configuring this mapping allows the Dispatcher to map application +resource paths to flows in a flow registry. For example, accessing the +resource path `/hotels/booking` would result in a registry +query for the flow with id `hotels/booking`. If a flow is +found with that id, that flow will handle the request. If no flow is +found, the next handler mapping in the Dispatcher's ordered chain will +be queried or a "noHandlerFound" response will be returned. + +### 11.3.3. Flow handling workflow + +When a valid flow mapping is found, the`FlowHandlerAdapter` figures out whether to start a new +execution of that flow or resume an existing execution based on +information present the HTTP request. There are a number of defaults +related to starting and resuming flow executions the adapter +employs: + +* HTTP request parameters are made available in the input map of + all starting flow executions. + +* When a flow execution ends without sending a final response, + the default handler will attempt to start a new execution in the + same request. + +* Unhandled exceptions are propagated to the Dispatcher unless + the exception is a NoSuchFlowExecutionException. The default handler + will attempt to recover from a NoSuchFlowExecutionException by + starting over a new execution. + +Consult the API documentation for `FlowHandlerAdapter`for more information. You may override these defaults by subclassing or +by implementing your own FlowHandler, discussed in the next +section. + +## 11.4. 
Implementing custom FlowHandlers + +`FlowHandler` is the extension point that can be used to +customize how flows are executed in a HTTP servlet environment. A`FlowHandler` is used by the `FlowHandlerAdapter`and is responsible for: + +* Returning the `id` of a flow definition to + execute + +* Creating the input to pass new executions of that flow as they + are started + +* Handling outcomes returned by executions of that flow as they + end + +* Handling any exceptions thrown by executions of that flow as + they occur + +These responsibilities are illustrated in the definition of the`org.springframework.mvc.servlet.FlowHandler` interface: + +``` +public interface FlowHandler { + + public String getFlowId(); + + public MutableAttributeMap createExecutionInputMap(HttpServletRequest request); + + public String handleExecutionOutcome(FlowExecutionOutcome outcome, + HttpServletRequest request, HttpServletResponse response); + + public String handleException(FlowException e, + HttpServletRequest request, HttpServletResponse response); +} + +``` + +To implement a FlowHandler, subclass`AbstractFlowHandler`. All these operations are optional, and +if not implemented the defaults will apply. You only need to override the +methods that you need. Specifically: + +* Override `getFlowId(HttpServletRequest)` when the id + of your flow cannot be directly derived from the HTTP request. By + default, the id of the flow to execute is derived from the pathInfo + portion of the request URI. For example,`http://localhost/app/hotels/booking?hotelId=1` results in + a flow id of `hotels/booking` by default. + +* Override`createExecutionInputMap(HttpServletRequest)` when you need + fine-grained control over extracting flow input parameters from the + HttpServletRequest. By default, all request parameters are treated as + flow input parameters. + +* Override `handleExecutionOutcome` when you need to + handle specific flow execution outcomes in a custom manner. The + default behavior sends a redirect to the ended flow's URL to restart a + new execution of the flow. + +* Override `handleException` when you need fine-grained + control over unhandled flow exceptions. The default behavior attempts + to restart the flow when a client attempts to access an ended or + expired flow execution. Any other exception is rethrown to the Spring + MVC ExceptionResolver infrastructure by default. + +### 11.4.1. Example FlowHandler + +A common interaction pattern between Spring MVC And Web Flow is +for a Flow to redirect to a @Controller when it ends. FlowHandlers allow +this to be done without coupling the flow definition itself with a +specific controller URL. An example FlowHandler that redirects to a +Spring MVC Controller is shown below: + +``` +public class BookingFlowHandler extends AbstractFlowHandler { + public String handleExecutionOutcome(FlowExecutionOutcome outcome, + HttpServletRequest request, HttpServletResponse response) { + if (outcome.getId().equals("bookingConfirmed")) { + return "/booking/show?bookingId=" + outcome.getOutput().get("bookingId"); + } else { + return "/hotels/index"; + } + } +} + +``` + +Since this handler only needs to handle flow execution outcomes in +a custom manner, nothing else is overridden. The`bookingConfirmed` outcome will result in a redirect to show +the new booking. Any other outcome will redirect back to the hotels +index page. + +### 11.4.2. Deploying a custom FlowHandler + +To install a custom FlowHandler, simply deploy it as a bean. 
The +bean name must match the id of the flow the handler should apply +to. + +``` + + +``` + +With this configuration, accessing the resource`/hotels/booking` will launch the `hotels/booking`flow using the custom BookingFlowHandler. When the booking flow ends, +the FlowHandler will process the flow execution outcome and redirect to +the appropriate controller. + +### 11.4.3. FlowHandler Redirects + +A FlowHandler handling a FlowExecutionOutcome or FlowException +returns a `String` to indicate the resource to redirect to +after handling. In the previous example, the`BookingFlowHandler` redirects to the`booking/show` resource URI for `bookingConfirmed`outcomes, and the `hotels/index` resource URI for all other +outcomes. + +By default, returned resource locations are relative to the +current servlet mapping. This allows for a flow handler to redirect to +other Controllers in the application using relative paths. In addition, +explicit redirect prefixes are supported for cases where more control is +needed. + +The explicit redirect prefixes supported are: + +* `servletRelative:` - redirect to a resource + relative to the current servlet + +* `contextRelative:` - redirect to a resource + relative to the current web application context path + +* `serverRelative:` - redirect to a resource relative + to the server root + +* `http://` or `https://` - redirect to a + fully-qualified resource URI + +These same redirect prefixes are also supported within a flow +definition when using the `externalRedirect:` directive in +conjunction with a view-state or end-state; for example,`view="externalRedirect:http://springframework.org"` + +## 11.5. View Resolution + +Web Flow 2 maps selected view identifiers to files located within +the flow's working directory unless otherwise specified. For existing +Spring MVC + Web Flow applications, an external `ViewResolver`is likely already handling this mapping for you. Therefore, to continue +using that resolver and to avoid having to change how your existing flow +views are packaged, configure Web Flow as follows: + +``` + + + + + + + + + + +``` + +The MvcViewFactoryCreator is the factory that allows you to +configure how the Spring MVC view system is used inside Spring Web Flow. +Use it to configure existing ViewResolvers, as well as other services such +as a custom MessageCodesResolver. You may also enable data binding use +Spring MVC's native BeanWrapper by setting the`useSpringBinding` flag to true. This is an alternative to +using the Unified EL for view-to-model data binding. See the +JavaDoc API of this class for more information. + +## 11.6. Signaling an event from a View + +When a flow enters a view-state it pauses, redirects the user to its +execution URL, and waits for a user event to resume. Events are generally +signaled by activating buttons, links, or other user interface commands. +How events are decoded server-side is specific to the view technology in +use. This section shows how to trigger events from HTML-based views +generated by templating engines such as JSP, Velocity, or +Freemarker. + +### 11.6.1. Using a named HTML button to signal an event + +The example below shows two buttons on the same form that signal`proceed` and `cancel` events when clicked, +respectively. + +``` + + + +``` + +When a button is pressed Web Flow finds a request parameter name +beginning with `_eventId_` and treats the remaining substring +as the event id. So in this example, submitting`_eventId_proceed` becomes `proceed`. 
This style +should be considered when there are several different events that can be +signaled from the same form. + +### 11.6.2. Using a hidden HTML form parameter to signal an event + +The example below shows a form that signals the`proceed` event when submitted: + +``` + + + +``` + +Here, Web Flow simply detects the special `_eventId`parameter and uses its value as the event id. This style should only be +considered when there is one event that can be signaled on the +form. + +### 11.6.3. Using a HTML link to signal an event + +The example below shows a link that signals the`cancel` event when activated: + +``` +Cancel + +``` + +Firing an event results in a HTTP request being sent back to the +server. On the server-side, the flow handles decoding the event from +within its current view-state. How this decoding process works is +specific to the view implementation. Recall a Spring MVC view +implementation simply looks for a request parameter named`_eventId`. If no `_eventId` parameter is found, +the view will look for a parameter that starts with`_eventId_` and will use the remaining substring as the event +id. If neither cases exist, no flow event is triggered. + +## 11.7. Embedding A Flow On A Page + +By default when a flow enters a view state, it executes a +client-side redirect before rendering the view. This approach is known as +POST-REDIRECT-GET. It has the advantage of separating the form processing +for one view from the rendering of the next view. As a result the browser +Back and Refresh buttons work seamlessly without causing any browser +warnings. + +Normally the client-side redirect is transparent from a user's +perspective. However, there are situations where POST-REDIRECT-GET may not +bring the same benefits. For example a flow may be embedded on a page and driven via +Ajax requests refreshing only the area of the page that belongs to the flow. +Not only is it unnecessary to use client-side redirects in this case, it +is also not the desired behavior with regards to keeping the surrounding +content of the page intact. + +The [Section 12.5, “Handling Ajax Requests”](spring-js.html#spring-js-ajax) explains how to do +partial rendering during Ajax requests. The focus of this section is to +explain how to control flow execution redirect behavior during +Ajax requests. To indicate a flow should execute in "page embedded" mode all +you need to do is append an extra parameter when launching the +flow: + +``` +/hotels/booking?mode=embedded +``` + +When launched in "page embedded" mode a flow will not issue +flow execution redirects during Ajax requests. The mode=embedded parameter +only needs to be passed when launching the flow. Your only other concern is +to use Ajax requests and to render only the content required to update +the portion of the page displaying the flow. + +### 11.7.1. Embedded Mode Vs Default Redirect Behavior + +By default Web Flow does a client-side redirect upon entering every view state. +However if you remain in the same view state -- for example a transition without a "to" attribute -- during an Ajax request there will not be a client-side redirect. +This behavior should be quite familiar to Spring Web Flow 2 users. +It is appropriate for a top-level flow that supports the browser back button while still taking advantage of Ajax and partial rendering for use cases where you remain in the same view such as form validation, paging trough search results, and others. +However transitions to a new view state are always followed with a client-side redirect. 
+That makes it impossible to embed a flow on a page or within a modal dialog and execute more than one view state without causing a full-page refresh. +Hence if your use case requires embedding a flow you can launch it in "embedded" mode. + +### 11.7.2. Embedded Flow Examples + +If you'd like to see examples of a flow embedded on a page and within +a modal dialog please refer to the webflow-showcase project. You can check out +the source code locally, build it as you would a Maven project, and import +it into Eclipse: + +``` +cd some-directory +svn co https://src.springframework.org/svn/spring-samples/webflow-showcase +cd webflow-showcase +mvn package +# import into Eclipse +``` + +## 11.8. Saving Flow Output to MVC Flash Scope + +Flow output can be automatically saved to MVC flash scope when an `end-state`performs an internal redirect. This is particularly useful when displaying a summary +screen at the end of a flow. For backwards compatibility this feature is disabled by +default, to enable set `saveOutputToFlashScopeOnRedirect` on your`FlowHandlerAdapter` to `true`. + +``` + + + + + + +``` + +The following example will add `confirmationNumber` to the MVC flash scope +before redirecting to the `summary` screen. + +``` + + + + +``` \ No newline at end of file diff --git a/docs/en/spring-web-flow/system-setup.md b/docs/en/spring-web-flow/system-setup.md new file mode 100644 index 0000000000000000000000000000000000000000..f373d10934e3c9e12ed2e0aa5a131e6bc635c162 --- /dev/null +++ b/docs/en/spring-web-flow/system-setup.md @@ -0,0 +1,488 @@ +# 10. System Setup + +## 10.1. Introduction + +This chapter shows you how to setup the Web Flow system for use in any web environment. + +## 10.2. Java Config and XML Namespace + +Web Flow provides dedicated configuration support for both Java and +XML-based configuration. + +To get started with XML based configuration declare the webflow config XML namespace: + +``` + + + + + + +``` + +To get started with Java configuration extend`AbstractFlowConfiguration` in an`@Configuration` class: + +``` +import org.springframework.context.annotation.Configuration; +import org.springframework.webflow.config.AbstractFlowConfiguration; + +@Configuration +public class WebFlowConfig extends AbstractFlowConfiguration { + +} + +``` + +## 10.3. Basic system configuration + +The next section shows the minimal configuration required to set up the Web Flow system in your application. + +### 10.3.1. FlowRegistry + +Register your flows in a `FlowRegistry` in XML: + +``` + + + + +``` + +Register your flows in a `FlowRegistry` in Java: + +``` +@Bean +public FlowDefinitionRegistry flowRegistry() { + return getFlowDefinitionRegistryBuilder() + .addFlowLocation("/WEB-INF/flows/booking/booking.xml") + .build(); +} + +``` + +### 10.3.2. FlowExecutor + +Deploy a FlowExecutor, the central service for executing flows in XML: + +``` + + +``` + +Deploy a FlowExecutor, the central service for executing flows in Java: + +``` +@Bean +public FlowExecutor flowExecutor() { + return getFlowExecutorBuilder(flowRegistry()).build(); +} + +``` + +See the Spring MVC and Spring Faces sections of this guide on how to integrate the Web Flow system with the MVC and JSF environment, respectively. + +## 10.4. flow-registry options + +This section explores flow-registry configuration options. + +### 10.4.1. Specifying flow locations + +Use the `location` element to specify paths to flow definitions to register. 
By default, flows will be assigned registry identifiers equal to their filenames minus the file extension, unless a registry base path is defined.

In XML:

```
<webflow:flow-registry id="flowRegistry">
    <webflow:flow-location path="/WEB-INF/flows/booking/booking.xml" />
</webflow:flow-registry>
```

In Java:

```
return getFlowDefinitionRegistryBuilder()
        .addFlowLocation("/WEB-INF/flows/booking/booking.xml")
        .build();
```

### 10.4.2. Assigning custom flow identifiers

Specify an id to assign a custom registry identifier to a flow in XML:

```
<webflow:flow-registry id="flowRegistry">
    <webflow:flow-location path="/WEB-INF/flows/booking/booking.xml" id="bookHotel" />
</webflow:flow-registry>
```

Specify an id to assign a custom registry identifier to a flow in Java:

```
return getFlowDefinitionRegistryBuilder()
        .addFlowLocation("/WEB-INF/flows/booking/booking.xml", "bookHotel")
        .build();
```

### 10.4.3. Assigning flow meta-attributes

Use the `flow-definition-attributes` element to assign custom meta-attributes to a registered flow.

In XML:

```
<webflow:flow-registry id="flowRegistry">
    <webflow:flow-location path="/WEB-INF/flows/booking/booking.xml">
        <webflow:flow-definition-attributes>
            <webflow:attribute name="caption" value="Books a hotel" />
        </webflow:flow-definition-attributes>
    </webflow:flow-location>
</webflow:flow-registry>
```

In Java:

```
Map<String, Object> attrs = ... ;

return getFlowDefinitionRegistryBuilder()
        .addFlowLocation("/WEB-INF/flows/booking/booking.xml", null, attrs)
        .build();
```

### 10.4.4. Registering flows using a location pattern

Use the `flow-location-patterns` element to register flows that match a specific resource location pattern:

In XML:

```
<webflow:flow-registry id="flowRegistry">
    <webflow:flow-location-pattern value="/WEB-INF/flows/**/*-flow.xml" />
</webflow:flow-registry>
```

In Java:

```
return getFlowDefinitionRegistryBuilder()
        .addFlowLocationPattern("/WEB-INF/flows/**/*-flow.xml")
        .build();
```

### 10.4.5. Flow location base path

Use the `base-path` attribute to define a base location for all flows in the application.
All flow locations are then relative to the base path.
The base path can be a resource path such as '/WEB-INF' or a location on the classpath like 'classpath:org/springframework/webflow/samples'.

In XML:

```
<webflow:flow-registry id="flowRegistry" base-path="/WEB-INF">
    <webflow:flow-location path="/hotels/booking/booking.xml" />
</webflow:flow-registry>
```

In Java:

```
return getFlowDefinitionRegistryBuilder()
        .setBasePath("/WEB-INF")
        .addFlowLocation("/hotels/booking/booking.xml")
        .build();
```

With a base path defined, the algorithm that assigns flow identifiers changes slightly.
Flows will now be assigned registry identifiers equal to the path segment between their base path and file name.
For example, if a flow definition is located at '/WEB-INF/hotels/booking/booking-flow.xml' and the base path is '/WEB-INF', the remaining path to this flow is 'hotels/booking', which becomes the flow id.

| ![[Tip]](images/tip.png) |Directory per flow definition|
|:------------------------:|:----------------------------|
|Recall it is a best practice to package each flow definition in a unique directory.
This improves modularity, allowing dependent resources to be packaged with the flow definition.
It also prevents two flows from having the same identifiers when using the convention.| |

If no base path is specified, or if the flow definition is directly on the base path, flow id assignment from the filename (minus the extension) is used.
For example, if a flow definition file is 'booking.xml', the flow identifier is simply 'booking'.

Location patterns are particularly powerful when combined with a registry base path.
Instead of the flow identifiers becoming '*-flow', they will be based on the directory path.
For example in XML:

```
<webflow:flow-registry id="flowRegistry" base-path="/WEB-INF">
    <webflow:flow-location-pattern value="/**/*-flow.xml" />
</webflow:flow-registry>
```

In Java:

```
return getFlowDefinitionRegistryBuilder()
        .setBasePath("/WEB-INF")
        .addFlowLocationPattern("/**/*-flow.xml")
        .build();
```

In the above example, suppose you had flows located in `/user/login`, `/user/registration`, `/hotels/booking`, and `/flights/booking` directories within `WEB-INF`;
you'd end up with flow ids of `user/login`, `user/registration`, `hotels/booking`, and `flights/booking`, respectively.

### 10.4.6. Configuring FlowRegistry hierarchies

Use the `parent` attribute to link two flow registries together in a hierarchy.
When the child registry is queried, if it cannot find the requested flow, it will delegate to its parent.

In XML:

```
<webflow:flow-registry id="flowRegistry" parent="sharedFlowRegistry">
    <webflow:flow-location path="/WEB-INF/flows/booking/booking.xml" />
</webflow:flow-registry>

<webflow:flow-registry id="sharedFlowRegistry">
    <webflow:flow-location path="/WEB-INF/flows/shared.xml" />
</webflow:flow-registry>
```

In Java:

```
@Configuration
public class WebFlowConfig extends AbstractFlowConfiguration {

    @Autowired
    private SharedConfig sharedConfig;

    @Bean
    public FlowDefinitionRegistry flowRegistry() {
        return getFlowDefinitionRegistryBuilder()
                .setParent(this.sharedConfig.sharedFlowRegistry())
                .addFlowLocation("/WEB-INF/flows/booking/booking.xml")
                .build();
    }
}

@Configuration
public class SharedConfig extends AbstractFlowConfiguration {

    @Bean
    public FlowDefinitionRegistry sharedFlowRegistry() {
        return getFlowDefinitionRegistryBuilder()
                .addFlowLocation("/WEB-INF/flows/shared.xml")
                .build();
    }
}
```

### 10.4.7. Configuring custom FlowBuilder services

Use the `flow-builder-services` attribute to customize the services and settings used to build flows in a flow-registry.
If no flow-builder-services tag is specified, the default service implementations are used.
When the tag is defined, you only need to reference the services you want to customize.

In XML:

```
<webflow:flow-registry id="flowRegistry" flow-builder-services="flowBuilderServices">
    <webflow:flow-location path="/WEB-INF/flows/booking/booking.xml" />
</webflow:flow-registry>

<webflow:flow-builder-services id="flowBuilderServices" />
```

In Java:

```
@Bean
public FlowDefinitionRegistry flowRegistry() {
    return getFlowDefinitionRegistryBuilder(flowBuilderServices())
            .addFlowLocation("/WEB-INF/flows/booking/booking.xml")
            .build();
}

@Bean
public FlowBuilderServices flowBuilderServices() {
    return getFlowBuilderServicesBuilder().build();
}
```

The configurable services are the `conversion-service`, `expression-parser`, and `view-factory-creator`.
These services are configured by referencing custom beans you define.

For example in XML:

```
<webflow:flow-builder-services id="flowBuilderServices"
    conversion-service="conversionService"
    expression-parser="expressionParser"
    view-factory-creator="viewFactoryCreator" />

<bean id="conversionService" class="..." />
<bean id="expressionParser" class="..." />
<bean id="viewFactoryCreator" class="..." />
```

In Java:

```
@Bean
public FlowBuilderServices flowBuilderServices() {
    return getFlowBuilderServicesBuilder()
            .setConversionService(conversionService())
            .setExpressionParser(expressionParser())
            .setViewFactoryCreator(viewFactoryCreator())
            .build();
}

@Bean
public ConversionService conversionService() {
    // ...
}

@Bean
public ExpressionParser expressionParser() {
    // ...
}

@Bean
public ViewFactoryCreator viewFactoryCreator() {
    // ...
}
```

#### conversion-service

Use the `conversion-service` attribute to customize the `ConversionService` used by the Web Flow system.
+Type conversion is used to convert from one type to another when required during flow execution, such as when processing request parameters, invoking actions, and so on.
+Many common object types such as numbers, classes, and enums are supported.
+However, you'll probably need to provide your own type conversion and formatting logic for custom data types.
+Please read [Section 5.7, “Performing type conversion”](views.html#view-type-conversion) for important information on how to provide custom type conversion logic.
+
+#### expression-parser
+
+Use the `expression-parser` attribute to customize the `ExpressionParser` used by the Web Flow system.
+The default ExpressionParser uses the Unified EL if available on the classpath; otherwise, Spring EL is used.
+
+#### view-factory-creator
+
+Use the `view-factory-creator` attribute to customize the `ViewFactoryCreator` used by the Web Flow system.
+The default ViewFactoryCreator produces Spring MVC ViewFactories capable of rendering JSP, Velocity, and Freemarker views.
+
+The only configurable setting is `development`.
+This setting is a global configuration attribute that can be applied during the flow construction process.
+
+#### development
+
+Set this to `true` to switch on flow *development mode*.
+Development mode switches on hot-reloading of flow definition changes, including changes to dependent flow resources such as message bundles.
+
+## 10.5. flow-executor options
+
+This section explores flow-executor configuration options.
+
+### 10.5.1. Attaching flow execution listeners
+
+Use the `flow-execution-listeners` element to register listeners that observe the lifecycle
+of flow executions. For example in XML:
+
+```
+<webflow:flow-executor id="flowExecutor" flow-registry="flowRegistry">
+    <webflow:flow-execution-listeners>
+        <webflow:listener ref="securityListener" />
+        <webflow:listener ref="persistenceListener" />
+    </webflow:flow-execution-listeners>
+</webflow:flow-executor>
+```
+
+In Java:
+
+```
+@Bean
+public FlowExecutor flowExecutor() {
+    return getFlowExecutorBuilder(flowRegistry())
+        .addFlowExecutionListener(securityListener())
+        .addFlowExecutionListener(persistenceListener())
+        .build();
+}
+```
+
+You may also configure a listener to observe only certain flows. For example in XML:
+
+```
+<webflow:listener ref="securityListener" criteria="securedFlow1,securedFlow2" />
+```
+
+In Java:
+
+```
+@Bean
+public FlowExecutor flowExecutor() {
+    return getFlowExecutorBuilder(flowRegistry())
+        .addFlowExecutionListener(securityListener(), "securedFlow1,securedFlow2")
+        .build();
+}
+```
+
+### 10.5.2. Tuning FlowExecution persistence
+
+Use the `flow-execution-repository` element to tune flow execution persistence settings.
+For example in XML:
+
+```
+<webflow:flow-executor id="flowExecutor" flow-registry="flowRegistry">
+    <webflow:flow-execution-repository max-executions="5" max-execution-snapshots="30" />
+</webflow:flow-executor>
+```
+
+In Java:
+
+```
+@Bean
+public FlowExecutor flowExecutor() {
+    return getFlowExecutorBuilder(flowRegistry())
+        .setMaxFlowExecutions(5)
+        .setMaxFlowExecutionSnapshots(30)
+        .build();
+}
+```
+
+#### max-executions
+
+Tune the `max-executions` attribute to place a cap on the number of flow executions that can be created per user session.
+When the maximum number of executions is exceeded, the oldest execution is removed.
+
+| ![[Note]](images/note.png) |Note|
+|:--------------------------------------------------------------------------------------------------------:|:---|
+|The `max-executions` attribute is per user session, i.e. it works across instances of any flow definition.| |
+
+#### max-execution-snapshots
+
+Tune the `max-execution-snapshots` attribute to place a cap on the number of history snapshots that can be taken per flow execution.
+To disable snapshotting, set this value to 0. To enable an unlimited number of snapshots, set this value to -1.
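+
+For example, here is a minimal sketch that disables snapshotting entirely; it assumes the same `flowRegistry` bean as the earlier examples:
+
+```
+<webflow:flow-executor id="flowExecutor" flow-registry="flowRegistry">
+    <!-- 0 turns history snapshots off; -1 would allow an unlimited number -->
+    <webflow:flow-execution-repository max-execution-snapshots="0" />
+</webflow:flow-executor>
+```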
+ +| ![[Note]](images/note.png) |Note| +|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---| +|History snapshots enable browser back button support.
When snapshotting is disabled, pressing the browser back button will not work.
It will result in using an execution key that points to a snapshot that has not been recorded.| |
\ No newline at end of file
diff --git a/docs/en/spring-web-flow/testing.md b/docs/en/spring-web-flow/testing.md
new file mode 100644
index 0000000000000000000000000000000000000000..1794502527cc5a295600f56ba27acb5b5c869464
--- /dev/null
+++ b/docs/en/spring-web-flow/testing.md
@@ -0,0 +1,141 @@
+# 14. Testing flows
+
+## 14.1. Introduction
+
+This chapter shows you how to test flows.
+
+## 14.2. Extending AbstractXmlFlowExecutionTests
+
+To test the execution of an XML-based flow definition, extend `AbstractXmlFlowExecutionTests`:
+
+```
+public class BookingFlowExecutionTests extends AbstractXmlFlowExecutionTests {
+
+}
+```
+
+## 14.3. Specifying the path to the flow to test
+
+At a minimum, you must override `getResource(FlowDefinitionResourceFactory)` to return the path to the flow you wish to test:
+
+```
+@Override
+protected FlowDefinitionResource getResource(FlowDefinitionResourceFactory resourceFactory) {
+    return resourceFactory.createFileResource("src/main/webapp/WEB-INF/hotels/booking/booking.xml");
+}
+```
+
+## 14.4. Registering flow dependencies
+
+If your flow has dependencies on externally managed services,
+also override `configureFlowBuilderContext(MockFlowBuilderContext)` to register stubs or mocks of those services:
+
+```
+@Override
+protected void configureFlowBuilderContext(MockFlowBuilderContext builderContext) {
+    builderContext.registerBean("bookingService", new StubBookingService());
+}
+```
+
+If your flow extends from another flow, or has states that extend other states,
+also override `getModelResources(FlowDefinitionResourceFactory)` to return the paths to the parent flows:
+
+```
+@Override
+protected FlowDefinitionResource[] getModelResources(FlowDefinitionResourceFactory resourceFactory) {
+    return new FlowDefinitionResource[] {
+        resourceFactory.createFileResource("src/main/webapp/WEB-INF/common/common.xml")
+    };
+}
+```
+
+## 14.5. Testing flow startup
+
+Have your first test exercise the startup of your flow:
+
+```
+public void testStartBookingFlow() {
+
+    Booking booking = createTestBooking();
+
+    MutableAttributeMap input = new LocalAttributeMap();
+    input.put("hotelId", "1");
+    MockExternalContext context = new MockExternalContext();
+    context.setCurrentUser("keith");
+    startFlow(input, context);
+
+    assertCurrentStateEquals("enterBookingDetails");
+    assertTrue(getRequiredFlowAttribute("booking") instanceof Booking);
+}
+```
+
+Assertions generally verify that the flow is in the state you expect.
+
+## 14.6. Testing flow event handling
+
+Define additional tests to exercise flow event handling behavior.
+Your goal should be to exercise all paths through the flow.
+You can use the convenient `setCurrentState(String)` method to jump to the flow state where you wish to begin your test.
+
+```
+public void testEnterBookingDetails_Proceed() {
+
+    setCurrentState("enterBookingDetails");
+
+    getFlowScope().put("booking", createTestBooking());
+
+    MockExternalContext context = new MockExternalContext();
+    context.setEventId("proceed");
+    resumeFlow(context);
+
+    assertCurrentStateEquals("reviewBooking");
+}
+```
+
+## 14.7. Mocking a subflow
+
+To test calling a subflow, register a mock implementation of the subflow that asserts input was passed in correctly and
+returns the correct outcome for your test scenario.
+ +``` +public void testBookHotel() { + + setCurrentState("reviewHotel"); + + Hotel hotel = new Hotel(); + hotel.setId(1L); + hotel.setName("Jameson Inn"); + getFlowScope().put("hotel", hotel); + + getFlowDefinitionRegistry().registerFlowDefinition(createMockBookingSubflow()); + + MockExternalContext context = new MockExternalContext(); + context.setEventId("book"); + resumeFlow(context); + + // verify flow ends on 'bookingConfirmed' + assertFlowExecutionEnded(); + assertFlowExecutionOutcomeEquals("finish"); +} + +public Flow createMockBookingSubflow() { + Flow mockBookingFlow = new Flow("booking"); + mockBookingFlow.setInputMapper(new Mapper() { + public MappingResults map(Object source, Object target) { + // assert that 1L was passed in as input + assertEquals(1L, ((AttributeMap) source).get("hotelId")); + return null; + } + }); + // immediately return the bookingConfirmed outcome so the caller can respond + new EndState(mockBookingFlow, "bookingConfirmed"); + return mockBookingFlow; +} + +``` \ No newline at end of file diff --git a/docs/en/spring-web-flow/views.md b/docs/en/spring-web-flow/views.md new file mode 100644 index 0000000000000000000000000000000000000000..5c7e44ad3ee868502e80cab6ba219e91ee5e79ac --- /dev/null +++ b/docs/en/spring-web-flow/views.md @@ -0,0 +1,845 @@ +# 5. Rendering views + +## 5.1. Introduction + +This chapter shows you how to use the `view-state` element to render views within a flow. + +## 5.2. Defining view states + +Use the `view-state` element to define a step of the flow that renders a view and waits for a user event to resume: + +``` + + + + +``` + +By convention, a view-state maps its id to a view template in the directory where the flow is located. +For example, the state above might render `/WEB-INF/hotels/booking/enterBookingDetails.xhtml`if the flow itself was located in the `/WEB-INF/hotels/booking` directory. + +Below is a sample directory structure showing views and other resources like message bundles co-located with their flow definition: + + + +Flow Packaging + +## 5.3. Specifying view identifiers + +Use the `view` attribute to specify the id of the view to render explicitly. + +### 5.3.1. Flow relative view ids + +The view id may be a relative path to view resource in the flow's working directory: + +``` + + +``` + +### 5.3.2. Absolute view ids + +The view id may be a absolute path to a view resource in the webapp root directory: + +``` + + +``` + +### 5.3.3. Logical view ids + +With some view frameworks, such as Spring MVC's view framework, the view id may also be a logical identifier resolved by the framework: + +``` + + +``` + +See the Spring MVC integration section for more information on how to integrate with the MVC `ViewResolver` infrastructure. + +## 5.4. View scope + +A view-state allocates a new `viewScope` when it enters. +This scope may be referenced within the view-state to assign variables that should live for the duration of the state. +This scope is useful for manipulating objects over a series of requests from the same view, often Ajax requests. +A view-state destroys its viewScope when it exits. + +### 5.4.1. Allocating view variables + +Use the `var` tag to declare a view variable. +Like a flow variable, any `@Autowired` references are automatically restored when the view state resumes. + +``` + + +``` + +### 5.4.2. Assigning a viewScope variable + +Use the `on-render` tag to assign a variable from an action result before the view renders: + +``` + + + + +``` + +### 5.4.3. 
Manipulating objects in view scope + +Objects in view scope are often manipulated over a series of requests from the same view. +The following example pages through a search results list. +The list is updated in view scope before each render. +Asynchronous event handlers modify the current data page, then request re-rendering of the search results fragment. + +``` + + + + + + + + + + + + + + +``` + +## 5.5. Executing render actions + +Use the `on-render` element to execute one or more actions before view rendering. +Render actions are executed on the initial render as well as any subsequent refreshes, including any partial re-renderings of the view. + +``` + + + + +``` + +## 5.6. Binding to a model + +Use the `model` attribute to declare a model object the view binds to. +This attribute is typically used in conjunction with views that render data controls, such as forms. +It enables form data binding and validation behaviors to be driven from metadata on your model object. + +The following example declares an `enterBookingDetails` state manipulates the `booking` model: + +``` + + +``` + +The model may be an object in any accessible scope, such as `flowScope` or `viewScope`. +Specifying a `model` triggers the following behavior when a view event occurs: + +1. View-to-model binding. On view postback, user input values are bound to model object properties for you. + +2. Model validation. After binding, if the model object requires validation that validation logic will be invoked. + +For a flow event to be generated that can drive a view state transition, model binding must complete successfully. +If model binding fails, the view is re-rendered to allow the user to revise their edits. + +## 5.7. Performing type conversion + +When request parameters are used to populate the model (commonly referred to as data binding), type conversion is required to parse String-based request parameter values before setting target model properties. +Default type conversion is available for many common Java types such as numbers, primitives, enums, and Dates. +Users also have the ability to register their own type conversion logic for user-defined types, and to override the default Converters. + +### 5.7.1. Type Conversion Options + +Starting with version 2.1 Spring Web Flow uses the [type conversion](http://static.springsource.org/spring/docs/3.0.x/spring-framework-reference/html/validation.html#core-convert) and [formatting](http://static.springsource.org/spring/docs/3.0.x/spring-framework-reference/html/validation.html#format) system introduced in Spring 3 for nearly all type conversion needs. +Previously Web Flow applications used a type conversion mechanism that was different from the one in Spring MVC, which relied on the `java.beans.PropertyEditor` abstraction. +Spring 3 offers a modern type conversion alternative to PropertyEditors that was actually influenced by Web Flow's own type conversion system. +Hence Web Flow users should find it natural to work with the new Spring 3 type conversion. +Another obvious and very important benefit of this change is that a single type conversion mechanism can now be used across Spring MVC And Spring Web Flow. + +### 5.7.2. Upgrading to Spring 3 Type Conversion And Formatting + +What does this practically mean for existing applications? +Existing applications are likely registering their own converters of type `org.springframework.binding.convert.converters.Converter` through a sub-class of `DefaultConversionService` available in Spring Binding. 
+Those converters can continue to be registered as before. +They will be adapted as Spring 3 `GenericConverter` types and registered with a Spring 3 `org.springframework.core.convert.ConversionService` instance. +In other words existing converters will be invoked through Spring's type conversion service. + +The only exception to this rule are named converters, which can be referenced from a `binding` element in a `view-state`: + +``` +public class ApplicationConversionService extends DefaultConversionService { + public ApplicationConversionService() { + addDefaultConverters(); + addDefaultAliases(); + addConverter("customConverter", new CustomConverter()); + } +} + +``` + +``` + + + + + + +``` + +Named converters are not supported and cannot be used with the type conversion service available in Spring 3. +Therefore such converters will not be adapted and will continue to work as before, i.e. will not involve the Spring 3 type conversion. +However, this mechanism is deprecated and applications are encouraged to favor Spring 3 type conversion and formatting features. + +Also note that the existing Spring Binding `DefaultConversionService` no longer registers any default converters. +Instead Web Flow now relies on the default type converters and formatters in Spring 3. + +In summary the Spring 3 type conversion and formatting is now used almost exclusively in Web Flow. +Although existing applications will work without any changes, we encourage moving towards unifying the type conversion needs of Spring MVC and Spring Web Flow parts of applications. + +### 5.7.3. Configuring Type Conversion and Formatting + +In Spring MVC an instance of a `FormattingConversionService` is created automatically through the custom MVC namespace: + +``` + + + + + + +``` + +Internally that is done with the help of `FormattingConversionServiceFactoryBean`, which registers a default set of converters and formatters. +You can customize the conversion service instance used in Spring MVC through the `conversion-service` attribute: + +``` + + +``` + +In Web Flow an instance of a Spring Binding `DefaultConversionService` is created automatically, which does not register any converters. +Instead it delegates to a `FormattingConversionService` instance for all type conversion needs. +By default this is not the same `FormattingConversionService` instance as the one used in Spring 3. +However that won't make a practical difference until you start registering your own formatters. + +The `DefaultConversionService` used in Web Flow can be customized through the flow-builder-services element: + +``` + + +``` + +Connecting the dots in order to register your own formatters for use in both Spring MVC and in Spring Web Flow you can do the following. +Create a class to register your custom formatters: + +``` +public class ApplicationConversionServiceFactoryBean extends FormattingConversionServiceFactoryBean { + + @Override + protected void installFormatters(FormatterRegistry registry) { + // ... + } + +} + + +``` + +Configure it for use in Spring MVC: + +``` + + + + + + + + + + +``` + +Connection the Web Flow `DefaultConversionService` to the same "applicationConversionService" bean used in Spring MVC: + +``` + + + + + + + + +``` + +Of course it is also possible to mix and match. +Register new Spring 3 `Formatter` types through the "applicationConversionService". +Register existing Spring Binding `Converter` types through the "defaultConversionService". + +### 5.7.4. 
Working With Spring 3 Type Conversion And Formatting + +An important concept to understand is the difference between type converters and formatters. + +Type converters in Spring 3, provided in `org.springframework.core`, are for general-purpose type conversion between any two object types. +In addition to the most simple `Converter` type, two other interfaces are `ConverterFactory` and `GenericConverter`. + +Formatters in Spring 3, provided in `org.springframework.context`, have the more specialized purpose of representing Objects as Strings. +The `Formatter` interface extends the `Printer` and `Parser` interfaces for converting an Object to a String and turning a String into an Object. + +Web developers will find the `Formatter` interface most relevant because it fits the needs of web applications for type conversion. + +| ![[Note]](images/note.png) |Note| +|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---| +|An important point to be made is that Object-to-Object conversion is a generalization of the more specific Object-to-String conversion.
In fact in the end `Formatters` are reigstered as `GenericConverter` types with Spring's `GenericConversionService` making them equal to any other converter.| | + +### 5.7.5. Formatting Annotations + +One of the best features of the new type conversion is the ability to use annotations for a better control over formatting in a concise manner. +Annotations can be placed on model attributes and on arguments of @Controller methods that are mapped to requests. +Out of the box Spring provides two annotations `NumberFormat` and `DateTimeFormat` but you can create your own and have them registered along with the associated formatting logic. +You can see examples of the `DateTimeFormat` annotation in the [Spring Travel](https://src.springframework.org/svn/spring-samples/travel) and in the [Petcare](https://src.springframework.org/svn/spring-samples/petcare) along with other samples in the [Spring Samples](https://src.springframework.org/svn/spring-samples) repository. + +### 5.7.6. Working With Dates + +The `DateTimeFormat` annotation implies use of [Joda Time](http://joda-time.sourceforge.net/). +If that is present on the classpath the use of this annotation is enabled automatically. +By default neither Spring MVC nor Web Flow register any other date formatters or converters. +Therefore it is important for applications to register a custom formatter to specify the default way for printing and parsing dates. +The `DateTimeFormat` annotation on the other hand provides more fine-grained control where it is necessary to deviate from the default. + +For more information on working with Spring 3 type conversion and formatting please refer to the relevant sections of the [Spring documentation](http://static.springsource.org/spring/docs/3.0.x/spring-framework-reference/html/index.html). + +## 5.8. Suppressing binding + +Use the `bind` attribute to suppress model binding and validation for particular view events. +The following example suppresses binding when the `cancel` event occurs: + +``` + + + + + +``` + +## 5.9. Specifying bindings explicitly + +Use the `binder` element to configure the exact set of model properties to +apply data binding to. This is useful to restrict the set of "allowed fields" per view. +Not using this could lead to a security issue, depending on the application domain and actual users, +since by default if the binder element is not specified all public properties of the model are +eligible for data binding by the view. By contrast when the `binder` element is specified, +only the explicitly configured bindings are allowed. Below is an example: + +``` + + + + + + + + + + + + +``` + +Each binding may also apply a converter to format the model property value for display in a custom manner. +If no converter is specified, the default converter for the model property's type will be used. + +``` + + + + + + + + + + + + + + +``` + +In the example above, the `shortDate` converter is bound to the`checkinDate` and `checkoutDate` properties. +Custom converters may be registered with the application's ConversionService. + +Each binding may also apply a required check that will generate a validation error +if the user provided value is null on form postback: + +``` + + + + + + + + + + + + + +``` + +In the example above, all of the bindings are required. +If one or more blank input values are bound, validation errors will be generated and the view will re-render with those errors. + +## 5.10. Validating a model + +Model validation is driven by constraints specified against a model object. 
+Web Flow supports enforcing such constraints programatically as well as +declaratively with JSR-303 Bean Validation annotations. + +### 5.10.1. JSR-303 Bean Validation + +Web Flow provides built-in support for the JSR-303 Bean Validation API +building on equivalent support available in Spring MVC. +To enable JSR-303 validation configure the flow-builder-services with +Spring MVC's `LocalValidatorFactoryBean`: + +``` + + + + + + +``` + +With the above in place, the configured validator will be applied to +all model attributes after data binding. + +Note that JSR-303 bean validation and validation by convention +(explained in the next section) are not mutually exclusive. +In other words Web Flow will apply all available validation +mechanisms. + +#### Partial Validation + +JSR-303 Bean Validation supports partial validation through validation groups. For example: + +``` +@NotNull +@Size(min = 2, max = 30, groups = State1.class) +private String name; + +``` + +In a flow definition you can specify validation hints on a view state +or on a transition and those will be resolved to validation groups. +For example: + +``` + + +``` + +The *validation-hints* attribute is an expression +that in the above example resolves to a comma-delimited String consisting +of the hints "group1" and "group2". A `ValidationHintResolver`is used to resolve these hints. The `BeanValidationHintResolver`used by default tries to resolve these strings to Class-based bean validation +groups. To do that it looks for matching inner types in the model or its parent. + +For example given `org.example.MyModel` with inner types`Group1` and `Group2` it is +sufficient to supply the simple type names, i.e. "group1" and "group2". +You can also provide fully qualified type names. + +A hint with the value "default" has a special meaning and is translated +to the default validation group in Bean Validation`javax.validation.groups.Default`. + +A custom `ValidationHintResolver`can be configured if necessary through the validationHintResolver property +of the flow-builder-services element: + +``` + + + + +``` + +### 5.10.2. Programmatic validation + +There are two ways to perform model validation programatically. +The first is to implement validation logic in your model object. +The second is to implement an external `Validator`. +Both ways provide you with a `ValidationContext` to record error messages and access information about the current user. + +#### Implementing a model validate method + +Defining validation logic in your model object is the simplest way to validate its state. +Once such logic is structured according to Web Flow conventions, Web Flow will automatically invoke that logic during the view-state postback lifecycle. +Web Flow conventions have you structure model validation logic by view-state, allowing you to easily validate the subset of model properties that are editable on that view. +To do this, simply create a public method with the name `validate${state}`, where `${state}` is the id of your view-state where you want validation to run. +For example: + +``` +public class Booking { + private Date checkinDate; + private Date checkoutDate; + ... + + public void validateEnterBookingDetails(ValidationContext context) { + MessageContext messages = context.getMessageContext(); + if (checkinDate.before(today())) { + messages.addMessage(new MessageBuilder().error().source("checkinDate"). 
+                defaultText("Check in date must be a future date").build());
+        } else if (!checkinDate.before(checkoutDate)) {
+            messages.addMessage(new MessageBuilder().error().source("checkoutDate").
+                defaultText("Check out date must be later than check in date").build());
+        }
+    }
+}
+```
+
+In the example above, when a transition is triggered in an `enterBookingDetails` view-state that is editing a `Booking` model,
+Web Flow will invoke the `validateEnterBookingDetails(ValidationContext)` method automatically unless validation has been suppressed for that transition.
+An example of such a view-state is shown below:
+
+```
+<view-state id="enterBookingDetails" model="booking">
+    <transition on="proceed" to="reviewBooking" />
+</view-state>
+```
+
+Any number of validation methods can be defined. Generally, a flow edits a model over a series of views. In that case, a validate method would be defined
+for each view-state where validation needs to run.
+
+#### Implementing a Validator
+
+The second way is to define a separate object, called a *Validator*, which validates your model object.
+To do this, first create a class whose name has the pattern `${model}Validator`, where `${model}` is the capitalized form of the model expression, such as `booking`.
+Then define a public method with the name `validate${state}`, where `${state}` is the id of your view-state, such as `enterBookingDetails`.
+The class should then be deployed as a Spring bean. Any number of validation methods can be defined.
+For example:
+
+```
+@Component
+public class BookingValidator {
+    public void validateEnterBookingDetails(Booking booking, ValidationContext context) {
+        MessageContext messages = context.getMessageContext();
+        if (booking.getCheckinDate().before(today())) {
+            messages.addMessage(new MessageBuilder().error().source("checkinDate").
+                defaultText("Check in date must be a future date").build());
+        } else if (!booking.getCheckinDate().before(booking.getCheckoutDate())) {
+            messages.addMessage(new MessageBuilder().error().source("checkoutDate").
+                defaultText("Check out date must be later than check in date").build());
+        }
+    }
+}
+```
+
+In the example above, when a transition is triggered in an `enterBookingDetails` view-state that is editing a `Booking` model,
+Web Flow will invoke the `validateEnterBookingDetails(Booking, ValidationContext)` method automatically unless validation has been suppressed for that transition.
+
+A Validator can also accept a Spring MVC `Errors` object, which is required for invoking existing Spring Validators.
+
+Validators must be registered as Spring beans employing the naming convention `${model}Validator` to be detected and invoked automatically.
+In the example above, Spring 2.5 classpath-scanning would detect the `@Component` and automatically register it as a bean with the name `bookingValidator`.
+Then, anytime the `booking` model needs to be validated, this `bookingValidator` instance would be invoked for you.
+
+#### Default validate method
+
+A *Validator* class can also define a method called `validate` that is not associated (by convention) with any specific view-state:
+
+```
+@Component
+public class BookingValidator {
+    public void validate(Booking booking, ValidationContext context) {
+        //...
+    }
+}
+```
+
+In the above code sample the method `validate` will be called every time a Model of type `Booking` is validated (unless validation has been suppressed for that transition).
+If needed, the default method can also be called in addition to an existing state-specific method.
Consider the following example: + +``` +@Component +public class BookingValidator { + public void validate(Booking booking, ValidationContext context) { + //... + } + public void validateEnterBookingDetails(Booking booking, ValidationContext context) { + //... + } +} + +``` + +In above code sample the method `validateEnterBookingDetails` will be called first. +The default `validate` method will be called next. + +### 5.10.3. ValidationContext + +A ValidationContext allows you to obtain a `MessageContext` to record messages during validation. +It also exposes information about the current user, such as the signaled `userEvent` and the current user's `Principal` identity. +This information can be used to customize validation logic based on what button or link was activated in the UI, or who is authenticated. +See the API Javadocs for `ValidationContext` for more information. + +## 5.11. Suppressing validation + +Use the `validate` attribute to suppress model validation for particular view events: + +``` + + + + + +``` + +In this example, data binding will still occur on `back` but validation will be suppressed. + +## 5.12. Executing view transitions + +Define one or more `transition` elements to handle user events that may occur on the view. +A transition may take the user to another view, or it may simply execute an action and re-render the current view. +A transition may also request the rendering of parts of a view called "fragments" when handling an Ajax event. +Finally, "global" transitions that are shared across all views may also be defined. + +Implementing view transitions is illustrated in the following sections. + +### 5.12.1. Transition actions + +A view-state transition can execute one or more actions before executing. +These actions may return an error result to prevent the transition from exiting the +current view-state. If an error result occurs, the view will re-render and should display +an appropriate message to the user. + +If the transition action invokes a plain Java method, the invoked method may return a boolean +whose value, true or false, indicates whether the transition should take place or be prevented +from executing. A method may also return a String where the literal values "success", "yes", or +"true" indicate the transition should occur, and any other value means the opposite. +This technique can be used to handle exceptions thrown by service-layer methods. +The example below invokes an action that calls a service and handles an exceptional situation: + +``` + + + + +``` + +``` +public class BookingAction { + public boolean makeBooking(Booking booking, MessageContext context) { + try { + bookingService.make(booking); + return true; + } catch (RoomNotAvailableException e) { + context.addMessage(new MessageBuilder().error(). + .defaultText("No room is available at this hotel").build()); + return false; + } + } +} + +``` + +When there is more than one action defined on a transition, if one returns an error result the +remaining actions in the set will *not* be executed. If you need to ensure one +transition action's result cannot impact the execution of another, define a single transition +action that invokes a method that encapsulates all the action logic. + +### 5.12.2. Global transitions + +Use the flow's `global-transitions` element to create transitions that apply across all views. +Global-transitions are often used to handle global menu links that are part of the layout. + +``` + + + + + +``` + +### 5.12.3. 
Event handlers + +From a view-state, transitions without targets can also be defined. Such transitions are called "event handlers": + +``` + + + + +``` + +These event handlers do not change the state of the flow. +They simply execute their actions and re-render the current view or one or more fragments of the current view. + +### 5.12.4. Rendering fragments + +Use the `render` element within a transition to request partial re-rendering of the current view after handling the event: + +``` + + + + + +``` + +The fragments attribute should reference the id(s) of the view element(s) you wish to re-render. +Specify multiple elements to re-render by separating them with a comma delimiter. + +Such partial rendering is often used with events signaled by Ajax to update a specific zone of the view. + +## 5.13. Working with messages + +Spring Web Flow's `MessageContext` is an API for recording messages during the course of flow executions. +Plain text messages can be added to the context, as well as internationalized messages resolved by a Spring `MessageSource`. +Messages are renderable by views and automatically survive flow execution redirects. +Three distinct message severities are provided: `info`, `warning`, and `error`. +In addition, a convenient `MessageBuilder` exists for fluently constructing messages. + +### 5.13.1. Adding plain text messages + +``` +MessageContext context = ... +MessageBuilder builder = new MessageBuilder(); +context.addMessage(builder.error().source("checkinDate") + .defaultText("Check in date must be a future date").build()); +context.addMessage(builder.warn().source("smoking") + .defaultText("Smoking is bad for your health").build()); +context.addMessage(builder.info() + .defaultText("We have processed your reservation - thank you and enjoy your stay").build()); + +``` + +### 5.13.2. Adding internationalized messages + +``` +MessageContext context = ... +MessageBuilder builder = new MessageBuilder(); +context.addMessage(builder.error().source("checkinDate").code("checkinDate.notFuture").build()); +context.addMessage(builder.warn().source("smoking").code("notHealthy") + .resolvableArg("smoking").build()); +context.addMessage(builder.info().code("reservationConfirmation").build()); + +``` + +### 5.13.3. Using message bundles + +Internationalized messages are defined in message bundles accessed by a Spring `MessageSource`. +To create a flow-specific message bundle, simply define `messages.properties` file(s) in your flow's directory. +Create a default `messages.properties` file and a .properties file for each additional `Locale` you need to support. + +``` +#messages.properties +checkinDate=Check in date must be a future date +notHealthy={0} is bad for your health +reservationConfirmation=We have processed your reservation - thank you and enjoy your stay + +``` + +From within a view or a flow, you may also access message resources using the `resourceBundle` EL variable: + +``` + + +``` + +### 5.13.4. Understanding system generated messages + +There are several places where Web Flow itself will generate messages to display to the user. +One important place this occurs is during view-to-model data binding. +When a binding error occurs, such as a type conversion error, Web Flow will map that error to a message retrieved from your resource bundle automatically. +To lookup the message to display, Web Flow tries resource keys that contain the binding error code and target property name. + +As an example, consider a binding to a `checkinDate` property of a `Booking` object. 
+Suppose the user typed in an alphabetic string.
+In this case, a type conversion error will be raised.
+Web Flow will map the 'typeMismatch' error code to a message by first querying your resource bundle for a message with the following key:
+
+```
+booking.checkinDate.typeMismatch
+```
+
+The first part of the key is the model class's short name.
+The second part of the key is the property name. The third part is the error code.
+This allows for the lookup of a unique message to display to the user when a binding fails on a model property.
+Such a message might say:
+
+```
+booking.checkinDate.typeMismatch=The check in date must be in the format yyyy-mm-dd.
+```
+
+If no resource key of that form can be found, a more generic key will be tried.
+This key is simply the error code. The field name of the property is provided as a message argument:
+
+```
+typeMismatch=The {0} field is of the wrong type.
+```
+
+## 5.14. Displaying popups
+
+Use the `popup` attribute to render a view in a modal popup dialog:
+
+```
+<view-state id="changeSearchCriteria" view="enterSearchCriteria.xhtml" popup="true">
+```
+
+When using Web Flow with Spring JavaScript, no client-side code is necessary for the popup to display.
+Web Flow will send a response to the client requesting a redirect to the view from a popup, and the client will honor the request.
+
+## 5.15. View backtracking
+
+By default, when you exit a view state and transition to a new view state, you can go back to the previous state using the browser back button.
+These view state history policies are configurable on a per-transition basis by using the `history` attribute.
+
+### 5.15.1. Discarding history
+
+Set the history attribute to `discard` to prevent backtracking to a view:
+
+```
+<transition on="cancel" to="bookingCancelled" history="discard" />
+```
+
+### 5.15.2. Invalidating history
+
+Set the history attribute to `invalidate` to prevent backtracking to a view as well as to all previously displayed views:
+
+```
+<transition on="confirm" to="bookingConfirmed" history="invalidate" />
+```
\ No newline at end of file
diff --git a/docs/en/spring-web-flow/whatsnew.md b/docs/en/spring-web-flow/whatsnew.md
new file mode 100644
index 0000000000000000000000000000000000000000..a1580803c9e46d80088240778217f43da0c3c61d
--- /dev/null
+++ b/docs/en/spring-web-flow/whatsnew.md
@@ -0,0 +1,226 @@
+# 2. What's New
+
+## 2.1. Spring Web Flow 2.5
+
+This release provides an upgrade path to Spring Framework 5, which in turn requires
+Java 8+, Servlet 3.1, Hibernate 5, and Tiles 3. See the [Spring Framework wiki](https://github.com/spring-projects/spring-framework/wiki/What%27s-New-in-Spring-Framework-5.x) for more details. The [samples repository](https://github.com/spring-projects/spring-webflow-samples) has been upgraded to Spring Web Flow 2.5.
+
+As of 2.5 there is no longer a *spring-js* module. The classes from that module
+have been kept but moved to new packages in the *spring-webflow* module.
+The *spring-js-resources* module is available as an optional module that
+must be included explicitly.
+
+This release requires JSF 2.2 or higher.
+
+## 2.2. Spring Web Flow 2.4
+
+This release requires JDK 1.6.
+
+### 2.2.1. Java-based Configuration
+
+Web Flow now supports a Java-based alternative for its system configuration.
+See the updated [Chapter 10, *System Setup*](system-setup.html).
+
+Also see the [booking-mvc](https://github.com/spring-projects/spring-webflow-samples/tree/master/booking-mvc) and [booking-faces](https://github.com/spring-projects/spring-webflow-samples/tree/master/booking-faces) samples, which have been updated to use all-Java config.
+
+### 2.2.2. 
Spring MVC Flash Scope Integration
+
+When a flow ends it can now redirect to a Spring MVC controller after saving
+attributes in Spring MVC's flash scope for the controller to access.
+
+See [Section 11.8, “Saving Flow Output to MVC Flash Scope”](spring-mvc.html#spring-mvc-flash-output).
+
+### 2.2.3. Partial JSR-303 Bean Validation
+
+A flow definition can apply partial validation on the model through the validation-hints
+attribute supported on view state and transition elements.
+
+See [the section called “Partial Validation”](views.html#view-validation-jsr303-partial).
+
+### 2.2.4. Hibernate Support
+
+The `HibernateFlowExecutionListener` now supports Hibernate 4 in addition to Hibernate 3.
+
+As of 2.4.4 the `HibernateFlowExecutionListener` also works with Hibernate 5.
+
+### 2.2.5. Tiles 3 Support
+
+The `AjaxTilesView` now supports Tiles 3 in addition to Tiles 2.2.
+
+### 2.2.6. Minimum JSF 2.0 Requirement
+
+JavaServer Faces version 1.2 and earlier are no longer supported by Spring Web Flow. If you have not done so already, you will need to upgrade to JSF 2.0 or above.
+In addition, the Spring Faces components that were previously provided with JSF 1.2 for progressive AJAX enhancements have been removed in this release.
+
+See [???]().
+
+### 2.2.7. Portlet API 2.0 and JSF 2.0 support
+
+The internal Portlet integration introduced in Spring Web Flow 2.2 has been upgraded for JSF 2.0 compatibility.
+Some of the more advanced JSF 2.0 features, such as partial state saving, are not supported in a Portlet environment; however, existing applications can now upgrade to the minimum required JSF version.
+Upgraded projects will need to ensure that the `` element is
+included as part of their Spring configuration.
+
+### 2.2.8. Deprecations
+
+This release deprecates *Spring.js*. The deprecation includes the entire
+*spring-js-resources* module, including *Spring.js* and *Spring-Dojo.js* and the bundled Dojo and CSS Framework.
+Also deprecated is the `SpringJavascriptAjaxHandler` from the *spring-js* module. The rest of *spring-js*,
+e.g. `AjaxHandler` and `AjaxTilesView`, will be
+folded into *spring-webflow* in a future release.
+
+OGNL support is now deprecated.
+
+## 2.3. Spring Web Flow 2.3
+
+### 2.3.1. Embedding A Flow On A Page
+
+By default Web Flow does a client-side redirect upon entering every view state.
+That makes it impossible to embed a flow on a page or within a modal dialog and execute more than one view state without causing a full-page refresh.
+Web Flow now supports launching a flow in "embedded" mode.
+In this mode a flow can transition to other view states without a client-side redirect during Ajax requests.
+See [Section 11.7, “Embedding A Flow On A Page”](spring-mvc.html#spring-mvc-embedded-flow) and [Section 13.6, “Embedding a Flow On a Page”](spring-faces.html#spring-faces-embedded-mode).
+
+### 2.3.2. Support For JSR-303 Bean Validation
+
+Support for the JSR-303 Bean Validation API is now available, building on equivalent support available in Spring MVC.
+See [Section 5.10, “Validating a model”](views.html#view-validate) for more details.
+
+### 2.3.3. Flow-Managed Persistence Context Propagation
+
+Starting with Web Flow 2.3, a flow-managed `PersistenceContext` is automatically extended (propagated) to subflows, assuming the subflow also has the feature enabled.
+See [Section 7.3, “Flow Managed Persistence And Sub-Flows”](flow-managed-persistence.html#flow-managed-persistence-propagation).
+
+### 2.3.4. 
Portlet 2.0 Resource Requests + +Support for Portlet 2.0 resource requests has now been added enabling Ajax requests with partial rendering. +URLs for such requests can be prepared with the `` tag in JSP pages. +Server-side processing is similar to a combined an action and a render requests but combined in a single request. +Unlike a render request, the response from a resource request includes content from the target portlet only. + +### 2.3.5. Custom ConversationManager + +The `` element now provides a conversation-manager attribute accepting a reference to a ConversationManager instance. + +### 2.3.6. Redirect In Same State + +By default Web Flow does a client-side redirect when remaining in the same view state as long as the current request is not an Ajax request. +This is useful after form validation failure. +Hitting Refresh or Back won't result in browser warnings. Hence this behavior is usually desirable. +However a new flow execution attribute makes it possible to disable it and that may also be necessary in some cases specific to JSF applications. +See [Section 13.7, “Redirect In Same State”](spring-faces.html#spring-faces-redirect-in-same-state). + +### 2.3.7. Samples + +The process for building the samples included with the distribution has been simplified. +Maven can be used to build all samples in one step. +Eclipse settings include source code references to simplify debugging. + +Additional samples can be accessed as follows: + +``` +mkdir spring-samples +cd spring-samples +svn co https://src.springframework.org/svn/spring-samples/webflow-primefaces-showcase +cd webflow-primefaces-showcase +mvn package +# import into Eclipse + +``` + +``` +mkdir spring-samples +cd spring-samples +svn co https://src.springframework.org/svn/spring-samples/webflow-showcase +cd webflow-showcase +mvn package +# import into Eclipse + +``` + +## 2.4. Spring Web Flow 2.2 + +### 2.4.1. JSF 2 Support + +#### Comprehensive JSF 2 Support + +Building on 2.1, Spring Web Flow version 2.2 adds support for core JSF 2 features +The following features that were not supported in 2.1 are now available: +partial state saving, JSF 2 resource request, handling, and JSF 2 Ajax requests. +At this point support for JSF 2 is considered +comprehensive although not covering every JSF 2 feature -- +excluded are mostly features that overlap with the core value Web Flow provides +such as those relating to navigation and state management. + +See [Section 13.3, “Configuring Web Flow for use with JSF”](spring-faces.html#spring-faces-webflow-config) for important configuration changes. +Note that partial state saving is only supported with Sun Mojarra 2.0.3 or later. +It is not yet supported with Apache MyFaces. This is due to the +fact MyFaces was not as easy to customize with regards to how component state is stored. +We will work with Apache MyFaces to provide this support. In the mean time you will need to use +the `javax.faces.PARTIAL_STATE_SAVING` context parameter in `web.xml`to disable partial state saving with Apache MyFaces. + +#### Travel Sample With the PrimeFaces Components + +The main Spring Travel sample demonstrating Spring Web Flow and JSF support +is now built on JSF 2 and components from the PrimeFaces component library. +Please check out the booking-faces sample in the distribution. 
+
+Additional samples can be found at the Spring Web Flow - PrimeFaces [Showcase](https://src.springframework.org/svn/spring-samples/webflow-primefaces-showcase), an SVN repository within the [spring-samples](https://src.springframework.org/svn/spring-samples) repository. Use these commands to check out and build:
+
+```
+svn co https://src.springframework.org/svn/spring-samples/webflow-primefaces-showcase
+cd webflow-primefaces-showcase
+mvn package
+```
+
+### 2.4.2. Spring Security Facelets Tag Library
+
+A new Spring Security tag library is available for use with JSF 2.0 or with JSF 1.2 Facelets views.
+It provides an `<authorize>` tag as well as several EL functions.
+See [Section 13.9, “Using the Spring Security Facelets Tag Library”](spring-faces.html#spring-faces-security-taglib) for more details.
+
+### 2.4.3. Spring JavaScript Updates
+
+#### Deprecated ResourcesServlet
+
+Starting with Spring 3.0.4, the Spring Framework includes
+a replacement for the ResourcesServlet. Please see
+the Spring Framework documentation for details on the custom mvc namespace,
+specifically the new ["resources"](http://static.springsource.org/spring/docs/3.0.x/spring-framework-reference/html/mvc.html#mvc-static-resources) element.
+
+#### Dojo 1.5 and dojox
+
+The bundled custom Dojo build is upgraded to version 1.5. It now includes dojox.
+
+Note that applications are generally encouraged to prepare their own custom
+Dojo build for optimized performance, depending on what parts of Dojo are
+commonly used together. For examples see the [scripts](https://src.springframework.org/svn/spring-webflow/branches/spring-webflow-2.2-maintenance/spring-js-resources/scripts/dojo) used by Spring Web Flow to prepare its own custom Dojo build.
+
+#### Two Spring JS artifacts
+
+The `spring-js` artifact has been split in two -- the new artifact
+(`spring-js-resources`) contains client-side resources (.js, .css, etc.) while
+the existing artifact (`spring-js`) contains server-side Java code only.
+
+Applications preparing their own custom Dojo build now have the option to
+avoid including `spring-js-resources` and put `Spring.js` and `Spring-Dojo.js` directly under the root of their web application.
+
+#### Client resources moved into META-INF/web-resources
+
+Bundled client resources (.js, .css, etc.)
+have been moved to `META-INF/web-resources` from their previous location
+under `META-INF`. This change is transparent for applications but will result
+in simpler and safer configuration when using the new resource handling
+mechanism available in Spring 3.0.4.
+
+### 2.4.4. JSF Portlet Support
+
+#### Portlet API 2.0 and JSF 1.2 support
+
+In previous versions of Spring Web Flow, support for JSF Portlets relied on
+a Portlet Bridge for JSF implementation and was considered experimental.
+Spring Web Flow 2.2 adds support for JSF Portlets based on its own internal
+Portlet integration targeting Portlet API 2.0 and JSF 1.2 environments.
+See [???]() for more details.
+The Spring Web Flow Travel JSF Portlets sample has been successfully
+tested on the Apache Pluto portal container.
\ No newline at end of file diff --git a/docs/en/spring-web-services/READEME.md b/docs/en/spring-web-services/READEME.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/en/spring-web-services/spring-web-service.md b/docs/en/spring-web-services/spring-web-service.md new file mode 100644 index 0000000000000000000000000000000000000000..977b1794e5c0af2624a990ee6761862cc7e406e3 --- /dev/null +++ b/docs/en/spring-web-services/spring-web-service.md @@ -0,0 +1,3476 @@ +# Spring Web Services Reference Documentation + +## Preface + +In the current age of Service Oriented Architectures, more and more people use web services to connect previously unconnected systems. Initially, web services were considered to be just another way to do a Remote Procedure Call (RPC). Over time, however, people found out that there is a big difference between RPCs and web services. Especially when interoperability with other platforms is important, it is often better to send encapsulated XML documents that contain all the data necessary to process the request. Conceptually, XML-based web services are better compared to message queues than to remoting solutions. Overall, XML should be considered the platform-neutral representation of data, the *common language* of SOA. When developing or using web services, the focus should be on this XML and not on Java. + +Spring Web Services focuses on creating these document-driven web services. Spring Web Services facilitates contract-first SOAP service development, allowing for the creation of flexible web services by using one of the many ways to manipulate XML payloads. Spring-WS provides a powerful [message dispatching framework](#server), a [WS-Security](#security) solution that integrates with your existing application security solution, and a [Client-side API](#client) that follows the familiar Spring template pattern. + +# I. Introduction + +This first part of the reference documentation [is an overview](#what-is-spring-ws) of Spring Web Services and the underlying concepts. Spring-WS is then introduced, and [the concepts](#why-contract-first) behind contract-first web service development are explained. + +## 1. What is Spring Web Services? + +### 1.1. Introduction + +Spring Web Services (Spring-WS) is a product of the Spring community and is focused on creating document-driven web services. Spring Web Services aims to facilitate contract-first SOAP service development, allowing for the creation of flexible web services by using one of the many ways to manipulate XML payloads. The product is based on Spring itself, which means you can use the Spring concepts (such as dependency injection) as an integral part of your web service. + +People use Spring-WS for many reasons, but most are drawn to it after finding alternative SOAP stacks lacking when it comes to following web service best practices. Spring-WS makes the best practice an easy practice. This includes practices such as the WS-I basic profile, contract-first development, and having a loose coupling between contract and implementation. 
The other key features of Spring Web Services are: + +* [Powerful mappings](#features-powerful-mappings) + +* [XML API support](#features-xml-api-support) + +* [Flexible XML Marshalling](#features-flexible-xml-marshalling) + +* [Reusing Your Spring expertise](#features-reusing-your-spring-expertise) + +* [Support for WS-Security](#features-support-for-ws-security) + +* [Integration with Spring Security](#features-integration-with-spring-security) + +* [Apache license](#features-apache-license) + +#### 1.1.1. Powerful mappings + +You can distribute incoming XML requests to any object, depending on message payload, SOAP Action header, or an XPath expression. + +#### 1.1.2. XML API support + +Incoming XML messages can be handled not only with standard JAXP APIs such as DOM, SAX, and StAX, but also with JDOM, dom4j, XOM, or even marshalling technologies. + +#### 1.1.3. Flexible XML Marshalling + +Spring Web Services builds on the Object/XML Mapping module in the Spring Framework, which supports JAXB 1 and 2, Castor, XMLBeans, JiBX, and XStream. + +#### 1.1.4. Reusing Your Spring expertise + +Spring-WS uses Spring application contexts for all configuration, which should help Spring developers get up-to-speed quickly. Also, the architecture of Spring-WS resembles that of Spring-MVC. + +#### 1.1.5. Support for WS-Security + +WS-Security lets you sign SOAP messages, encrypt and decrypt them, or authenticate against them. + +#### 1.1.6. Integration with Spring Security + +The WS-Security implementation of Spring Web Services provides integration with Spring Security. This means you can use your existing Spring Security configuration for your SOAP service as well. + +#### 1.1.7. Apache license + +You can confidently use Spring-WS in your project. + +### 1.2. Runtime environment + +Spring Web Services requires a standard Java 8 Runtime Environment. Spring-WS is built on Spring Framework 4.0.9, but higher versions are supported. + +Spring-WS consists of a number of modules, which are described in the remainder of this section. + +* The XML module (`spring-xml.jar`) contains various XML support classes for Spring Web Services. This module is mainly intended for the Spring-WS framework itself and not web service developers. + +* The Core module (`spring-ws-core.jar`) is the central part of the Spring’s web services functionality. It provides the central [`WebServiceMessage`](#web-service-messages) and [`SoapMessage`](#soap-message) interfaces, the [server-side](#server) framework (with powerful message dispatching), the various support classes for implementing web service endpoints, and the [client-side](#client) `WebServiceTemplate`. + +* The Support module (`spring-ws-support.jar`) contains additional transports (JMS, Email, and others). + +* The [Security](#security) package (`spring-ws-security.jar`) provides a WS-Security implementation that integrates with the core web service package. It lets you sign, decrypt and encrypt, and add principal tokens to SOAP messages. Additionally, it lets you use your existing Spring Security security implementation for authentication and authorization. + +The following figure shows and the dependencies between the Spring-WS modules. Arrows indicate dependencies (that is, Spring-WS Core depends on Spring-XML and the OXM module found in Spring 3 and higher). + +![spring deps](spring-deps.png) + +### 1.3. 
Supported standards + +Spring Web Services supports the following standards: + +* SOAP 1.1 and 1.2 + +* WSDL 1.1 and 2.0 (XSD-based generation is supported only for WSDL 1.1) + +* WS-I Basic Profile 1.0, 1.1, 1.2, and 2.0 + +* WS-Addressing 1.0 and the August 2004 draft + +* SOAP Message Security 1.1, Username Token Profile 1.1, X.509 Certificate Token Profile 1.1, SAML Token Profile 1.1, Kerberos Token Profile 1.1, Basic Security Profile 1.1 + +## 2. Why Contract First? + +When creating web services, there are two development styles: contract-last and contract-first. When you use a contract-last approach, you start with the Java code and let the web service contract (in WSDL — see sidebar) be generated from that. When using contract-first, you start with the WSDL contract and use Java to implement the contract. + +**What is WSDL?** + +WSDL stands for Web Service Description Language. A WSDL file is an XML document that describes a web service. It specifies the location of the service and the operations (or methods) the service exposes. For more information about WSDL, see the [WSDL specification](https://www.w3.org/TR/wsdl). + +Spring-WS supports only the contract-first development style, and this section explains why. + +### 2.1. Object/XML Impedance Mismatch + +Similar to the field of ORM, where we have an [Object/Relational impedance mismatch](https://en.wikipedia.org/wiki/Object-Relational_impedance_mismatch), converting Java objects to XML has a similar problem. At first glance, the O/X mapping problem appears simple: Create an XML element for each Java object to convert all Java properties and fields to sub-elements or attributes. However, things are not as simple as they appear, because there is a fundamental difference between hierarchical languages, such as XML (and especially XSD), and the graph model of Java. + +| |Most of the contents in this section were inspired by [[alpine]](#alpine) and [[effective-enterprise-java]](#effective-enterprise-java).| +|---|----------------------------------------------------------------------------------------------------------------------------------------| + +#### 2.1.1. XSD Extensions + +In Java, the only way to change the behavior of a class is to subclass it to add the new behavior to that subclass. In XSD, you can extend a data type by restricting it — that is, constraining the valid values for the elements and attributes. For instance, consider the following example: + +``` + + + + + +``` + +This type restricts a XSD string by way of a regular expression, allowing only three upper case letters. If this type is converted to Java, we end up with an ordinary `java.lang.String`. The regular expression is lost in the conversion process, because Java does not allow for these sorts of extensions. + +#### 2.1.2. Unportable Types + +One of the most important goals of a web service is to be interoperable: to support multiple platforms such as Java, .NET, Python, and others. Because all of these languages have different class libraries, you must use some common, cross-language format to communicate between them. That format is XML, which is supported by all of these languages. + +Because of this conversion, you must make sure that you use portable types in your service implementation. Consider, for example, a service that returns a `java.util.TreeMap`: + +``` +public Map getFlights() { + // use a tree map, to make sure it's sorted + TreeMap map = new TreeMap(); + map.put("KL1117", "Stockholm"); + ... 
+
+#### 2.1.2. Unportable Types
+
+One of the most important goals of a web service is to be interoperable: to support multiple platforms, such as Java, .NET, Python, and others. Because all of these languages have different class libraries, you must use some common, cross-language format to communicate between them. That format is XML, which is supported by all of these languages.
+
+Because of this conversion, you must make sure that you use portable types in your service implementation. Consider, for example, a service that returns a `java.util.TreeMap`:
+
+```
+public Map<String, String> getFlights() {
+    // use a tree map, to make sure it's sorted
+    TreeMap<String, String> map = new TreeMap<>();
+    map.put("KL1117", "Stockholm");
+    ...
+    return map;
+}
+```
+
+Undoubtedly, the contents of this map can be converted into some sort of XML, but since there is no standard way to describe a map in XML, it will be proprietary. Also, even if it can be converted to XML, many platforms do not have a data structure similar to the `TreeMap`. So when a .NET client accesses your web service, it probably ends up with a `System.Collections.Hashtable`, which has different semantics.
+
+This problem is also present when working on the client side. Consider the following XSD snippet, which describes a service contract:
+
+```
+<element name="GetFlightsRequest">
+    <complexType>
+        <all>
+            <element name="departureDate" type="date"/>
+            <element name="from" type="string"/>
+            <element name="to" type="string"/>
+        </all>
+    </complexType>
+</element>
+```
+
+This contract defines a request that takes a `date`, which is an XSD datatype representing a year, a month, and a day. If we call this service from Java, we probably use either a `java.util.Date` or a `java.util.Calendar`. However, both of these classes actually describe times, rather than dates. So we actually end up sending data that represents the fourth of April 2007 at midnight (`2007-04-04T00:00:00`), which is not the same as `2007-04-04`.
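+
+A quick, hedged illustration of this mismatch (the class and values are ours, not part of the contract): parsing the wire value `2007-04-04` into a `java.util.Date` silently adds a time-of-day component:
+
+```
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+public class DateMismatch {
+
+    public static void main(String[] args) throws Exception {
+        // xs:date carries only a year, a month, and a day...
+        SimpleDateFormat xsdDate = new SimpleDateFormat("yyyy-MM-dd");
+        Date date = xsdDate.parse("2007-04-04");
+        // ...but java.util.Date always carries a time as well:
+        // prints something like "Wed Apr 04 00:00:00 CEST 2007"
+        System.out.println(date);
+    }
+}
+```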
+
+#### 2.1.3. Cyclic Graphs
+
+Imagine we have the following class structure:
+
+```
+public class Flight {
+    private String number;
+    private List<Passenger> passengers;
+
+    // getters and setters omitted
+}
+
+public class Passenger {
+    private String name;
+    private Flight flight;
+
+    // getters and setters omitted
+}
+```
+
+This is a cyclic graph: the `Flight` refers to the `Passenger`, which refers to the `Flight` again. Cyclic graphs like these are quite common in Java. If we take a naive approach to converting this to XML, we end up with something like:
+
+```
+<flight number="KL1117">
+    <passengers>
+        <passenger>
+            <name>Arjen Poutsma</name>
+            <flight number="KL1117">
+                <passengers>
+                    <passenger>
+                        <name>Arjen Poutsma</name>
+                        <flight number="KL1117">
+                            <passengers>
+                                <passenger>
+                                    <name>Arjen Poutsma</name>
+                                    ...
+```
+
+Processing such a structure is likely to take a long time to finish, because there is no stop condition for this loop.
+
+One way to solve this problem is to use references to objects that were already marshalled:
+
+```
+<flight number="KL1117">
+    <passengers>
+        <passenger id="1">
+            <name>Arjen Poutsma</name>
+        </passenger>
+        <passenger href="#1"/>
+        ...
+    </passengers>
+</flight>
+```
+
+This solves the recursion problem but introduces new ones. For one, you cannot use an XML validator to validate this structure. Another issue is that the standard way to use these references in SOAP (RPC/encoded) has been deprecated in favor of document/literal (see the WS-I [Basic Profile](http://www.ws-i.org/Profiles/BasicProfile-1.1.html#SOAP_encodingStyle_Attribute)).
+
+These are just a few of the problems when dealing with O/X mapping. It is important to respect these issues when you write web services. The best way to respect them is to focus completely on the XML, while using Java only as an implementation language. This is what contract-first is all about.
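+
+For completeness, here is a hedged sketch of one common workaround when you do marshal such graphs with JAXB2 (one of the marshalling technologies mentioned earlier): exclude the back-reference so that marshalling terminates. The annotations are standard JAXB; the classes merely mirror the example above:
+
+```
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlTransient;
+import java.util.List;
+
+@XmlRootElement
+public class Flight {
+
+    private String number;
+    private List<Passenger> passengers;
+
+    public String getNumber() { return number; }
+    public void setNumber(String number) { this.number = number; }
+
+    public List<Passenger> getPassengers() { return passengers; }
+    public void setPassengers(List<Passenger> passengers) { this.passengers = passengers; }
+}
+
+class Passenger {
+
+    private String name;
+    private Flight flight;
+
+    public String getName() { return name; }
+    public void setName(String name) { this.name = name; }
+
+    @XmlTransient // break the cycle: the back-reference is not marshalled
+    public Flight getFlight() { return flight; }
+    public void setFlight(Flight flight) { this.flight = flight; }
+}
+```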
+
+### 2.2. Contract-first Versus Contract-last
+
+Besides the Object/XML Mapping issues mentioned in the previous section, there are other reasons for preferring a contract-first development style.
+
+* [Fragility](#contract-first-fragility)
+
+* [Performance](#contract-first-performance)
+
+* [Reusability](#contract-first-reusability)
+
+* [Versioning](#contract-first-versioning)
+
+#### 2.2.1. Fragility
+
+As mentioned earlier, the contract-last development style results in your web service contract (WSDL and your XSD) being generated from your Java contract (usually an interface). If you use this approach, you have no guarantee that the contract stays constant over time. Each time you change your Java contract and redeploy it, there might be subsequent changes to the web service contract.
+
+Additionally, not all SOAP stacks generate the same web service contract from a Java contract. This means that changing your current SOAP stack for a different one (for whatever reason) might also change your web service contract.
+
+When a web service contract changes, users of the contract have to be instructed to obtain the new contract and potentially change their code to accommodate any changes in the contract.
+
+For a contract to be useful, it must remain constant for as long as possible. If a contract changes, you have to contact all the users of your service and instruct them to get the new version of the contract.
+
+#### 2.2.2. Performance
+
+When a Java object is automatically transformed into XML, there is no way to be sure as to what is sent across the wire. An object might reference another object, which refers to another, and so on. In the end, half of the objects on the heap in your virtual machine might be converted into XML, which results in slow response times.
+
+When using contract-first, you explicitly describe what XML is sent where, thus making sure that it is exactly what you want.
+
+#### 2.2.3. Reusability
+
+Defining your schema in a separate file lets you reuse that file in different scenarios. Consider the definition of an `AirportCode` in a file called `airline.xsd`:
+
+```
+<simpleType name="AirportCode">
+    <restriction base="string">
+        <pattern value="[A-Z][A-Z][A-Z]"/>
+    </restriction>
+</simpleType>
+```
+
+You can reuse this definition in other schemas, or even WSDL files, by using an `import` statement.
+
+#### 2.2.4. Versioning
+
+Even though a contract must remain constant for as long as possible, contracts do sometimes need to change. In Java, this typically results in a new Java interface, such as `AirlineService2`, and a (new) implementation of that interface. Of course, the old service must be kept around, because there might be clients who have not yet migrated.
+
+When we use contract-first, we can have a looser coupling between contract and implementation. Such a looser coupling lets us implement both versions of the contract in one class. We could, for instance, use an XSLT stylesheet to convert any “old-style” messages to the “new-style” messages.
+
+## 3. Writing Contract-First Web Services
+
+This tutorial shows you how to write [contract-first web services](#why-contract-first), that is, how to develop web services that start with the XML Schema or WSDL contract and follow with the Java code. Spring-WS focuses on this development style, and this tutorial should help you get started. Note that the first part of this tutorial contains almost no Spring-WS-specific information. It is mostly about XML, XSD, and WSDL. The [second part](#tutorial-creating-project) focuses on implementing this contract with Spring-WS.
+
+The most important thing when doing contract-first web service development is to think in terms of XML. This means that Java language concepts are of lesser importance. It is the XML that is sent across the wire, and you should focus on that. The fact that Java is used to implement the web service is an implementation detail.
+
+In this tutorial, we define a web service that is created by a Human Resources department. Clients can send holiday request forms to this service to book a holiday.
+
+### 3.1. Messages
+
+In this section, we focus on the actual XML messages that are sent to and from the web service. We start out by determining what these messages look like.
+
+#### 3.1.1. Holiday
+
+In the scenario, we have to deal with holiday requests, so it makes sense to determine what a holiday looks like in XML:
+
+```
+<Holiday xmlns="http://mycompany.com/hr/schemas">
+    <StartDate>2006-07-03</StartDate>
+    <EndDate>2006-07-07</EndDate>
+</Holiday>
+```
+
+A holiday consists of a start date and an end date. We have also decided to use the standard [ISO 8601](https://www.cl.cam.ac.uk/~mgk25/iso-time.html) date format for the dates, because that saves a lot of parsing hassle. We have also added a namespace to the element, to make sure our elements can be used within other XML documents.
+
+#### 3.1.2. Employee
+
+There is also the notion of an employee in the scenario. Here is what it looks like in XML:
+
+```
+<Employee xmlns="http://mycompany.com/hr/schemas">
+    <Number>42</Number>
+    <FirstName>Arjen</FirstName>
+    <LastName>Poutsma</LastName>
+</Employee>
+```
+
+We have used the same namespace as before. If this `<Employee/>` element could be used in other scenarios, it might make sense to use a different namespace, such as `[http://example.com/employees/schemas](http://example.com/employees/schemas)`.
+
+#### 3.1.3. HolidayRequest
+
+Both the `holiday` element and the `employee` element can be put in a `<HolidayRequest/>`:
+
+```
+<HolidayRequest xmlns="http://mycompany.com/hr/schemas">
+    <Holiday>
+        <StartDate>2006-07-03</StartDate>
+        <EndDate>2006-07-07</EndDate>
+    </Holiday>
+    <Employee>
+        <Number>42</Number>
+        <FirstName>Arjen</FirstName>
+        <LastName>Poutsma</LastName>
+    </Employee>
+</HolidayRequest>
+```
+
+The order of the two elements does not matter: `<Employee/>` could have been the first element. What matters is that all of the data is there. In fact, the data is the only thing that is important: We take a data-driven approach.
+
+### 3.2. Data Contract
+
+Now that we have seen some examples of the XML data that we can use, it makes sense to formalize this into a schema. This data contract defines the message format we accept. There are four different ways of defining such a contract for XML:
+
+* DTDs
+
+* [XML Schema (XSD)](https://www.w3.org/XML/Schema)
+
+* [RELAX NG](http://www.relaxng.org/)
+
+* [Schematron](http://www.schematron.com/)
+
+DTDs have limited namespace support, so they are not suitable for web services. RELAX NG and Schematron are easier than XML Schema. Unfortunately, they are not so widely supported across platforms. As a result, we use XML Schema.
+
+By far, the easiest way to create an XSD is to infer it from sample documents. Any good XML editor or Java IDE offers this functionality. Basically, these tools use some sample XML documents to generate a schema that validates them all. The end result certainly needs to be polished up, but it is a great starting point.
+
+Using the sample described earlier, we end up with the following generated schema:
+
+```
+<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
+        xmlns:hr="http://mycompany.com/hr/schemas"
+        elementFormDefault="qualified"
+        targetNamespace="http://mycompany.com/hr/schemas">
+    <xs:element name="HolidayRequest">
+        <xs:complexType>
+            <xs:sequence>
+                <xs:element ref="hr:Holiday"/>
+                <xs:element ref="hr:Employee"/>
+            </xs:sequence>
+        </xs:complexType>
+    </xs:element>
+    <xs:element name="Holiday">
+        <xs:complexType>
+            <xs:sequence>
+                <xs:element ref="hr:StartDate"/>
+                <xs:element ref="hr:EndDate"/>
+            </xs:sequence>
+        </xs:complexType>
+    </xs:element>
+    <xs:element name="StartDate" type="xs:NMTOKEN"/>
+    <xs:element name="EndDate" type="xs:NMTOKEN"/>
+    <xs:element name="Employee">
+        <xs:complexType>
+            <xs:sequence>
+                <xs:element ref="hr:Number"/>
+                <xs:element ref="hr:FirstName"/>
+                <xs:element ref="hr:LastName"/>
+            </xs:sequence>
+        </xs:complexType>
+    </xs:element>
+    <xs:element name="Number" type="xs:integer"/>
+    <xs:element name="FirstName" type="xs:NCName"/>
+    <xs:element name="LastName" type="xs:NCName"/>
+</xs:schema>
+```
+
+This generated schema can be improved. The first thing to notice is that every type has a root-level element declaration. This means that the web service should be able to accept all of these elements as data. This is not desirable: We want to accept only a `<HolidayRequest/>`. By removing the wrapping element tags (thus keeping the types) and inlining the results, we can accomplish this, as follows:
+
+```
+<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
+        xmlns:hr="http://mycompany.com/hr/schemas"
+        elementFormDefault="qualified"
+        targetNamespace="http://mycompany.com/hr/schemas">
+    <xs:element name="HolidayRequest">
+        <xs:complexType>
+            <xs:sequence>
+                <xs:element name="Holiday" type="hr:HolidayType"/>
+                <xs:element name="Employee" type="hr:EmployeeType"/>
+            </xs:sequence>
+        </xs:complexType>
+    </xs:element>
+    <xs:complexType name="HolidayType">
+        <xs:sequence>
+            <xs:element name="StartDate" type="xs:NMTOKEN"/>
+            <xs:element name="EndDate" type="xs:NMTOKEN"/>
+        </xs:sequence>
+    </xs:complexType>
+    <xs:complexType name="EmployeeType">
+        <xs:sequence>
+            <xs:element name="Number" type="xs:integer"/>
+            <xs:element name="FirstName" type="xs:NCName"/>
+            <xs:element name="LastName" type="xs:NCName"/>
+        </xs:sequence>
+    </xs:complexType>
+</xs:schema>
+```
+
+The schema still has one problem: With a schema like this, you can expect the following message to validate:
+
+```
+<HolidayRequest xmlns="http://mycompany.com/hr/schemas">
+    <Holiday>
+        <StartDate>this is not a date</StartDate>
+        <EndDate>neither is this</EndDate>
+    </Holiday>
+    <!-- ... -->
+</HolidayRequest>
+```
+
+Clearly, we must make sure that the start and end date are really dates. XML Schema has an excellent built-in `date` type that we can use. We also change the `NCName`s to `string` instances. Finally, we change the `sequence` in `<HolidayRequest/>` to `all`. This tells the XML parser that the order of `<Holiday/>` and `<Employee/>` is not significant. Our final XSD now looks like the following listing:
+
+```
+<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
+        xmlns:hr="http://mycompany.com/hr/schemas"
+        elementFormDefault="qualified"
+        targetNamespace="http://mycompany.com/hr/schemas">
+    <xs:element name="HolidayRequest">
+        <xs:complexType>
+            <xs:all> (1)
+                <xs:element name="Holiday" type="hr:HolidayType"/>
+            </xs:all> (1)
+        </xs:complexType>
+    </xs:element>
+    <xs:complexType name="HolidayType">
+        <xs:sequence>
+            <xs:element name="StartDate" type="xs:date"/> (2)
+            <xs:element name="EndDate" type="xs:date"/> (2)
+        </xs:sequence>
+    </xs:complexType>
+    <xs:complexType name="EmployeeType">
+        <xs:sequence>
+            <xs:element name="Number" type="xs:integer"/>
+            <xs:element name="FirstName" type="xs:string"/> (3)
+            <xs:element name="LastName" type="xs:string"/> (3)
+        </xs:sequence>
+    </xs:complexType>
+</xs:schema>
+```
+
+|**1**|`all` tells the XML parser that the order of `<Holiday/>` and `<Employee/>` is not significant.|
+|-----|--------------------------------------------------------------------------------------------------|
+|**2**|We use the `xs:date` data type (which consists of a year, a month, and a day) for `<StartDate/>` and `<EndDate/>`.|
+|**3**|`xs:string` is used for the first and last names.|
+
+We store this file as `hr.xsd`.
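+
+As a quick sanity check (not part of the tutorial), you can validate the sample message against `hr.xsd` with the standard JAXP validation API. The schema file name matches the tutorial; the class and the `holidayRequest.xml` file name are ours, for illustration only:
+
+```
+import javax.xml.XMLConstants;
+import javax.xml.transform.stream.StreamSource;
+import javax.xml.validation.Schema;
+import javax.xml.validation.SchemaFactory;
+import javax.xml.validation.Validator;
+import java.io.File;
+
+public class ValidateSample {
+
+    public static void main(String[] args) throws Exception {
+        // compile hr.xsd and validate the sample request against it
+        SchemaFactory factory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);
+        Schema schema = factory.newSchema(new File("hr.xsd"));
+        Validator validator = schema.newValidator();
+        validator.validate(new StreamSource(new File("holidayRequest.xml")));
+        System.out.println("holidayRequest.xml validates against hr.xsd");
+    }
+}
+```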
+
+### 3.3. Service Contract
+
+A service contract is generally expressed as a [WSDL](https://www.w3.org/TR/wsdl) file. Note that, in Spring-WS, writing the WSDL by hand is not required. Based on the XSD and some conventions, Spring-WS can create the WSDL for you, as explained in the section entitled [Implementing the Endpoint](#tutorial-implementing-endpoint). The remainder of this section shows how to write WSDL by hand. You may want to skip to [the next section](#tutorial-creating-project).
+
+We start our WSDL with the standard preamble and by importing our existing XSD. To separate the schema from the definition, we use a separate namespace for the WSDL definitions: `[http://mycompany.com/hr/definitions](http://mycompany.com/hr/definitions)`. The following listing shows the preamble:
+
+```
+<wsdl:definitions xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"
+                  xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
+                  xmlns:schema="http://mycompany.com/hr/schemas"
+                  xmlns:tns="http://mycompany.com/hr/definitions"
+                  targetNamespace="http://mycompany.com/hr/definitions">
+    <wsdl:types>
+        <xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
+            <xsd:import namespace="http://mycompany.com/hr/schemas" schemaLocation="hr.xsd"/>
+        </xsd:schema>
+    </wsdl:types>
+```
+
+Next, we add our messages based on the written schema types. We only have one message, the `<HolidayRequest/>` we put in the schema:
+
+```
+    <wsdl:message name="HolidayRequest">
+        <wsdl:part element="schema:HolidayRequest" name="HolidayRequest"/>
+    </wsdl:message>
+```
+
+We add the message to a port type as an operation:
+
+```
+    <wsdl:portType name="HumanResource">
+        <wsdl:operation name="Holiday">
+            <wsdl:input message="tns:HolidayRequest" name="HolidayRequest"/>
+        </wsdl:operation>
+    </wsdl:portType>
+```
+
+That message finishes the abstract part of the WSDL (the interface, as it were) and leaves the concrete part. The concrete part consists of a `binding` (which tells the client how to invoke the operations you have just defined) and a `service` (which tells the client where to invoke it).
+
+Adding a concrete part is pretty standard. To do so, refer to the abstract part you defined previously, make sure you use `document/literal` for the `soap:binding` elements (`rpc/encoded` is deprecated), pick a `soapAction` for the operation (in this case, `[http://mycompany.com/RequestHoliday](http://mycompany.com/RequestHoliday)`, but any URI works), and determine the `location` URL where you want the request to arrive (in this case, `[http://mycompany.com/humanresources](http://mycompany.com/humanresources)`):
+
+```
+<wsdl:definitions xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"
+                  xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
+                  xmlns:schema="http://mycompany.com/hr/schemas"
+                  xmlns:tns="http://mycompany.com/hr/definitions"
+                  targetNamespace="http://mycompany.com/hr/definitions">
+    <wsdl:types>
+        <xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">
+            <xsd:import namespace="http://mycompany.com/hr/schemas" schemaLocation="hr.xsd"/> (1)
+        </xsd:schema>
+    </wsdl:types>
+    <wsdl:message name="HolidayRequest"> (2)
+        <wsdl:part element="schema:HolidayRequest" name="HolidayRequest"/> (3)
+    </wsdl:message>
+    <wsdl:portType name="HumanResource"> (4)
+        <wsdl:operation name="Holiday">
+            <wsdl:input message="tns:HolidayRequest" name="HolidayRequest"/> (2)
+        </wsdl:operation>
+    </wsdl:portType>
+    <wsdl:binding name="HumanResourceBinding" type="tns:HumanResource"> (4)(5)
+        <soap:binding style="document" transport="http://schemas.xmlsoap.org/soap/http"/> (7)
+        <wsdl:operation name="Holiday">
+            <soap:operation soapAction="http://mycompany.com/RequestHoliday"/> (8)
+            <wsdl:input name="HolidayRequest">
+                <soap:body use="literal"/> (6)
+            </wsdl:input>
+        </wsdl:operation>
+    </wsdl:binding>
+    <wsdl:service name="HumanResourceService">
+        <wsdl:port binding="tns:HumanResourceBinding" name="HumanResourcePort"> (5)
+            <soap:address location="http://localhost:8080/holidayService/"/> (9)
+        </wsdl:port>
+    </wsdl:service>
+</wsdl:definitions>
+```
+
+|**1**|We import the schema defined in [Data Contract](#tutorial.xsd).|
+|-----|------------------------------------------------------------------|
+|**2**|We define the `HolidayRequest` message, which gets used in the `portType`.|
+|**3**|The `HolidayRequest` type is defined in the schema.|
+|**4**|We define the `HumanResource` port type, which gets used in the `binding`.|
+|**5**|We define the `HumanResourceBinding` binding, which gets used in the `port`.|
+|**6**|We use a document/literal style.|
+|**7**|The literal `[http://schemas.xmlsoap.org/soap/http](http://schemas.xmlsoap.org/soap/http)` signifies an HTTP transport.|
+|**8**|The `soapAction` attribute signifies the `SOAPAction` HTTP header that will be sent with every request.|
+|**9**|The `[http://localhost:8080/holidayService/](http://localhost:8080/holidayService/)` address is the URL where the web service can be invoked.|
+
+The preceding listing shows the final WSDL. We describe how to implement the resulting schema and WSDL in the next section.
+
+### 3.4. Creating the project
+
+In this section, we use [Maven](https://maven.apache.org/) to create the initial project structure for us. Doing so is not required but greatly reduces the amount of code we have to write to set up our HolidayService.
+
+The following command creates a Maven web application project for us by using the Spring-WS archetype (that is, a project template):
+
+```
+mvn archetype:create -DarchetypeGroupId=org.springframework.ws \
+  -DarchetypeArtifactId=spring-ws-archetype \
+  -DarchetypeVersion= \
+  -DgroupId=com.mycompany.hr \
+  -DartifactId=holidayService
+```
+
+The preceding command creates a new directory called `holidayService`. In this directory is a `src/main/webapp` directory, which contains the root of the WAR file. You can find the standard web application deployment descriptor (`'WEB-INF/web.xml'`) here, which defines a Spring-WS `MessageDispatcherServlet` and maps all incoming requests to this servlet:
+
+```
+<web-app xmlns="http://java.sun.com/xml/ns/j2ee" version="2.4"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://java.sun.com/xml/ns/j2ee
+         http://java.sun.com/xml/ns/j2ee/web-app_2_4.xsd">
+
+    <display-name>MyCompany HR Holiday Service</display-name>
+
+    <servlet>
+        <servlet-name>spring-ws</servlet-name>
+        <servlet-class>org.springframework.ws.transport.http.MessageDispatcherServlet</servlet-class>
+    </servlet>
+
+    <servlet-mapping>
+        <servlet-name>spring-ws</servlet-name>
+        <url-pattern>/*</url-pattern>
+    </servlet-mapping>
+
+</web-app>
+```
+
+In addition to the preceding `WEB-INF/web.xml` file, you also need another, Spring-WS-specific, configuration file, named `WEB-INF/spring-ws-servlet.xml`. This file contains all of the Spring-WS-specific beans, such as endpoints and `WebServiceMessageReceiver` implementations, and is used to create a new Spring container. The name of this file is derived from the name of the attendant servlet (in this case `'spring-ws'`) with `-servlet.xml` appended to it. So if you define a `MessageDispatcherServlet` with the name `'dynamite'`, the name of the Spring-WS-specific configuration file becomes `WEB-INF/dynamite-servlet.xml`.
+
+(You can see the contents of the `WEB-INF/spring-ws-servlet.xml` file for this example in [[tutorial.example.sws-conf-file]](#tutorial.example.sws-conf-file).)
+
+Once you have created the project structure, you can put the schema and the WSDL from the previous section in the `'WEB-INF/'` folder.
+
+### 3.5. Implementing the Endpoint
+
+In Spring-WS, you implement endpoints to handle incoming XML messages. An endpoint is typically created by annotating a class with the `@Endpoint` annotation. In this endpoint class, you can create one or more methods that handle incoming requests. The method signatures can be quite flexible. You can include almost any sort of parameter type related to the incoming XML message, as we explain later in this chapter.
+
+#### 3.5.1. Handling the XML Message
+
+In this sample application, we use [JDOM 2](http://www.jdom.org/) to handle the XML message. We also use [XPath](https://www.w3.org/TR/xpath20/), because it lets us select particular parts of the XML JDOM tree without requiring strict schema conformance.
+
+The following listing shows the class that defines our holiday endpoint:
+
+```
+package com.mycompany.hr.ws;
+
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.Arrays;
+import java.util.Date;
+
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.ws.server.endpoint.annotation.Endpoint;
+import org.springframework.ws.server.endpoint.annotation.PayloadRoot;
+import org.springframework.ws.server.endpoint.annotation.RequestPayload;
+
+import com.mycompany.hr.service.HumanResourceService;
+import org.jdom2.Element;
+import org.jdom2.JDOMException;
+import org.jdom2.Namespace;
+import org.jdom2.filter.Filters;
+import org.jdom2.xpath.XPathExpression;
+import org.jdom2.xpath.XPathFactory;
+
+@Endpoint (1)
+public class HolidayEndpoint {
+
+    private static final String NAMESPACE_URI = "http://mycompany.com/hr/schemas";
+
+    private XPathExpression<Element> startDateExpression;
+
+    private XPathExpression<Element> endDateExpression;
+
+    private XPathExpression<Element> firstNameExpression;
+
+    private XPathExpression<Element> lastNameExpression;
+
+    private HumanResourceService humanResourceService;
+
+    @Autowired (2)
+    public HolidayEndpoint(HumanResourceService humanResourceService) throws JDOMException {
+        this.humanResourceService = humanResourceService;
+
+        Namespace namespace = Namespace.getNamespace("hr", NAMESPACE_URI);
+        XPathFactory xPathFactory = XPathFactory.instance();
+        startDateExpression = xPathFactory.compile("//hr:StartDate", Filters.element(), null, namespace);
+        endDateExpression = xPathFactory.compile("//hr:EndDate", Filters.element(), null, namespace);
+        firstNameExpression = xPathFactory.compile("//hr:FirstName", Filters.element(), null, namespace);
+        lastNameExpression = xPathFactory.compile("//hr:LastName", Filters.element(), null, namespace);
+    }
+
+    @PayloadRoot(namespace = NAMESPACE_URI, localPart = "HolidayRequest") (3)
+    public void handleHolidayRequest(@RequestPayload Element holidayRequest) throws Exception { (4)
+        Date startDate = parseDate(startDateExpression, holidayRequest);
+        Date endDate = parseDate(endDateExpression, holidayRequest);
+        String name = firstNameExpression.evaluateFirst(holidayRequest).getText() + " "
+                + lastNameExpression.evaluateFirst(holidayRequest).getText();
+
+        humanResourceService.bookHoliday(startDate, endDate, name);
+    }
+
+    private Date parseDate(XPathExpression<Element> expression, Element element) throws ParseException {
+        Element result = expression.evaluateFirst(element);
+        if (result != null) {
+            SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
+            return dateFormat.parse(result.getText());
+        } else {
+            throw new IllegalArgumentException("Could not evaluate [" + expression + "] on [" + element + "]");
+        }
+    }
+
+}
+```
+
+|**1**|The `HolidayEndpoint` is annotated with `@Endpoint`. This marks the class as a special sort of `@Component`, suitable for handling XML messages in Spring-WS, and also makes it eligible for component scanning.|
+|-----|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|**2**|The `HolidayEndpoint` requires the `HumanResourceService` business service to operate, so we inject the dependency in the constructor and annotate it with `@Autowired`. Next, we set up XPath expressions by using the JDOM2 API. There are four expressions: `//hr:StartDate` for extracting the `<StartDate>` text value, `//hr:EndDate` for extracting the end date, and two for extracting the names of the employee.|
+|**3**|The `@PayloadRoot` annotation tells Spring-WS that the `handleHolidayRequest` method is suitable for handling XML messages. The sort of message that this method can handle is indicated by the annotation values. In this case, it can handle XML elements that have the `HolidayRequest` local part and the `[http://mycompany.com/hr/schemas](http://mycompany.com/hr/schemas)` namespace. More information about mapping messages to endpoints is provided in the next section.|
+|**4**|The `handleHolidayRequest(..)` method is the main handling method, which gets passed the `<HolidayRequest/>` element from the incoming XML message. The `@RequestPayload` annotation indicates that the `holidayRequest` parameter should be mapped to the payload of the request message. We use the XPath expressions to extract the string values from the XML messages and convert these values to `Date` objects by using a `SimpleDateFormat` (the `parseDate` method). With these values, we invoke a method on the business service. Typically, this results in a database transaction being started and some records being altered in the database. Finally, we define a `void` return type, which indicates to Spring-WS that we do not want to send a response message. If we wanted a response message, we could return a JDOM `Element` that represents the payload of the response message.|
+
+Using JDOM is just one of the options to handle the XML. Other options include DOM, dom4j, XOM, SAX, and StAX, as well as marshalling techniques such as JAXB, Castor, XMLBeans, JiBX, and XStream, as explained in [the next chapter](#common). We chose JDOM because it gives us access to the raw XML and because it is based on classes (not interfaces and factory methods as with W3C DOM and dom4j), which makes the code less verbose. We use XPath because it is less fragile than marshalling technologies. We do not need strict schema conformance as long as we can find the dates and the name.
+
+Because we use JDOM, we must add some dependencies to the Maven `pom.xml`, which is in the root of our project directory. Here is the relevant section of the POM:
+
+```
+<dependencies>
+    <dependency>
+        <groupId>org.springframework.ws</groupId>
+        <artifactId>spring-ws-core</artifactId>
+    </dependency>
+    <dependency>
+        <groupId>jdom</groupId>
+        <artifactId>jdom</artifactId>
+        <version>2.0.1</version>
+    </dependency>
+    <dependency>
+        <groupId>jaxen</groupId>
+        <artifactId>jaxen</artifactId>
+        <version>1.1</version>
+    </dependency>
+</dependencies>
+```
+
+Here is how we would configure these classes in our `spring-ws-servlet.xml` Spring XML configuration file by using component scanning. We also instruct Spring-WS to use annotation-driven endpoints, with the `<sws:annotation-driven/>` element:
+
+```
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns:context="http://www.springframework.org/schema/context"
+       xmlns:sws="http://www.springframework.org/schema/web-services"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans
+       http://www.springframework.org/schema/beans/spring-beans.xsd
+       http://www.springframework.org/schema/context
+       http://www.springframework.org/schema/context/spring-context.xsd
+       http://www.springframework.org/schema/web-services
+       http://www.springframework.org/schema/web-services/web-services.xsd">
+
+    <context:component-scan base-package="com.mycompany.hr"/>
+
+    <sws:annotation-driven/>
+
+</beans>
+```
+
+#### 3.5.2. Routing the Message to the Endpoint
+
+As part of writing the endpoint, we also used the `@PayloadRoot` annotation to indicate which sort of messages can be handled by the `handleHolidayRequest` method. In Spring-WS, this process is the responsibility of an `EndpointMapping`. Here, we route messages based on their content by using a `PayloadRootAnnotationMethodEndpointMapping`. The following listing shows the annotation we used earlier:
+
+```
+@PayloadRoot(namespace = "http://mycompany.com/hr/schemas", localPart = "HolidayRequest")
+```
+
+The annotation shown in the preceding example basically means that, whenever an XML message is received with the namespace `[http://mycompany.com/hr/schemas](http://mycompany.com/hr/schemas)` and the `HolidayRequest` local name, it is routed to the `handleHolidayRequest` method. By using the `<sws:annotation-driven/>` element in our configuration, we enable the detection of the `@PayloadRoot` annotations. It is possible (and quite common) to have multiple, related handling methods in an endpoint, each of them handling different XML messages.
+
+There are also other ways to map endpoints to XML messages, which are described in [the next chapter](#common).
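+
+To illustrate the "multiple, related handling methods" point, here is a hedged sketch of one endpoint with two mapped methods. The `CancelHolidayRequest` element is invented for illustration and is not part of the tutorial's contract:
+
+```
+import org.jdom2.Element;
+import org.springframework.ws.server.endpoint.annotation.Endpoint;
+import org.springframework.ws.server.endpoint.annotation.PayloadRoot;
+import org.springframework.ws.server.endpoint.annotation.RequestPayload;
+
+@Endpoint
+public class HolidayRequestsEndpoint {
+
+    private static final String NAMESPACE_URI = "http://mycompany.com/hr/schemas";
+
+    // invoked for {http://mycompany.com/hr/schemas}HolidayRequest elements
+    @PayloadRoot(namespace = NAMESPACE_URI, localPart = "HolidayRequest")
+    public void handleHolidayRequest(@RequestPayload Element request) {
+        // book the holiday
+    }
+
+    // invoked for the (hypothetical) CancelHolidayRequest element
+    @PayloadRoot(namespace = NAMESPACE_URI, localPart = "CancelHolidayRequest")
+    public void handleCancelHolidayRequest(@RequestPayload Element request) {
+        // cancel the holiday
+    }
+}
+```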
+
+#### 3.5.3. Providing the Service and Stub implementation
+
+Now that we have the endpoint, we need `HumanResourceService` and its implementation for use by `HolidayEndpoint`. The following listing shows the `HumanResourceService` interface:
+
+```
+package com.mycompany.hr.service;
+
+import java.util.Date;
+
+public interface HumanResourceService {
+    void bookHoliday(Date startDate, Date endDate, String name);
+}
+```
+
+For tutorial purposes, we use a simple stub implementation of the `HumanResourceService`:
+
+```
+package com.mycompany.hr.service;
+
+import java.util.Date;
+
+import org.springframework.stereotype.Service;
+
+@Service (1)
+public class StubHumanResourceService implements HumanResourceService {
+
+    public void bookHoliday(Date startDate, Date endDate, String name) {
+        System.out.println("Booking holiday for [" + startDate + "-" + endDate + "] for [" + name + "]");
+    }
+}
+```
+
+|**1**|The `StubHumanResourceService` is annotated with `@Service`. This marks the class as a business facade, which makes it a candidate for injection by `@Autowired` in `HolidayEndpoint`.|
+|-----|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+### 3.6. Publishing the WSDL
+
+Finally, we need to publish the WSDL. As stated in [Service Contract](#tutorial-service-contract), we do not need to write a WSDL ourselves. Spring-WS can generate one based on some conventions. Here is how we define the generation:
+
+```
+<sws:dynamic-wsdl id="holiday" (1)
+        portTypeName="HumanResource" (2)
+        locationUri="/holidayService/" (3)
+        targetNamespace="http://mycompany.com/hr/definitions"> (4)
+    <sws:xsd location="/WEB-INF/hr.xsd"/> (5)
+</sws:dynamic-wsdl>
+```
+
+|**1**|The `id` determines the URL where the WSDL can be retrieved. In this case, the `id` is `holiday`, which means that the WSDL can be retrieved as `holiday.wsdl` in the servlet context. The full URL is `[http://localhost:8080/holidayService/holiday.wsdl](http://localhost:8080/holidayService/holiday.wsdl)`.|
+|-----|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|**2**|Next, we set the WSDL port type to be `HumanResource`.|
+|**3**|We set the location where the service can be reached: `/holidayService/`. We use a relative URI, and we instruct the framework to transform it dynamically to an absolute URI. Hence, if the service is deployed to different contexts, we do not have to change the URI manually. For more information, see [the section called “Automatic WSDL exposure”](#server-automatic-wsdl-exposure). For the location transformation to work, we need to add an init parameter to the `spring-ws` servlet in `web.xml` (shown in the next listing).|
+|**4**|We define the target namespace for the WSDL definition itself. Setting this attribute is not required. If not set, the WSDL has the same namespace as the XSD schema.|
+|**5**|The `xsd` element refers to the human resource schema we defined in [Data Contract](#tutorial.xsd). We placed the schema in the `WEB-INF` directory of the application.|
+
+The following listing shows how to add the init parameter:
+
+```
+<init-param>
+    <param-name>transformWsdlLocations</param-name>
+    <param-value>true</param-value>
+</init-param>
+```
+
+You can create a WAR file by using `mvn install`. If you deploy the application (to Tomcat, Jetty, and so on) and point your browser at [this location](http://localhost:8080/holidayService/holiday.wsdl), you see the generated WSDL. This WSDL is ready to be used by clients, such as [soapUI](http://www.soapui.org/) or other SOAP frameworks.
+
+That concludes this tutorial. The tutorial code can be found in the full distribution of Spring-WS. If you wish to continue, look at the echo sample application that is part of the distribution. After that, look at the airline sample, which is a bit more complicated, because it uses JAXB, WS-Security, Hibernate, and a transactional service layer. Finally, you can read the rest of the reference documentation.
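+
+Although the tutorial does not cover the client side, a minimal, hedged sketch of invoking the service with Spring-WS's own `WebServiceTemplate` (covered in the client chapter of the reference) might look as follows. The URL and payload mirror the contract defined above; the class itself is illustrative:
+
+```
+import java.io.StringReader;
+import javax.xml.transform.stream.StreamResult;
+import javax.xml.transform.stream.StreamSource;
+import org.springframework.ws.client.core.WebServiceTemplate;
+
+public class HolidayClient {
+
+    public static void main(String[] args) {
+        WebServiceTemplate template = new WebServiceTemplate();
+        String request =
+                "<HolidayRequest xmlns=\"http://mycompany.com/hr/schemas\">"
+              + "<Holiday><StartDate>2006-07-03</StartDate><EndDate>2006-07-07</EndDate></Holiday>"
+              + "<Employee><Number>42</Number><FirstName>Arjen</FirstName><LastName>Poutsma</LastName></Employee>"
+              + "</HolidayRequest>";
+        // Our service sends no response message, so this returns false.
+        boolean hasResponse = template.sendSourceAndReceiveToResult(
+                "http://localhost:8080/holidayService/",
+                new StreamSource(new StringReader(request)),
+                new StreamResult(System.out));
+        System.out.println("response received: " + hasResponse);
+    }
+}
+```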
+
+# II. Reference
+
+This part of the reference documentation details the various components that comprise Spring Web Services. This includes [a chapter](#common) that discusses the parts common to both client- and server-side WS, a chapter devoted to the specifics of [writing server-side web services](#server), a chapter about using web services on [the client-side](#client), and a chapter on using [WS-Security](#security).
+
+## 4. Shared components
+
+This chapter explores the components that are shared between client- and server-side Spring-WS development. These interfaces and classes represent the building blocks of Spring-WS, so you need to understand what they do, even if you do not use them directly.
+
+### 4.1. Web Service Messages
+
+This section describes the messages and message factories that Spring-WS uses.
+
+#### 4.1.1. `WebServiceMessage`
+
+One of the core interfaces of Spring Web Services is the `WebServiceMessage`. This interface represents a protocol-agnostic XML message. The interface contains methods that provide access to the payload of the message, in the form of a `javax.xml.transform.Source` or a `javax.xml.transform.Result`. `Source` and `Result` are tagging interfaces that represent an abstraction over XML input and output. Concrete implementations wrap various XML representations, as indicated in the following table:
+
+| Source or Result implementation | Wrapped XML representation |
+|-----------------------------------------|-----------------------------------------------------------|
+| `javax.xml.transform.dom.DOMSource` | `org.w3c.dom.Node` |
+| `javax.xml.transform.dom.DOMResult` | `org.w3c.dom.Node` |
+| `javax.xml.transform.sax.SAXSource` | `org.xml.sax.InputSource` and `org.xml.sax.XMLReader` |
+| `javax.xml.transform.sax.SAXResult` | `org.xml.sax.ContentHandler` |
+|`javax.xml.transform.stream.StreamSource`|`java.io.File`, `java.io.InputStream`, or `java.io.Reader` |
+|`javax.xml.transform.stream.StreamResult`|`java.io.File`, `java.io.OutputStream`, or `java.io.Writer`|
+
+In addition to reading from and writing to the payload, a web service message can write itself to an output stream.
+
+#### 4.1.2. `SoapMessage`
+
+`SoapMessage` is a subclass of `WebServiceMessage`. It contains SOAP-specific methods, such as getting SOAP Headers, SOAP Faults, and so on. Generally, your code should not be dependent on `SoapMessage`, because the content of the SOAP Body (the payload of the message) can be obtained by using `getPayloadSource()` and `getPayloadResult()` in the `WebServiceMessage`. Only when it is necessary to perform SOAP-specific actions (such as adding a header, getting an attachment, and so on) should you need to cast `WebServiceMessage` to `SoapMessage`.
+
+#### 4.1.3. Message Factories
+
+Concrete message implementations are created by a `WebServiceMessageFactory`. This factory can create an empty message or read a message from an input stream. There are two concrete implementations of `WebServiceMessageFactory`. One is based on SAAJ, the SOAP with Attachments API for Java. The other is based on Axis 2’s AXIOM (AXis Object Model).
+
+##### `SaajSoapMessageFactory`
+
+The `SaajSoapMessageFactory` uses the SOAP with Attachments API for Java (SAAJ) to create `SoapMessage` implementations. SAAJ is part of J2EE 1.4, so it should be supported under most modern application servers. Here is an overview of the SAAJ versions supplied by common application servers:
+
+| Application Server | SAAJ Version |
+|--------------------|--------------|
+| BEA WebLogic 8 | 1.1 |
+| BEA WebLogic 9 | 1.1/1.2¹ |
+| IBM WebSphere 6 | 1.2 |
+| SUN Glassfish 1 | 1.3 |
+
+¹ WebLogic 9 has a known bug in the SAAJ 1.2 implementation: it implements all the 1.2 interfaces but throws an `UnsupportedOperationException` when called. Spring Web Services has a workaround: It uses SAAJ 1.1 when operating on WebLogic 9.
+
+Additionally, Java SE 6 includes SAAJ 1.3. You can wire up a `SaajSoapMessageFactory` as follows:
+
+```
+<bean id="messageFactory" class="org.springframework.ws.soap.saaj.SaajSoapMessageFactory"/>
+```
+
+| |SAAJ is based on DOM, the Document Object Model. This means that all SOAP messages are stored in memory. For larger SOAP messages, this may not perform well. In that case, the `AxiomSoapMessageFactory` might be more applicable.|
+|---|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+##### `AxiomSoapMessageFactory`
+
+The `AxiomSoapMessageFactory` uses the AXis 2 Object Model (AXIOM) to create `SoapMessage` implementations. AXIOM is based on StAX, the Streaming API for XML. StAX provides a pull-based mechanism for reading XML messages, which can be more efficient for larger messages.
+
+To increase reading performance on the `AxiomSoapMessageFactory`, you can set the `payloadCaching` property to false (the default is true). Doing so causes the contents of the SOAP body to be read directly from the socket stream. With payload caching disabled, the payload can be read only once. This means that you have to make sure that any pre-processing (logging or other work) of the message does not consume it.
+
+You can use the `AxiomSoapMessageFactory` as follows:
+
+```
+<bean id="messageFactory" class="org.springframework.ws.soap.axiom.AxiomSoapMessageFactory">
+    <property name="payloadCaching" value="false"/>
+</bean>
+```
+
+In addition to payload caching, AXIOM supports full streaming messages, as defined in the `StreamingWebServiceMessage`. This means that you can directly set the payload on the response message, rather than writing it to a DOM tree or buffer.
+
+Full streaming for AXIOM is used when a handler method returns a JAXB2-supported object. It automatically sets this marshalled object into the response message and writes it out to the outgoing socket stream when the response is going out.
+
+For more information about full streaming, see the class-level Javadoc for `StreamingWebServiceMessage` and `StreamingPayload`.
+
+##### SOAP 1.1 or 1.2
+
+Both the `SaajSoapMessageFactory` and the `AxiomSoapMessageFactory` have a `soapVersion` property, where you can inject a `SoapVersion` constant. By default, the version is 1.1, but you can set it to 1.2:
+
+```
+<bean id="messageFactory" class="org.springframework.ws.soap.saaj.SaajSoapMessageFactory">
+    <property name="soapVersion">
+        <util:constant static-field="org.springframework.ws.soap.SoapVersion.SOAP_12"/>
+    </property>
+</bean>
+```
+
+In the preceding example, we define a `SaajSoapMessageFactory` that accepts only SOAP 1.2 messages.
+
+| |Even though both versions of SOAP are quite similar in format, the 1.2 version is not backwards compatible with 1.1, because it uses a different XML namespace. Other major differences between SOAP 1.1 and 1.2 include the different structure of a fault and the fact that `SOAPAction` HTTP headers are effectively deprecated, though they still work. One important thing to note with SOAP version numbers (or WS-\* specification version numbers in general) is that the latest version of a specification is generally not the most popular version. For SOAP, this means that (currently) the best version to use is 1.1. Version 1.2 might become more popular in the future, but 1.1 is currently the safest bet.|
+|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+#### 4.1.4. `MessageContext`
+
+Typically, messages come in pairs: a request and a response. A request is created on the client-side, which is sent over some transport to the server-side, where a response is generated. This response gets sent back to the client, where it is read.
+
+In Spring Web Services, such a conversation is contained in a `MessageContext`, which has properties to get request and response messages. On the client-side, the message context is created by the [`WebServiceTemplate`](#client-web-service-template). On the server-side, the message context is read from the transport-specific input stream. For example, in HTTP, it is read from the `HttpServletRequest`, and the response is written back to the `HttpServletResponse`.
+
+### 4.2. `TransportContext`
+
+One of the key properties of the SOAP protocol is that it tries to be transport-agnostic. This is why, for instance, Spring-WS does not support mapping messages to endpoints by HTTP request URL but rather by message content.
+
+However, it is sometimes necessary to get access to the underlying transport, either on the client or the server side. For this, Spring Web Services has the `TransportContext`. The transport context allows access to the underlying `WebServiceConnection`, which typically is an `HttpServletConnection` on the server side or an `HttpUrlConnection` or `CommonsHttpConnection` on the client side. For example, you can obtain the IP address of the current request in a server-side endpoint or interceptor:
+
+```
+TransportContext context = TransportContextHolder.getTransportContext();
+HttpServletConnection connection = (HttpServletConnection) context.getConnection();
+HttpServletRequest request = connection.getHttpServletRequest();
+String ipAddress = request.getRemoteAddr();
+```
+
+### 4.3. Handling XML With XPath
+
+One of the best ways to handle XML is to use XPath. Quoting [[effective-xml]](#effective-xml), item 35:
+
+> XPath is a fourth generation declarative language that allows you to specify which nodes you want to process without specifying exactly how the processor is supposed to navigate to those nodes. XPath’s data model is very well designed to support exactly what almost all developers want from XML. For instance, it merges all adjacent text including that in CDATA sections, allows values to be calculated that skip over comments and processing instructions and include text from child and descendant elements, and requires all external entity references to be resolved. In practice, XPath expressions tend to be much more robust against unexpected but perhaps insignificant changes in the input document.
+
+— Elliotte Rusty Harold
+
+Spring Web Services has two ways to use XPath within your application: the faster `XPathExpression` or the more flexible `XPathTemplate`.
+
+#### 4.3.1. `XPathExpression`
+
+The `XPathExpression` is an abstraction over a compiled XPath expression, such as the Java 5 `javax.xml.xpath.XPathExpression` interface or the Jaxen `XPath` class. To construct an expression in an application context, you can use `XPathExpressionFactoryBean`. The following example uses this factory bean:
+
+```
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans
+       http://www.springframework.org/schema/beans/spring-beans.xsd">
+
+    <bean id="nameExpression" class="org.springframework.xml.xpath.XPathExpressionFactoryBean">
+        <property name="expression" value="/Contacts/Contact/Name"/>
+    </bean>
+
+    <bean id="myPage" class="sample.MyXPathClass">
+        <constructor-arg ref="nameExpression"/>
+    </bean>
+
+</beans>
+```
+
+The preceding expression does not use namespaces, but we could set those by using the `namespaces` property of the factory bean. The expression can be used in the code as follows:
+
+```
+package sample;
+
+import org.springframework.xml.xpath.XPathExpression;
+import org.w3c.dom.Document;
+
+public class MyXPathClass {
+
+    private final XPathExpression nameExpression;
+
+    public MyXPathClass(XPathExpression nameExpression) {
+        this.nameExpression = nameExpression;
+    }
+
+    public void doXPath(Document document) {
+        String name = nameExpression.evaluateAsString(document.getDocumentElement());
+        System.out.println("Name: " + name);
+    }
+
+}
+```
+
+For a more flexible approach, you can use a `NodeMapper`, which is similar to the `RowMapper` in Spring’s JDBC support. The following example shows how to use it:
+
+```
+package sample;
+
+import java.util.List;
+
+import org.springframework.xml.xpath.NodeMapper;
+import org.springframework.xml.xpath.XPathExpression;
+import org.w3c.dom.DOMException;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+
+public class MyXPathClass {
+
+    private final XPathExpression contactExpression;
+
+    public MyXPathClass(XPathExpression contactExpression) {
+        this.contactExpression = contactExpression;
+    }
+
+    public void doXPath(Document document) {
+        List<Contact> contacts = contactExpression.evaluate(document,
+            new NodeMapper<Contact>() {
+                public Contact mapNode(Node node, int nodeNum) throws DOMException {
+                    Element contactElement = (Element) node;
+                    Element nameElement = (Element) contactElement.getElementsByTagName("Name").item(0);
+                    Element phoneElement = (Element) contactElement.getElementsByTagName("Phone").item(0);
+                    return new Contact(nameElement.getTextContent(), phoneElement.getTextContent());
+                }
+            });
+        // do something with the list of Contact objects
+    }
+}
+```
+
+Similar to mapping rows in Spring JDBC’s `RowMapper`, each result node is mapped by using an anonymous inner class. In this case, we create a `Contact` object, which we use later on.
+
+#### 4.3.2. `XPathTemplate`
+
+The `XPathExpression` lets you evaluate only a single, pre-compiled expression. A more flexible, though slower, alternative is the `XPathTemplate`. This class follows the common template pattern used throughout Spring (`JdbcTemplate`, `JmsTemplate`, and others). The following listing shows an example:
+
+```
+package sample;
+
+import javax.xml.transform.Source;
+
+import org.springframework.xml.xpath.Jaxp13XPathTemplate;
+import org.springframework.xml.xpath.XPathOperations;
+
+public class MyXPathClass {
+
+    private XPathOperations template = new Jaxp13XPathTemplate();
+
+    public void doXPath(Source source) {
+        String name = template.evaluateAsString("/Contacts/Contact/Name", source);
+        // do something with name
+    }
+
+}
+```
+
+### 4.4. Message Logging and Tracing
+
+When developing or debugging a web service, it can be quite useful to look at the content of a (SOAP) message when it arrives or before it is sent. Spring Web Services offers this functionality through the standard Commons Logging interface.
+
+| |Make sure to use Commons Logging version 1.1 or higher. Earlier versions have class loading issues and do not integrate with the Log4J TRACE level.|
+|---|---------------------------------------------------------------------------------------------------------------------------------------------------|
+
+To log all server-side messages, set the `org.springframework.ws.server.MessageTracing` logger level to `DEBUG` or `TRACE`. On the `DEBUG` level, only the payload root element is logged. On the `TRACE` level, the entire message content is logged. If you want to log only sent messages, use the `org.springframework.ws.server.MessageTracing.sent` logger. Similarly, you can use `org.springframework.ws.server.MessageTracing.received` to log only received messages.
+
+On the client side, similar loggers exist: `org.springframework.ws.client.MessageTracing.sent` and `org.springframework.ws.client.MessageTracing.received`.
+
+The following example of a `log4j.properties` configuration file logs the full content of sent messages on the client side and only the payload root element for client-side received messages. On the server side, the payload root is logged for both sent and received messages:
+
+```
+log4j.rootCategory=INFO, stdout
+log4j.logger.org.springframework.ws.client.MessageTracing.sent=TRACE
+log4j.logger.org.springframework.ws.client.MessageTracing.received=DEBUG
+
+log4j.logger.org.springframework.ws.server.MessageTracing=DEBUG
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%p [%c{3}] %m%n
+```
+
+With this configuration, a typical output is:
+
+```
+TRACE [client.MessageTracing.sent] Sent request [
+```
+
+You can expose a hand-written WSDL in XML configuration with `<sws:static-wsdl id="orders" location="classpath:orders.wsdl"/>`. Alternatively, it can be a `@Bean` method in a `@Configuration` class:
+
+```
+@Bean
+public SimpleWsdl11Definition orders() {
+    return new SimpleWsdl11Definition(new ClassPathResource("orders.wsdl"));
+}
+```
+
+You can access the WSDL defined in the `orders.wsdl` file on the classpath through `GET` requests to a URL of the following form (substitute the host, port, and servlet context path as appropriate):
+
+```
+http://localhost:8080/spring-ws/orders.wsdl
+```
+
+| |All `WsdlDefinition` bean definitions are exposed by the `MessageDispatcherServlet` under their bean name with a suffix of `.wsdl`. So, if the bean name is `echo`, the host name is `server`, and the servlet context (war name) is `spring-ws`, the WSDL can be found at `[http://server/spring-ws/echo.wsdl](http://server/spring-ws/echo.wsdl)`.|
+|---|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+Another nice feature of the `MessageDispatcherServlet` (or, more correctly, the `WsdlDefinitionHandlerAdapter`) is that it can transform the value of the `location` of all the WSDL that it exposes to reflect the URL of the incoming request.
+
+Note that this `location` transformation feature is off by default.
+To switch this feature on, you need to specify an initialization parameter to the `MessageDispatcherServlet`:
+
+```
+<web-app>
+
+    <servlet>
+        <servlet-name>spring-ws</servlet-name>
+        <servlet-class>org.springframework.ws.transport.http.MessageDispatcherServlet</servlet-class>
+        <init-param>
+            <param-name>transformWsdlLocations</param-name>
+            <param-value>true</param-value>
+        </init-param>
+    </servlet>
+
+    <servlet-mapping>
+        <servlet-name>spring-ws</servlet-name>
+        <url-pattern>/*</url-pattern>
+    </servlet-mapping>
+
+</web-app>
+```
+
+If you use `AbstractAnnotationConfigMessageDispatcherServletInitializer`, enabling transformation is as simple as overriding the `isTransformWsdlLocations()` method to return `true`.
+
+Consult the class-level Javadoc on the [`WsdlDefinitionHandlerAdapter`](https://docs.spring.io/spring-ws/docs/current/org/springframework/ws/transport/http/WsdlDefinitionHandlerAdapter.html) class to learn more about the whole transformation process.
+
+As an alternative to writing the WSDL by hand and exposing it with `<static-wsdl>`, Spring Web Services can also generate a WSDL from an XSD schema. This is the approach shown in [Publishing the WSDL](#tutorial-publishing-wsdl). The next application context snippet shows how to create such a dynamic WSDL file:
+
+```
+<sws:dynamic-wsdl id="orders"
+        portTypeName="Orders"
+        locationUri="http://localhost:8080/ordersService/">
+    <sws:xsd location="classpath:echo.xsd"/>
+</sws:dynamic-wsdl>
+```
+
+Alternatively, you can use the Java `@Bean` method:
+
+```
+@Bean
+public DefaultWsdl11Definition orders() {
+    DefaultWsdl11Definition definition = new DefaultWsdl11Definition();
+    definition.setPortTypeName("Orders");
+    definition.setLocationUri("http://localhost:8080/ordersService/");
+    definition.setSchema(new SimpleXsdSchema(new ClassPathResource("echo.xsd")));
+
+    return definition;
+}
+```
+
+The `<dynamic-wsdl>` element depends on the `DefaultWsdl11Definition` class. This definition class uses WSDL providers in the [`org.springframework.ws.wsdl.wsdl11.provider`](https://docs.spring.io/spring-ws/sites/1.5/apidocs/org/springframework/ws/wsdl/wsdl11/provider/package-summary.html) package and the [`ProviderBasedWsdl4jDefinition`](https://docs.spring.io/spring-ws/docs/current/org/springframework/ws/wsdl/wsdl11/ProviderBasedWsdl4jDefinition.html) class to generate a WSDL the first time it is requested. See the class-level Javadoc of these classes to see how you can extend this mechanism, if necessary.
+
+The `DefaultWsdl11Definition` (and, therefore, the `<dynamic-wsdl>` tag) builds a WSDL from an XSD schema by using conventions. It iterates over all `element` elements found in the schema and creates a `message` for all elements. Next, it creates a WSDL `operation` for all messages that end with the defined request or response suffix. The default request suffix is `Request`. The default response suffix is `Response`, though these can be changed by setting the `requestSuffix` and `responseSuffix` attributes on `<dynamic-wsdl/>`, respectively. It also builds a `portType`, `binding`, and `service` based on the operations.
+
+For instance, if our `Orders.xsd` schema defines the `GetOrdersRequest` and `GetOrdersResponse` elements, `<dynamic-wsdl>` creates a `GetOrdersRequest` and `GetOrdersResponse` message and a `GetOrders` operation, which is put in an `Orders` port type.
+
+To use multiple schemas, either by includes or imports, you can put Commons XMLSchema on the class path. If Commons XMLSchema is on the class path, the `<dynamic-wsdl>` element follows all XSD imports and includes and inlines them in the WSDL as a single XSD. This greatly simplifies the deployment of the schemas, while still making it possible to edit them separately.
+
+| |Even though it can be handy to create the WSDL at runtime from your XSDs, there are a couple of drawbacks to this approach. First, though we try to keep the WSDL generation process consistent between releases, there is still the possibility that it changes (slightly). Second, the generation is a bit slow (though, once generated, the WSDL is cached for later reference).|
+|---|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+
+Therefore, you should use `<dynamic-wsdl>` only during the development stages of your project. We recommend using your browser to download the generated WSDL, store it in the project, and expose it with `<static-wsdl>`. This is the only way to be really sure that the WSDL does not change over time.
+
+#### 5.2.2. Wiring up Spring-WS in a `DispatcherServlet`
+
+As an alternative to the `MessageDispatcherServlet`, you can wire up a `MessageDispatcher` in a standard, Spring-Web MVC `DispatcherServlet`. By default, the `DispatcherServlet` can delegate only to `Controllers`, but we can instruct it to delegate to a `MessageDispatcher` by adding a `WebServiceMessageReceiverHandlerAdapter` to the servlet’s web application context:
+
+```
+<beans>
+
+    <bean class="org.springframework.ws.transport.http.WebServiceMessageReceiverHandlerAdapter"/>
+
+    <bean class="org.springframework.ws.soap.server.SoapMessageDispatcher">
+        ...
+    </bean>
+
+    <bean class="org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter"/>
+
+</beans>
+```
+
+Note that, by explicitly adding the `WebServiceMessageReceiverHandlerAdapter`, the dispatcher servlet does not load the default adapters and is unable to handle standard Spring-MVC `@Controllers`. Therefore, we add the `RequestMappingHandlerAdapter` at the end.
+
+In a similar fashion, you can wire a `WsdlDefinitionHandlerAdapter` to make sure the `DispatcherServlet` can handle implementations of the `WsdlDefinition` interface:
+
+```
+<beans>
+
+    <bean class="org.springframework.ws.transport.http.WsdlDefinitionHandlerAdapter"/>
+
+    <bean class="org.springframework.web.servlet.handler.SimpleUrlHandlerMapping">
+        <property name="mappings">
+            <props>
+                <prop key="*.wsdl">myServiceDefinition</prop>
+            </props>
+        </property>
+    </bean>
+
+    <bean id="myServiceDefinition" class="org.springframework.ws.wsdl.wsdl11.DefaultWsdl11Definition">
+        ...
+    </bean>
+
+</beans>
+```
+
+#### 5.2.3. JMS transport
+
+Spring Web Services supports server-side JMS handling through the JMS functionality provided in the Spring framework. Spring Web Services provides the `WebServiceMessageListener` to plug in to a `MessageListenerContainer`. This message listener requires a `WebServiceMessageFactory` and a `MessageDispatcher` to operate. The following configuration example shows this:
+
+```
+```
+
+#### 5.2.4. Email Transport
+
+In addition to HTTP and JMS, Spring Web Services also provides server-side email handling. This functionality is provided through the `MailMessageReceiver` class. This class monitors a POP3 or IMAP folder, converts the email to a `WebServiceMessage`, and sends any response by using SMTP. You can configure the host names through the `storeUri`, which indicates the mail folder to monitor for requests (typically a POP3 or IMAP folder), and a `transportUri`, which indicates the server to use for sending responses (typically an SMTP server).
+
+You can configure how the `MailMessageReceiver` monitors incoming messages with a pluggable strategy: the `MonitoringStrategy`. By default, a polling strategy is used, where the incoming folder is polled for new messages every five minutes. You can change this interval by setting the `pollingInterval` property on the strategy. By default, all `MonitoringStrategy` implementations delete the handled messages. You can change this setting by setting the `deleteMessages` property.
+
+As an alternative to the polling approaches, which are quite inefficient, there is a monitoring strategy that uses IMAP IDLE.
+The IDLE command is an optional extension of the IMAP email protocol that lets the mail server send new message updates to the `MailMessageReceiver` asynchronously. If you use an IMAP server that supports the IDLE command, you can plug the `ImapIdleMonitoringStrategy` into the `monitoringStrategy` property. In addition to a supporting server, you need to use JavaMail version 1.4.1 or higher.
+
+The following piece of configuration shows how to use the server-side email support, overriding the default polling interval to check every 30 seconds (30,000 milliseconds):
+
+```
+```
+
+#### 5.2.5. Embedded HTTP Server transport
+
+Spring Web Services provides a transport based on Sun’s JRE 1.6 [HTTP server](http://java.sun.com/javase/6/docs/jre/api/net/httpserver/spec/index.html). The embedded HTTP server is a standalone server that is simple to configure. It offers a lighter alternative to conventional servlet containers.
+
+When using the embedded HTTP server, you need no external deployment descriptor (`web.xml`). You need only define an instance of the server and configure it to handle incoming requests. The remoting module in the Core Spring Framework contains a convenient factory bean for the HTTP server: the `SimpleHttpServerFactoryBean`. The most important property is `contexts`, which maps context paths to corresponding `HttpHandler` instances.
+
+Spring Web Services provides two implementations of the `HttpHandler` interface: [`WsdlDefinitionHttpHandler`](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/transport/http/WsdlDefinitionHttpHandler.html) and [`WebServiceMessageReceiverHttpHandler`](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/transport/http/WebServiceMessageReceiverHttpHandler.html). The former maps an incoming GET request to a `WsdlDefinition`. The latter is responsible for handling POST requests for web services messages and, thus, needs a `WebServiceMessageFactory` (typically a `SaajSoapMessageFactory`) and a `WebServiceMessageReceiver` (typically the `SoapMessageDispatcher`) to accomplish its task.
+
+To draw parallels with the servlet world, the `contexts` property plays the role of servlet mappings in `web.xml`, and the `WebServiceMessageReceiverHttpHandler` is the equivalent of a `MessageDispatcherServlet`.
+
+The following snippet shows a configuration example of the HTTP server transport:
+
+```
+```
+
+For more information on the `SimpleHttpServerFactoryBean`, see the [Javadoc](http://static.springframework.org/spring/docs/2.5.x/api/org/springframework/remoting/support/SimpleHttpServerFactoryBean.html).
+
+#### 5.2.6. XMPP transport
+
+Spring Web Services 2.0 introduced support for XMPP, otherwise known as Jabber. The support is based on the [Smack](https://www.igniterealtime.org/projects/smack/index.jsp) library.
+
+Spring Web Services support for XMPP is very similar to that for the other transports: there is an `XmppMessageSender` for the `WebServiceTemplate` and an `XmppMessageReceiver` to use with the `MessageDispatcher`.
+
+The following example shows how to set up the server-side XMPP components:
+
+```
+```
+
+#### 5.2.7. MTOM
+
+[MTOM](https://en.wikipedia.org/wiki/Message_Transmission_Optimization_Mechanism) is the mechanism for sending binary data to and from Web Services.
You can look at how to implement this with Spring WS through the [MTOM sample](https://github.com/spring-projects/spring-ws-samples/tree/main/mtom). + +### 5.3. Endpoints + +Endpoints are the central concept in Spring-WS’s server-side support. Endpoints provide access to the application behavior, which is typically defined by a business service interface. An endpoint interprets the XML request message and uses that input to (typically) invoke a method on the business service. The result of that service invocation is represented as a response message. Spring-WS has a wide variety of endpoints and uses various ways to handle the XML message and to create a response. + +You can create an endpoint by annotating a class with the `@Endpoint` annotation. In the class, you define one or more methods that handle the incoming XML request, by using a wide variety of parameter types (such as DOM elements, JAXB2 objects, and others). You can indicate the sort of messages a method can handle by using another annotation (typically `@PayloadRoot`). + +Consider the following sample endpoint: + +``` +package samples; + +import org.w3c.dom.Element; + +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.ws.server.endpoint.annotation.Endpoint; +import org.springframework.ws.server.endpoint.annotation.PayloadRoot; +import org.springframework.ws.soap.SoapHeader; + +@Endpoint (1) +public class AnnotationOrderEndpoint { + + private final OrderService orderService; + + @Autowired (2) + public AnnotationOrderEndpoint(OrderService orderService) { + this.orderService = orderService; + } + + @PayloadRoot(localPart = "order", namespace = "http://samples") (5) + public void order(@RequestPayload Element orderElement) { (3) + Order order = createOrder(orderElement); + orderService.createOrder(order); + } + + @PayloadRoot(localPart = "orderRequest", namespace = "http://samples") (5) + @ResponsePayload + public Order getOrder(@RequestPayload OrderRequest orderRequest, SoapHeader header) { (4) + checkSoapHeaderForSomething(header); + return orderService.getOrder(orderRequest.getId()); + } + + ... + +} +``` + +|**1**| The class is annotated with `@Endpoint`, marking it as a Spring-WS endpoint. | +|-----|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|**2**| The constructor is marked with `@Autowired` so that the `OrderService` business service is injected into this endpoint. | +|**3**| The `order` method takes an `Element` (annotated with `@RequestPayload`) as a parameter. This means that the payload of the message is passed on this method as a DOM element. The method has a `void` return type, indicating that no response message is sent.
|**4**| The `getOrder` method takes an `OrderRequest` (also annotated with `@RequestPayload`) as a parameter. This parameter is a JAXB2-supported object (it is annotated with `@XmlRootElement`). This means that the payload of the message is passed to this method as an unmarshalled object. The `SoapHeader` type is also given as a parameter. On invocation, this parameter contains the SOAP header of the request message. The method is also annotated with `@ResponsePayload`, indicating that the return value (the `Order`) is used as the payload of the response message. For more information about endpoint methods, see [`@Endpoint` handling methods](#server-atEndpoint-methods). |
|**5**| The two handling methods of this endpoint are marked with `@PayloadRoot`, indicating what sort of request messages can be handled by the method: the `getOrder` method is invoked for requests with an `orderRequest` local name and an `http://samples` namespace URI. The `order` method is invoked for requests with an `order` local name. For more information about `@PayloadRoot`, see [Endpoint mappings](#server-endpoint-mapping). |

To enable the support for `@Endpoint` and related Spring-WS annotations, you need to add the following to your Spring application context:

```
<beans xmlns="http://www.springframework.org/schema/beans"
    xmlns:sws="http://www.springframework.org/schema/web-services"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://www.springframework.org/schema/beans
      http://www.springframework.org/schema/beans/spring-beans.xsd
      http://www.springframework.org/schema/web-services
      http://www.springframework.org/schema/web-services/web-services.xsd">

    <sws:annotation-driven/>

</beans>
```

Alternatively, if you use `@Configuration` classes instead of Spring XML, you can annotate your configuration class with `@EnableWs`:

```
@EnableWs
@Configuration
public class EchoConfig {

    // @Bean definitions go here

}
```

To customize the `@EnableWs` configuration, you can implement `WsConfigurer` or, better yet, extend the `WsConfigurerAdapter`:

```
@Configuration
@EnableWs
@ComponentScan(basePackageClasses = { MyConfiguration.class })
public class MyConfiguration extends WsConfigurerAdapter {

    @Override
    public void addInterceptors(List<EndpointInterceptor> interceptors) {
        interceptors.add(new MyInterceptor());
    }

    @Override
    public void addArgumentResolvers(List<MethodArgumentResolver> argumentResolvers) {
        argumentResolvers.add(new MyArgumentResolver());
    }

    // More overridden methods ...
}
```

In the next couple of sections, a more elaborate description of the `@Endpoint` programming model is given.

| |Endpoints, like any other Spring bean, are scoped as a singleton by default. That is, one instance of the bean definition is created per container. Being a singleton implies that more than one thread can use it at the same time, so the endpoint has to be thread safe. If you want to use a different scope, such as prototype, see the [Spring Reference documentation](https://docs.spring.io/spring/docs/current/spring-framework-reference/core.html#beans-factory-scopes).|
|---|---|

| |Note that all abstract base classes provided in Spring-WS are thread safe, unless otherwise indicated in the class-level Javadoc.|
|---|---|

#### 5.3.1. `@Endpoint` handling methods

For an endpoint to actually handle incoming XML messages, it needs to have one or more handling methods. Handling methods can take a wide range of parameters and return types. However, they typically have one parameter that contains the message payload, and they return the payload of the response message (if any). This section covers which parameter and return types are supported.

To indicate what sort of messages a method can handle, the method is typically annotated with either the `@PayloadRoot` or the `@SoapAction` annotation. You can learn more about these annotations in [Endpoint mappings](#server-endpoint-mapping).

The following example shows a handling method:

```
@PayloadRoot(localPart = "order", namespace = "http://samples")
public void order(@RequestPayload Element orderElement) {
    Order order = createOrder(orderElement);
    orderService.createOrder(order);
}
```

The `order` method takes an `Element` (annotated with `@RequestPayload`) as a parameter. This means that the payload of the message is passed to this method as a DOM element. The method has a `void` return type, indicating that no response message is sent.
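
The `createOrder(..)` helper in examples like this is application code rather than part of Spring-WS. A rough sketch of what it might do with the DOM payload follows; the `Order` setters and the element and attribute names are assumptions made for illustration:

```
private Order createOrder(Element orderElement) {
    // Read values straight from the DOM payload; all names are illustrative
    Order order = new Order();
    order.setId(Integer.parseInt(orderElement.getAttribute("id")));
    order.setCustomer(orderElement
            .getElementsByTagNameNS("http://samples", "customer")
            .item(0).getTextContent());
    return order;
}
```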

##### Handling Method Parameters

The handling method typically has one or more parameters that refer to various parts of the incoming XML message. Most commonly, the handling method has a single parameter that maps to the payload of the message, but it can also map to other parts of the request message, such as a SOAP header. This section describes the parameters you can use in your handling method signatures.

To map a parameter to the payload of the request message, you need to annotate this parameter with the `@RequestPayload` annotation. This annotation tells Spring-WS that the parameter needs to be bound to the request payload.

The following table describes the supported parameter types. It shows the supported types, whether the parameter should be annotated with `@RequestPayload`, and any additional notes.

| Name | Supported parameter types | `@RequestPayload` required? | Additional notes |
|------|---------------------------|-----------------------------|------------------|
| TrAX | `javax.xml.transform.Source` and sub-interfaces (`DOMSource`, `SAXSource`, `StreamSource`, and `StAXSource`) | Yes | Enabled by default. |
| W3C DOM | `org.w3c.dom.Element` | Yes | Enabled by default. |
| dom4j | `org.dom4j.Element` | Yes | Enabled when dom4j is on the classpath. |
| JDOM | `org.jdom.Element` | Yes | Enabled when JDOM is on the classpath. |
| XOM | `nu.xom.Element` | Yes | Enabled when XOM is on the classpath. |
| StAX | `javax.xml.stream.XMLStreamReader` and `javax.xml.stream.XMLEventReader` | Yes | Enabled when StAX is on the classpath. |
| XPath | Any `boolean`, `double`, `String`, `org.w3c.dom.Node`, `org.w3c.dom.NodeList`, or type that can be converted from a `String` by a Spring [conversion service](https://docs.spring.io/spring/docs/current/spring-framework-reference/core.html#core-convert-ConversionService-API), and that is annotated with `@XPathParam`. | No | Enabled by default. See [the section called `XPathParam`](#server-xpath-param). |
| Message context | `org.springframework.ws.context.MessageContext` | No | Enabled by default. |
| SOAP | `org.springframework.ws.soap.SoapMessage`, `org.springframework.ws.soap.SoapBody`, `org.springframework.ws.soap.SoapEnvelope`, `org.springframework.ws.soap.SoapHeader`, and `org.springframework.ws.soap.SoapHeaderElement`s when used in combination with the `@SoapHeader` annotation. | No | Enabled by default. |
| JAXB2 | Any type that is annotated with `javax.xml.bind.annotation.XmlRootElement`, and `javax.xml.bind.JAXBElement`. | Yes | Enabled when JAXB2 is on the classpath. |
| OXM | Any type supported by a Spring OXM [`Unmarshaller`](https://docs.spring.io/spring/docs/current/spring-framework-reference/data-access.html#oxm-marshaller-unmarshaller). | Yes | Enabled when the `unmarshaller` attribute of `<sws:annotation-driven/>` is specified. |

The next few examples show possible method signatures. The following method is invoked with the payload of the request message as a DOM `org.w3c.dom.Element`:

```
public void handle(@RequestPayload Element element)
```

The following method is invoked with the payload of the request message as a `javax.xml.transform.dom.DOMSource`.
The `header` parameter is bound to the SOAP header of the request message.

```
public void handle(@RequestPayload DOMSource domSource, SoapHeader header)
```

The following method is invoked with the payload of the request message unmarshalled into a `MyJaxb2Object` (which is annotated with `@XmlRootElement`). The payload of the message is also given as a DOM `Element`. The whole [message context](#message-context) is passed on as the third parameter.

```
public void handle(@RequestPayload MyJaxb2Object requestObject, @RequestPayload Element element, MessageContext messageContext)
```

As you can see, there are a lot of possibilities when it comes to defining how to handle method signatures. You can even extend this mechanism to support your own parameter types. See the Javadoc of [`DefaultMethodEndpointAdapter`](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/server/endpoint/adapter/DefaultMethodEndpointAdapter.html) and [`MethodArgumentResolver`](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/server/endpoint/adapter/method/MethodArgumentResolver.html) to see how.

###### `@XPathParam`

One parameter type needs some extra explanation: `@XPathParam`. The idea here is that you annotate one or more method parameters with an XPath expression and that each such annotated parameter is bound to the evaluation of the expression. The following example shows how to do so:

```
package samples;

import org.springframework.ws.server.endpoint.annotation.Endpoint;
import org.springframework.ws.server.endpoint.annotation.Namespace;
import org.springframework.ws.server.endpoint.annotation.PayloadRoot;
import org.springframework.ws.server.endpoint.annotation.XPathParam;

@Endpoint
public class AnnotationOrderEndpoint {

    private final OrderService orderService;

    public AnnotationOrderEndpoint(OrderService orderService) {
        this.orderService = orderService;
    }

    @PayloadRoot(localPart = "orderRequest", namespace = "http://samples")
    @Namespace(prefix = "s", uri = "http://samples")
    public Order getOrder(@XPathParam("/s:orderRequest/@id") int orderId) {
        return orderService.getOrder(orderId);
    }

}
```

Since we use the `s` prefix in our XPath expression, we must bind it to the `http://samples` namespace. This is accomplished with the `@Namespace` annotation. Alternatively, we could have placed this annotation on the type level to use the same namespace mapping for all handler methods or even on the package level (in `package-info.java`) to use it for multiple endpoints.

By using the `@XPathParam`, you can bind to all the data types supported by XPath:

* `boolean` or `Boolean`

* `double` or `Double`

* `String`

* `Node`

* `NodeList`

In addition to this list, you can use any type that can be converted from a `String` by a Spring [conversion service](https://docs.spring.io/spring/docs/current/spring-framework-reference/core.html#core-convert-ConversionService-API).

##### Handling method return types

To send a response message, the handling method needs to specify a return type. If no response message is required, the method can declare a `void` return type. Most commonly, the return type is used to create the payload of the response message. However, you can also map to other parts of the response message. This section describes the return types you can use in your handling method signatures.

To map the return value to the payload of the response message, you need to annotate the method with the `@ResponsePayload` annotation. This annotation tells Spring-WS that the return value needs to be bound to the response payload.

The following table describes the supported return types. It shows the supported types, whether the method should be annotated with `@ResponsePayload`, and any additional notes.

| Name | Supported return types | `@ResponsePayload` required? | Additional notes |
|------|------------------------|------------------------------|------------------|
| No response | `void` | No | Enabled by default. |
| TrAX | `javax.xml.transform.Source` and sub-interfaces (`DOMSource`, `SAXSource`, `StreamSource`, and `StAXSource`) | Yes | Enabled by default. |
| W3C DOM | `org.w3c.dom.Element` | Yes | Enabled by default. |
| dom4j | `org.dom4j.Element` | Yes | Enabled when dom4j is on the classpath. |
| JDOM | `org.jdom.Element` | Yes | Enabled when JDOM is on the classpath. |
| XOM | `nu.xom.Element` | Yes | Enabled when XOM is on the classpath. |
| JAXB2 | Any type that is annotated with `javax.xml.bind.annotation.XmlRootElement`, and `javax.xml.bind.JAXBElement`. | Yes | Enabled when JAXB2 is on the classpath. |
| OXM | Any type supported by a Spring OXM [`Marshaller`](https://docs.spring.io/spring/docs/current/spring-framework-reference/data-access.html#oxm-marshaller-unmarshaller). | Yes | Enabled when the `marshaller` attribute of `<sws:annotation-driven/>` is specified. |

There are a lot of possibilities when it comes to defining handling method signatures. It is even possible to extend this mechanism to support your own return types. See the class-level Javadoc of [`DefaultMethodEndpointAdapter`](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/server/endpoint/adapter/DefaultMethodEndpointAdapter.html) and [`MethodReturnValueHandler`](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/server/endpoint/adapter/method/MethodReturnValueHandler.html) to see how.

### 5.4. Endpoint mappings

The endpoint mapping is responsible for mapping incoming messages to appropriate endpoints. Some endpoint mappings are enabled by default — for example, the `PayloadRootAnnotationMethodEndpointMapping` or the `SoapActionAnnotationMethodEndpointMapping`. However, we first need to examine the general concept of an `EndpointMapping`.

An `EndpointMapping` delivers an `EndpointInvocationChain`, which contains the endpoint that matches the incoming request and may also contain a list of endpoint interceptors that are applied to the request and response. When a request comes in, the `MessageDispatcher` hands it over to the endpoint mapping to let it inspect the request and come up with an appropriate `EndpointInvocationChain`. Then the `MessageDispatcher` invokes the endpoint and any interceptors in the chain.

The concept of configurable endpoint mappings that can optionally contain interceptors (which can, in turn, manipulate the request, the response, or both) is extremely powerful. A lot of supporting functionality can be built into custom `EndpointMapping` implementations.
For example, a custom endpoint mapping could choose an endpoint based not only on the contents of a message but also on a specific SOAP header (or, indeed, multiple SOAP headers).

Most endpoint mappings inherit from the `AbstractEndpointMapping`, which offers an `interceptors` property, which is the list of interceptors to use. `EndpointInterceptors` are discussed in [Intercepting Requests — the `EndpointInterceptor` Interface](#server-endpoint-interceptor). Additionally, there is the `defaultEndpoint`, which is the default endpoint to use when this endpoint mapping does not result in a matching endpoint.

As explained in [Endpoints](#server-endpoints), the `@Endpoint` style lets you handle multiple requests in one endpoint class. This is the responsibility of the `MethodEndpointMapping`. This mapping determines which method is to be invoked for an incoming request message.

There are two endpoint mappings that can direct requests to methods: the `PayloadRootAnnotationMethodEndpointMapping` and the `SoapActionAnnotationMethodEndpointMapping`. You can enable both mappings by using `<sws:annotation-driven/>` in your application context.

The `PayloadRootAnnotationMethodEndpointMapping` uses the `@PayloadRoot` annotation, with the `localPart` and `namespace` elements, to mark methods with a particular qualified name. Whenever a message comes in with this qualified name for the payload root element, the method is invoked. For an example, see [above](#server-payload-root-annotation).

Alternatively, the `SoapActionAnnotationMethodEndpointMapping` uses the `@SoapAction` annotation to mark methods with a particular SOAP Action. Whenever a message comes in with this `SOAPAction` header, the method is invoked.

#### 5.4.1. WS-Addressing

WS-Addressing specifies a transport-neutral routing mechanism. It is based on the `To` and `Action` SOAP headers, which indicate the destination and intent of the SOAP message, respectively. Additionally, WS-Addressing lets you define a return address (for normal messages and for faults) and a unique message identifier, which can be used for correlation. For more information on WS-Addressing, see [https://en.wikipedia.org/wiki/WS-Addressing](https://en.wikipedia.org/wiki/WS-Addressing). The following example shows a WS-Addressing message:

```
<SOAP-ENV:Envelope
    xmlns:SOAP-ENV="http://www.w3.org/2003/05/soap-envelope"
    xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing">
  <SOAP-ENV:Header>
    <wsa:MessageID>urn:uuid:21363e0d-2645-4eb7-8afd-2f5ee1bb25cf</wsa:MessageID>
    <wsa:ReplyTo>
      <wsa:Address>http://example.com/business/client1</wsa:Address>
    </wsa:ReplyTo>
    <wsa:To>http://example/com/fabrikam</wsa:To>
    <wsa:Action>http://example.com/fabrikam/mail/Delete</wsa:Action>
  </SOAP-ENV:Header>
  <SOAP-ENV:Body>
    <f:Delete xmlns:f="http://example.com/fabrikam">
      <f:maxCount>42</f:maxCount>
    </f:Delete>
  </SOAP-ENV:Body>
</SOAP-ENV:Envelope>
```

In the preceding example, the destination is set to `http://example/com/fabrikam`, while the action is set to `http://example.com/fabrikam/mail/Delete`. Additionally, there is a message identifier and a reply-to address. By default, this address is the “anonymous” address, indicating that a response should be sent by using the same channel as the request (that is, the HTTP response), but it can also be another address, as indicated in this example.

In Spring Web Services, WS-Addressing is implemented as an endpoint mapping. By using this mapping, you associate WS-Addressing actions with endpoints, similar to the `SoapActionAnnotationMethodEndpointMapping` described earlier.

##### Using `AnnotationActionEndpointMapping`

The `AnnotationActionEndpointMapping` is similar to the `SoapActionAnnotationMethodEndpointMapping` but uses WS-Addressing headers instead of the SOAP Action transport header.
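
The `AnnotationActionEndpointMapping` is an ordinary endpoint mapping bean. If it is not already registered by your configuration, a minimal Java configuration sketch for declaring it might look as follows (whether you need this depends on how the rest of your context is set up):

```
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.ws.soap.addressing.server.AnnotationActionEndpointMapping;

@Configuration
public class AddressingConfig {

    // Registers the WS-Addressing endpoint mapping so that @Action methods are picked up
    @Bean
    public AnnotationActionEndpointMapping annotationActionEndpointMapping() {
        return new AnnotationActionEndpointMapping();
    }
}
```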

To use the `AnnotationActionEndpointMapping`, annotate the handling methods with the `@Action` annotation, similar to the `@PayloadRoot` and `@SoapAction` annotations described in [`@Endpoint` handling methods](#server-atEndpoint-methods) and [Endpoint mappings](#server-endpoint-mapping). The following example shows how to do so:

```
package samples;

import org.springframework.ws.server.endpoint.annotation.Endpoint;
import org.springframework.ws.soap.addressing.server.annotation.Action;

@Endpoint
public class AnnotationOrderEndpoint {

    private final OrderService orderService;

    public AnnotationOrderEndpoint(OrderService orderService) {
        this.orderService = orderService;
    }

    @Action("http://samples/RequestOrder")
    public Order getOrder(OrderRequest orderRequest) {
        return orderService.getOrder(orderRequest.getId());
    }

    @Action("http://samples/CreateOrder")
    public void order(Order order) {
        orderService.createOrder(order);
    }

}
```

The preceding mapping routes requests that have a WS-Addressing `Action` of `http://samples/RequestOrder` to the `getOrder` method. Requests with `http://samples/CreateOrder` are routed to the `order` method.

By default, the `AnnotationActionEndpointMapping` supports both the 1.0 (May 2006) and the August 2004 editions of WS-Addressing. These two versions are the most popular and are interoperable with Axis 1 and 2, JAX-WS, XFire, Windows Communication Foundation (WCF), and Web Services Enhancements (WSE) 3.0. If necessary, specific versions of the spec can be injected into the `versions` property.

In addition to the `@Action` annotation, you can annotate the class with the `@Address` annotation. If set, the value is compared to the `To` header property of the incoming message.

Finally, there is the `messageSenders` property, which is required for sending response messages to non-anonymous, out-of-band addresses. You can set `MessageSender` implementations in this property, the same as you would on the `WebServiceTemplate`. See [URIs and Transports](#client-transports).

#### 5.4.2. Intercepting Requests — the `EndpointInterceptor` Interface

The endpoint mapping mechanism has the notion of endpoint interceptors. These can be extremely useful when you want to apply specific functionality to certain requests — for example, dealing with security-related SOAP headers or the logging of request and response messages.

Endpoint interceptors are typically defined by using a `<sws:interceptors>` element in your application context. In this element, you can define endpoint interceptor beans that apply to all endpoints defined in that application context. Alternatively, you can use `<sws:payloadRoot>` or `<sws:soapAction>` elements to specify for which payload root name or SOAP action the interceptor should apply. The following example shows how to do so:

```
<sws:interceptors>
    <bean class="samples.MyGlobalInterceptor"/>
    <sws:payloadRoot namespaceUri="http://www.example.com">
        <bean class="samples.MyPayloadRootInterceptor"/>
    </sws:payloadRoot>
    <sws:soapAction value="http://www.example.com/SoapAction">
        <bean class="samples.MySoapActionInterceptor1"/>
        <ref bean="mySoapActionInterceptor2"/>
    </sws:soapAction>
</sws:interceptors>

<bean id="mySoapActionInterceptor2" class="samples.MySoapActionInterceptor2"/>
```

In the preceding example, we define one “global” interceptor (`MyGlobalInterceptor`) that intercepts all requests and responses. We also define an interceptor that applies only to XML messages that have `http://www.example.com` as the payload root namespace. We could have defined a `localPart` attribute in addition to the `namespaceUri` to further limit the messages to which the interceptor applies. Finally, we define two interceptors that apply when the message has a `http://www.example.com/SoapAction` SOAP action.
Notice how the second interceptor is actually a reference to a bean definition outside of the `<sws:interceptors>` element. You can use bean references anywhere inside the `<sws:interceptors>` element.

When you use `@Configuration` classes, you can extend from `WsConfigurerAdapter` to add interceptors:

```
@Configuration
@EnableWs
public class MyWsConfiguration extends WsConfigurerAdapter {

    @Override
    public void addInterceptors(List<EndpointInterceptor> interceptors) {
        interceptors.add(new MyPayloadRootInterceptor());
    }

}
```

Interceptors must implement the `EndpointInterceptor` interface from the `org.springframework.ws.server` package. This interface defines three methods: one that can be used for handling the request message **before** the actual endpoint is processed, one that can be used for handling a normal response message, and one that can be used for handling fault messages. The latter two are called **after** the endpoint is processed. These three methods should provide enough flexibility to do all kinds of pre- and post-processing.

The `handleRequest(..)` method on the interceptor returns a boolean value. You can use this method to interrupt or continue the processing of the invocation chain. When this method returns `true`, the endpoint processing chain continues. When it returns `false`, the `MessageDispatcher` interprets this to mean that the interceptor itself has taken care of things and does not continue processing the other interceptors and the actual endpoint in the invocation chain. The `handleResponse(..)` and `handleFault(..)` methods also have a boolean return value. When these methods return `false`, the response is not sent back to the client.

There are a number of standard `EndpointInterceptor` implementations that you can use in your Web service. Additionally, there is the `XwsSecurityInterceptor`, which is described in [`XwsSecurityInterceptor`](#security-xws-security-interceptor).

##### `PayloadLoggingInterceptor` and `SoapEnvelopeLoggingInterceptor`

When developing a web service, it can be useful to log the incoming and outgoing XML messages. Spring WS facilitates this with the `PayloadLoggingInterceptor` and `SoapEnvelopeLoggingInterceptor` classes. The former logs only the payload of the message to the Commons Logging log. The latter logs the entire SOAP envelope, including SOAP headers. The following example shows how to define the `PayloadLoggingInterceptor` in an endpoint mapping:

```
<sws:interceptors>
    <bean class="org.springframework.ws.server.endpoint.interceptor.PayloadLoggingInterceptor"/>
</sws:interceptors>
```

Both of these interceptors have two properties, `logRequest` and `logResponse`, which can be set to `false` to disable logging for either request or response messages.

You could use the `WsConfigurerAdapter` approach, as described earlier, for the `PayloadLoggingInterceptor` as well.

##### `PayloadValidatingInterceptor`

One of the benefits of using a contract-first development style is that we can use the schema to validate incoming and outgoing XML messages. Spring-WS facilitates this with the `PayloadValidatingInterceptor`. This interceptor requires a reference to one or more W3C XML or RELAX NG schemas and can be set to validate requests, responses, or both.

| |Note that request validation may sound like a good idea, but it makes the resulting Web service very strict. Usually, it is not really important whether the request validates against the schema, only whether the endpoint can get sufficient information to fulfill the request. Validating the response is a good idea, because the endpoint should adhere to its schema. Remember Postel’s Law: "Be conservative in what you do; be liberal in what you accept from others."|
"Be conservative in what you do; be liberal in what you accept from others."| +|---|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + +The following example uses the `PayloadValidatingInterceptor`. In this example, we use the schema in `/WEB-INF/orders.xsd` to validate the response but not the request. Note that the `PayloadValidatingInterceptor` can also accept multiple schemas by setting the `schemas` property. + +``` + + + + + +``` + +Of course, you could use the `WsConfigurerAdapter` approach, as described earlier, for the `PayloadValidatingInterceptor` as well. + +##### Using `PayloadTransformingInterceptor` + +To transform the payload to another XML format, Spring Web Services offers the `PayloadTransformingInterceptor`. This endpoint interceptor is based on XSLT style sheets and is especially useful when supporting multiple versions of a web service, because you can transform the older message format to the newer format. The following example uses the `PayloadTransformingInterceptor`: + +``` + + + + +``` + +In the preceding example, we transform requests by using `/WEB-INF/oldRequests.xslt` and response messages by using `/WEB-INF/oldResponses.xslt`. Note that, since endpoint interceptors are registered at the endpoint-mapping level, you can create an endpoint mapping that applies to the “old style” messages and add the interceptor to that mapping. Hence, the transformation applies only to these “old style” message. + +You could use the `WsConfigurerAdapter` approach, as described earlier, for the `PayloadTransformingInterceptor` as well. + +### 5.5. Handling Exceptions + +Spring-WS provides `EndpointExceptionResolvers` to ease the pain of unexpected exceptions occurring while your message is being processed by an endpoint that matched the request. Endpoint exception resolvers somewhat resemble the exception mappings that can be defined in the web application descriptor `web.xml`. However, they provide a more flexible way to handle exceptions. They provide information about what endpoint was invoked when the exception was thrown. Furthermore, a programmatic way of handling exceptions gives you many more options for how to respond appropriately. Rather than expose the innards of your application by giving an exception and stack trace, you can handle the exception any way you want — for example, by returning a SOAP fault with a specific fault code and string. + +Endpoint exception resolvers are automatically picked up by the `MessageDispatcher`, so no explicit configuration is necessary. + +Besides implementing the `EndpointExceptionResolver` interface, which is only a matter of implementing the `resolveException(MessageContext, endpoint, Exception)` method, you may also use one of the provided implementations. The simplest implementation is the `SimpleSoapExceptionResolver`, which creates a SOAP 1.1 Server or SOAP 1.2 Receiver fault and uses the exception message as the fault string. The `SimpleSoapExceptionResolver` is the default, but it can be overridden by explicitly adding another resolver. + +#### 5.5.1. 

The `SoapFaultMappingExceptionResolver` is a more sophisticated implementation. This resolver lets you take the class name of any exception that might be thrown and map it to a SOAP fault:

```
<beans>
    <bean id="exceptionResolver"
            class="org.springframework.ws.soap.server.endpoint.SoapFaultMappingExceptionResolver">
        <property name="defaultFault" value="SERVER"/>
        <property name="exceptionMappings">
            <value>
                org.springframework.oxm.ValidationFailureException=CLIENT,Invalid request
            </value>
        </property>
    </bean>
</beans>
```

The mapping values and the default fault use a format of `faultCode,faultString,locale`, where only the fault code is required. If the fault string is not set, it defaults to the exception message. If the language is not set, it defaults to English. The preceding configuration maps exceptions of type `ValidationFailureException` to a client-side SOAP fault with a fault string of `Invalid request`, as follows:

```
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
    <SOAP-ENV:Body>
        <SOAP-ENV:Fault>
            <faultcode>SOAP-ENV:Client</faultcode>
            <faultstring xml:lang="en">Invalid request</faultstring>
        </SOAP-ENV:Fault>
    </SOAP-ENV:Body>
</SOAP-ENV:Envelope>
```

If any other exception occurs, it returns the default fault: a server-side fault with the exception message as the fault string.

#### 5.5.2. Using `SoapFaultAnnotationExceptionResolver`

You can also annotate exception classes with the `@SoapFault` annotation, to indicate the SOAP fault that should be returned whenever that exception is thrown. For these annotations to be picked up, you need to add the `SoapFaultAnnotationExceptionResolver` to your application context. The elements of the annotation include a fault code enumeration, fault string or reason, and language. The following example shows such an exception:

```
package samples;

import org.springframework.ws.soap.server.endpoint.annotation.FaultCode;
import org.springframework.ws.soap.server.endpoint.annotation.SoapFault;

@SoapFault(faultCode = FaultCode.SERVER)
public class MyBusinessException extends Exception {

    public MyBusinessException(String message) {
        super(message);
    }
}
```

Whenever the `MyBusinessException` is thrown with the constructor string `"Oops!"` during endpoint invocation, it results in the following response:

```
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
    <SOAP-ENV:Body>
        <SOAP-ENV:Fault>
            <faultcode>SOAP-ENV:Server</faultcode>
            <faultstring xml:lang="en">Oops!</faultstring>
        </SOAP-ENV:Fault>
    </SOAP-ENV:Body>
</SOAP-ENV:Envelope>
```

### 5.6. Server-side Testing

When it comes to testing your Web service endpoints, you have two possible approaches:

* Write unit tests, where you provide (mock) arguments for your endpoint to consume.

  The advantage of this approach is that it is quite easy to accomplish (especially for classes annotated with `@Endpoint`). The disadvantage is that you are not really testing the exact content of the XML messages that are sent over the wire.

* Write integration tests, which do test the contents of the message.

The first approach can easily be accomplished with mocking frameworks such as EasyMock, JMock, and others. The next section focuses on writing integration tests, using the test features introduced in Spring Web Services 2.0.

#### 5.6.1. Writing server-side integration tests

Spring Web Services 2.0 introduced support for creating endpoint integration tests. In this context, an endpoint is a class that handles (SOAP) messages (see [Endpoints](#server-endpoints)).

The integration test support lives in the `org.springframework.ws.test.server` package. The core class in that package is the `MockWebServiceClient`. The underlying idea is that this client creates a request message and then sends it over to the endpoints that are configured in a standard `MessageDispatcherServlet` application context (see [`MessageDispatcherServlet`](#message-dispatcher-servlet)). These endpoints handle the message and create a response. The client then receives this response and verifies it against registered expectations.

The typical usage of the `MockWebServiceClient` is as follows:

1. Create a `MockWebServiceClient` instance by calling `MockWebServiceClient.createClient(ApplicationContext)` or `MockWebServiceClient.createClient(WebServiceMessageReceiver, WebServiceMessageFactory)`.

2. Send request messages by calling `sendRequest(RequestCreator)`, possibly by using the default `RequestCreator` implementations provided in `RequestCreators` (which can be statically imported).

3. Set up response expectations by calling `andExpect(ResponseMatcher)`, possibly by using the default `ResponseMatcher` implementations provided in `ResponseMatchers` (which can be statically imported). Multiple expectations can be set up by chaining `andExpect(ResponseMatcher)` calls.

| |Note that the `MockWebServiceClient` (and related classes) offers a “fluent” API, so you can typically use the code-completion features in your IDE to guide you through the process of setting up the mock client.|
|---|---|

| |Also note that you can rely on the standard logging features available in Spring Web Services in your unit tests. Sometimes, it might be useful to inspect the request or response message to find out why a particular test failed. See [Message Logging and Tracing](#logging) for more information.|
|---|---|

Consider, for example, the following web service endpoint class:

```
import org.springframework.ws.server.endpoint.annotation.Endpoint;
import org.springframework.ws.server.endpoint.annotation.RequestPayload;
import org.springframework.ws.server.endpoint.annotation.ResponsePayload;

@Endpoint (1)
public class CustomerEndpoint {

    @ResponsePayload (2)
    public CustomerCountResponse getCustomerCount( (2)
            @RequestPayload CustomerCountRequest request) { (2)
        CustomerCountResponse response = new CustomerCountResponse();
        response.setCustomerCount(10);
        return response;
    }

}
```

|**1**| The `CustomerEndpoint` is annotated with `@Endpoint`. See [Endpoints](#server-endpoints). |
|-----|---|
|**2**| The `getCustomerCount()` method takes a `CustomerCountRequest` as its argument and returns a `CustomerCountResponse`. Both of these classes are objects supported by a marshaller. For instance, they can have an `@XmlRootElement` annotation to be supported by JAXB2. |

The following example shows a typical test for `CustomerEndpoint`:

```
import javax.xml.transform.Source;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationContext;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.xml.transform.StringSource;

import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;

import org.springframework.ws.test.server.MockWebServiceClient; (1)
import static org.springframework.ws.test.server.RequestCreators.*; (1)
import static org.springframework.ws.test.server.ResponseMatchers.*; (1)

@RunWith(SpringJUnit4ClassRunner.class) (2)
@ContextConfiguration("spring-ws-servlet.xml") (2)
public class CustomerEndpointIntegrationTest {

    @Autowired
    private ApplicationContext applicationContext; (3)

    private MockWebServiceClient mockClient;

    @Before
    public void createClient() {
        mockClient = MockWebServiceClient.createClient(applicationContext); (4)
    }

    @Test
    public void customerEndpoint() throws Exception {
        // The element names and namespace below are illustrative
        Source requestPayload = new StringSource(
                "<customerCountRequest xmlns='http://springframework.org/spring-ws'>"
                        + "<customerName>John Doe</customerName>"
                        + "</customerCountRequest>");
        Source responsePayload = new StringSource(
                "<customerCountResponse xmlns='http://springframework.org/spring-ws'>"
                        + "<customerCount>10</customerCount>"
                        + "</customerCountResponse>");

        mockClient.sendRequest(withPayload(requestPayload)). (5)
                andExpect(payload(responsePayload)); (5)
    }
}
```

|**1**| The `CustomerEndpointIntegrationTest` imports the `MockWebServiceClient` and statically imports `RequestCreators` and `ResponseMatchers`. |
|-----|---|
|**2**| This test uses the standard testing facilities provided in the Spring Framework. This is not required but is generally the easiest way to set up the test. |
|**3**| The application context is a standard Spring-WS application context (see [`MessageDispatcherServlet`](#message-dispatcher-servlet)), read from `spring-ws-servlet.xml`. In this case, the application context contains a bean definition for `CustomerEndpoint` (or perhaps a `<context:component-scan/>` is used). |
|**4**| In a `@Before` method, we create a `MockWebServiceClient` by using the `createClient` factory method. |
|**5**| We send a request by calling `sendRequest()` with a `withPayload()` `RequestCreator` provided by the statically imported `RequestCreators` (see [Using `RequestCreator` and `RequestCreators`](#server-test-request-creator)). We also set up response expectations by calling `andExpect()` with a `payload()` `ResponseMatcher` provided by the statically imported `ResponseMatchers` (see [Using `ResponseMatcher` and `ResponseMatchers`](#server-test-response-matcher)). This part of the test might look a bit confusing, but the code-completion features of your IDE are of great help. After typing `sendRequest(`, your IDE can provide you with a list of possible request-creating strategies, provided you statically imported `RequestCreators`. The same applies to `andExpect()`, provided you statically imported `ResponseMatchers`. |

#### 5.6.2. Using `RequestCreator` and `RequestCreators`

Initially, the `MockWebServiceClient` needs to create a request message for the endpoint to consume. The client uses the `RequestCreator` strategy interface for this purpose:

```
public interface RequestCreator {

    WebServiceMessage createRequest(WebServiceMessageFactory messageFactory)
        throws IOException;

}
```

You can write your own implementations of this interface, creating a request message by using the message factory, but you certainly do not have to. The `RequestCreators` class provides a way to create a `RequestCreator` based on a given payload in the `withPayload()` method. You typically statically import `RequestCreators`.

#### 5.6.3. Using `ResponseMatcher` and `ResponseMatchers`

When the request message has been processed by the endpoint and a response has been received, the `MockWebServiceClient` can verify whether this response message meets certain expectations. The client uses the `ResponseMatcher` strategy interface for this purpose:

```
public interface ResponseMatcher {

    void match(WebServiceMessage request,
               WebServiceMessage response)
        throws IOException, AssertionError;

}
```

Once again, you can write your own implementations of this interface, throwing `AssertionError` instances when the message does not meet your expectations, but you certainly do not have to, as the `ResponseMatchers` class provides standard `ResponseMatcher` implementations for you to use in your tests. You typically statically import this class.

The `ResponseMatchers` class provides the following response matchers:

| `ResponseMatchers` method | Description |
|---------------------------|-------------|
| `payload()` | Expects a given response payload. |
| `validPayload()` | Expects the response payload to validate against given XSD schemas. |
| `xpath()` | Expects a given XPath expression to exist, not exist, or evaluate to a given value. |
| `soapHeader()` | Expects a given SOAP header to exist in the response message. |
| `noFault()` | Expects that the response message does not contain a SOAP Fault. |
| `mustUnderstandFault()`, `clientOrSenderFault()`, `serverOrReceiverFault()`, and `versionMismatchFault()` | Expects the response message to contain a specific SOAP Fault. |

You can set up multiple response expectations by chaining `andExpect()` calls:

```
mockClient.sendRequest(...).
        andExpect(payload(expectedResponsePayload)).
        andExpect(validPayload(schemaResource));
```

For more information on the response matchers provided by `ResponseMatchers`, see the [Javadoc](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/test/server/ResponseMatchers.html).

## 6. Using Spring Web Services on the Client

Spring-WS provides a client-side Web service API that allows for consistent, XML-driven access to web services. It also caters to the use of marshallers and unmarshallers so that your service-tier code can deal exclusively with Java objects.
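
As a small taste of what that looks like, the following sketch sends a marshalled request through a `WebServiceTemplate` (the `OrderRequest` and `OrderResponse` classes and the URI are illustrative, and the `Jaxb2Marshaller` is assumed to be configured elsewhere):

```
import org.springframework.oxm.jaxb.Jaxb2Marshaller;
import org.springframework.ws.client.core.WebServiceTemplate;

public class OrderClientSketch {

    // OrderRequest and OrderResponse stand for your JAXB2-annotated classes
    public OrderResponse fetchOrder(Jaxb2Marshaller marshaller) {
        // A Jaxb2Marshaller implements both Marshaller and Unmarshaller,
        // so the template can use it in both directions
        WebServiceTemplate template = new WebServiceTemplate(marshaller);
        template.setDefaultUri("http://example.com/OrderService"); // illustrative URI
        return (OrderResponse) template.marshalSendAndReceive(new OrderRequest(42));
    }
}
```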

The `org.springframework.ws.client.core` package provides the core functionality for using the client-side access API. It contains template classes that simplify the use of Web services, much like the core Spring `JdbcTemplate` does for JDBC. The design principle common to Spring template classes is to provide helper methods to perform common operations and, for more sophisticated usage, delegate to user-implemented callback interfaces. The web service template follows the same design. The classes offer various convenience methods for:

* Sending and receiving XML messages

* Marshalling objects to XML before sending

* Allowing for multiple transport options

### 6.1. Using the Client-side API

This section describes how to use the client-side API. For how to use the server-side API, see [Creating a Web service with Spring-WS](#server).

#### 6.1.1. `WebServiceTemplate`

The `WebServiceTemplate` is the core class for client-side web service access in Spring-WS. It contains methods for sending `Source` objects and receiving response messages as either `Source` or `Result`. Additionally, it can marshal objects to XML before sending them across a transport and unmarshal any response XML into an object again.

##### URIs and Transports

The `WebServiceTemplate` class uses a URI as the message destination. You can either set a `defaultUri` property on the template itself or explicitly supply a URI when calling a method on the template. The URI is resolved into a `WebServiceMessageSender`, which is responsible for sending the XML message across a transport layer. You can set one or more message senders by using the `messageSender` or `messageSenders` properties of the `WebServiceTemplate` class.

###### HTTP transports

There are two implementations of the `WebServiceMessageSender` interface for sending messages over HTTP. The default implementation is the `HttpUrlConnectionMessageSender`, which uses the facilities provided by Java itself. The alternative is the `HttpComponentsMessageSender`, which uses the [Apache HttpComponents HttpClient](https://hc.apache.org/httpcomponents-client-ga). Use the latter if you need more advanced and easy-to-use functionality (such as authentication, HTTP connection pooling, and so forth).

To use the HTTP transport, either set the `defaultUri` to something like `http://example.com/services` or supply the `uri` parameter for one of the methods.

The following example shows how to use the default configuration for HTTP transports:

```
<beans xmlns="http://www.springframework.org/schema/beans">

    <bean id="messageFactory" class="org.springframework.ws.soap.saaj.SaajSoapMessageFactory"/>

    <bean id="webServiceTemplate" class="org.springframework.ws.client.core.WebServiceTemplate">
        <constructor-arg ref="messageFactory"/>
        <property name="defaultUri" value="http://example.com/WebService"/>
    </bean>

</beans>
```

The following example shows how to override the default configuration and how to use Apache HttpClient to authenticate with HTTP authentication:

```
<bean id="webServiceTemplate" class="org.springframework.ws.client.core.WebServiceTemplate">
    <constructor-arg ref="messageFactory"/>
    <property name="messageSender">
        <bean class="org.springframework.ws.transport.http.HttpComponentsMessageSender">
            <property name="credentials">
                <!-- username:password; the values are placeholders -->
                <bean class="org.apache.http.auth.UsernamePasswordCredentials">
                    <constructor-arg value="john:secret"/>
                </bean>
            </property>
        </bean>
    </property>
    <property name="defaultUri" value="http://example.com/WebService"/>
</bean>
```

###### JMS transport

For sending messages over JMS, Spring Web Services provides `JmsMessageSender`. This class uses the facilities of the Spring framework to transform the `WebServiceMessage` into a JMS `Message`, send it on its way on a `Queue` or `Topic`, and receive a response (if any).

To use `JmsMessageSender`, you need to set the `defaultUri` or `uri` parameter to a JMS URI, which — at a minimum — consists of the `jms:` prefix and a destination name. Some examples of JMS URIs are: `jms:SomeQueue`, `jms:SomeTopic?priority=3&deliveryMode=NON_PERSISTENT`, and `jms:RequestQueue?replyToName=ResponseName`.
For more information on this URI syntax, see the [Javadoc for `JmsMessageSender`](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/transport/jms/JmsMessageSender.html).

By default, the `JmsMessageSender` sends JMS `BytesMessage`, but you can override this to use `TextMessages` by using the `messageType` parameter on the JMS URI — for example, `jms:Queue?messageType=TEXT_MESSAGE`. Note that `BytesMessages` are the preferred type, because `TextMessages` do not support attachments and character encodings reliably.

The following example shows how to use the JMS transport in combination with an ActiveMQ connection factory:

```
<beans>

    <bean id="messageFactory" class="org.springframework.ws.soap.saaj.SaajSoapMessageFactory"/>

    <bean id="connectionFactory" class="org.apache.activemq.ActiveMQConnectionFactory">
        <property name="brokerURL" value="vm://localhost?broker.persistent=false"/>
    </bean>

    <bean id="webServiceTemplate" class="org.springframework.ws.client.core.WebServiceTemplate">
        <constructor-arg ref="messageFactory"/>
        <property name="messageSender">
            <bean class="org.springframework.ws.transport.jms.JmsMessageSender">
                <property name="connectionFactory" ref="connectionFactory"/>
            </bean>
        </property>
        <property name="defaultUri" value="jms:RequestQueue?deliveryMode=NON_PERSISTENT"/>
    </bean>

</beans>
```

###### Email Transport

Spring Web Services also provides an email transport, which you can use to send web service messages over SMTP and retrieve them over either POP3 or IMAP. The client-side email functionality is contained in the `MailMessageSender` class. This class creates an email message from the request `WebServiceMessage` and sends it over SMTP. It then waits for a response message to arrive at the incoming POP3 or IMAP server.

To use the `MailMessageSender`, set the `defaultUri` or `uri` parameter to a `mailto` URI — for example, `mailto:john@example.com` or `mailto:server@example.com?subject=SOAP%20Test`. Make sure that the message sender is properly configured with a `transportUri`, which indicates the server to use for sending requests (typically an SMTP server), and a `storeUri`, which indicates the server to poll for responses (typically a POP3 or IMAP server).

The following example shows how to use the email transport (the addresses and credentials are placeholders):

```
<beans>

    <bean id="messageFactory" class="org.springframework.ws.soap.saaj.SaajSoapMessageFactory"/>

    <bean id="messageSender" class="org.springframework.ws.transport.mail.MailMessageSender">
        <property name="from" value="client@example.com"/>
        <property name="transportUri" value="smtp://client:secret@smtp.example.com"/>
        <property name="storeUri" value="imap://client:secret@imap.example.com/INBOX"/>
    </bean>

    <bean id="webServiceTemplate" class="org.springframework.ws.client.core.WebServiceTemplate">
        <constructor-arg ref="messageFactory"/>
        <property name="messageSender" ref="messageSender"/>
        <property name="defaultUri" value="mailto:server@example.com?subject=SOAP%20Test"/>
    </bean>

</beans>
```

###### XMPP Transport

Spring Web Services 2.0 introduced an XMPP (Jabber) transport, which you can use to send and receive web service messages over XMPP. The client-side XMPP functionality is contained in the `XmppMessageSender` class. This class creates an XMPP message from the request `WebServiceMessage` and sends it over XMPP. It then listens for a response message to arrive.

To use the `XmppMessageSender`, set the `defaultUri` or `uri` parameter to an `xmpp` URI — for example, `xmpp:johndoe@jabber.org`. The sender also requires an `XMPPConnection` to work, which can be conveniently created by using the `org.springframework.ws.transport.xmpp.support.XmppConnectionFactoryBean`.

The following example shows how to use the XMPP transport (the host and credentials are placeholders):

```
<beans>

    <bean id="messageFactory" class="org.springframework.ws.soap.saaj.SaajSoapMessageFactory"/>

    <bean id="connection" class="org.springframework.ws.transport.xmpp.support.XmppConnectionFactoryBean">
        <property name="host" value="jabber.org"/>
        <property name="username" value="username"/>
        <property name="password" value="password"/>
    </bean>

    <bean id="webServiceTemplate" class="org.springframework.ws.client.core.WebServiceTemplate">
        <constructor-arg ref="messageFactory"/>
        <property name="messageSender">
            <bean class="org.springframework.ws.transport.xmpp.XmppMessageSender">
                <property name="connection" ref="connection"/>
            </bean>
        </property>
        <property name="defaultUri" value="xmpp:johndoe@jabber.org"/>
    </bean>

</beans>
```

##### Message factories

In addition to a message sender, the `WebServiceTemplate` requires a web service message factory. There are two message factories for SOAP: `SaajSoapMessageFactory` and `AxiomSoapMessageFactory`. If no message factory is specified (by setting the `messageFactory` property), Spring-WS uses the `SaajSoapMessageFactory` by default.

#### 6.1.2. Sending and Receiving a `WebServiceMessage`

The `WebServiceTemplate` contains many convenience methods to send and receive web service messages. There are methods that accept and return a `Source` and those that return a `Result`. Additionally, there are methods that marshal and unmarshal objects to XML.

The following example sends a simple XML message to a web service:

```
import java.io.StringReader;
import javax.xml.transform.stream.StreamResult;
import javax.xml.transform.stream.StreamSource;

import org.springframework.ws.client.core.WebServiceTemplate;

public class WebServiceClient {

    private static final String MESSAGE =
        "<message xmlns=\"http://tempuri.org\">Hello, Web Service World</message>";

    private final WebServiceTemplate webServiceTemplate = new WebServiceTemplate();

    public void setDefaultUri(String defaultUri) {
        webServiceTemplate.setDefaultUri(defaultUri);
    }

    // send to the configured default URI
    public void simpleSendAndReceive() {
        StreamSource source = new StreamSource(new StringReader(MESSAGE));
        StreamResult result = new StreamResult(System.out);
        webServiceTemplate.sendSourceAndReceiveToResult(source, result);
    }

    // send to an explicit URI
    public void customSendAndReceive() {
        StreamSource source = new StreamSource(new StringReader(MESSAGE));
        StreamResult result = new StreamResult(System.out);
        webServiceTemplate.sendSourceAndReceiveToResult("http://localhost:8080/AnotherWebService",
                source, result);
    }

}
```

```
<beans xmlns="http://www.springframework.org/schema/beans">

    <bean id="webServiceClient" class="WebServiceClient">
        <property name="defaultUri" value="http://localhost:8080/WebService"/>
    </bean>

</beans>
```

The preceding example uses the `WebServiceTemplate` to send a “Hello, World” message to the web service located at `http://localhost:8080/WebService` (in the case of the `simpleSendAndReceive()` method) and writes the result to the console. The `WebServiceTemplate` is injected with the default URI, which is used because no URI was supplied explicitly in the Java code.

Note that the `WebServiceTemplate` class is thread-safe once configured (assuming that all of its dependencies are also thread-safe, which is the case for all of the dependencies that ship with Spring-WS), so multiple objects can use the same shared `WebServiceTemplate` instance. The `WebServiceTemplate` exposes a zero-argument constructor and `messageFactory` and `messageSender` bean properties that you can use to construct the instance (by using a Spring container or plain Java code). Alternatively, consider deriving from Spring-WS’s `WebServiceGatewaySupport` convenience base class, which exposes convenient bean properties to enable easy configuration. (You do not have to extend this base class. It is provided as a convenience class only.)

#### 6.1.3. Sending and Receiving POJOs — Marshalling and Unmarshalling

To facilitate the sending of plain Java objects, the `WebServiceTemplate` has a number of `send(..)` methods that take an `Object` as an argument for a message’s data content. The method `marshalSendAndReceive(..)` in the `WebServiceTemplate` class delegates the conversion of the request object to XML to a `Marshaller` and the conversion of the response XML to an object to an `Unmarshaller`. (For more information about marshalling and unmarshalling, see [the Spring Framework reference documentation](https://docs.spring.io/spring/docs/current/spring-framework-reference/data-access.html#oxm-marshaller-unmarshaller).) By using the marshallers, your application code can focus on the business object that is being sent or received and not be concerned with the details of how it is represented as XML. To use the marshalling functionality, you have to set a marshaller and an unmarshaller with the `marshaller` and `unmarshaller` properties of the `WebServiceTemplate` class.

#### 6.1.4. Using `WebServiceMessageCallback`

To accommodate setting SOAP headers and other settings on the message, the `WebServiceMessageCallback` interface gives you access to the message after it has been created but before it is sent. The following example demonstrates how to set the SOAP action header on a message that is created by marshalling an object:

```
public void marshalWithSoapActionHeader(MyObject o) {

    webServiceTemplate.marshalSendAndReceive(o, new WebServiceMessageCallback() {

        public void doWithMessage(WebServiceMessage message) {
            ((SoapMessage)message).setSoapAction("http://tempuri.org/Action");
        }
    });
}
```

| |Note that you can also use the `org.springframework.ws.soap.client.core.SoapActionCallback` to set the SOAP action header.|
|---|---|

##### WS-Addressing

In addition to the [server-side WS-Addressing](#server-ws-addressing) support, Spring Web Services also has support for this specification on the client side.

For setting WS-Addressing headers on the client, you can use the `org.springframework.ws.soap.addressing.client.ActionCallback`. This callback takes the desired action header as a parameter. It also has constructors for specifying the WS-Addressing version and a `To` header. If not specified, the `To` header defaults to the URL of the connection being made.

The following example sets the `Action` header to `http://samples/RequestOrder`:

```
webServiceTemplate.marshalSendAndReceive(o, new ActionCallback("http://samples/RequestOrder"));
```

#### 6.1.5. Using `WebServiceMessageExtractor`

The `WebServiceMessageExtractor` interface is a low-level callback interface that gives you full control over the process of extracting an `Object` from a received `WebServiceMessage`. The `WebServiceTemplate` invokes the `extractData(..)` method on a supplied `WebServiceMessageExtractor` while the underlying connection to the serving resource is still open. The following example shows the `WebServiceMessageExtractor` in action:

```
public void sendAndExtract(final Source s) {
    final Transformer transformer = transformerFactory.newTransformer();
    webServiceTemplate.sendAndReceive(new WebServiceMessageCallback() {
        public void doWithMessage(WebServiceMessage message)
                throws IOException, TransformerException {
            transformer.transform(s, message.getPayloadResult());
        }
    },
    new WebServiceMessageExtractor() {
        public Object extractData(WebServiceMessage message) throws IOException {
            // do your own transforms with message.getPayloadResult()
            // or message.getPayloadSource() and return the extracted object
            return null;
        }
    });
}
```

### 6.2. Client-side Testing

When it comes to testing your Web service clients (that is, classes that use the `WebServiceTemplate` to access a Web service), you have two possible approaches:

* Write unit tests, which mock away the `WebServiceTemplate` class, `WebServiceOperations` interface, or the complete client class.

  The advantage of this approach is that it is easy to accomplish. The disadvantage is that you are not really testing the exact content of the XML messages that are sent over the wire, especially when mocking out the entire client class.

* Write integration tests, which do test the contents of the message.

The first approach can easily be accomplished with mocking frameworks, such as EasyMock, JMock, and others.
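
For instance, a minimal sketch of the first approach with Mockito follows; the payload classes and the `SimpleCustomerClient` are invented here for illustration and stand in for your own client code:

```
import static org.junit.Assert.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.junit.Test;
import org.springframework.ws.client.core.WebServiceOperations;

public class SimpleCustomerClientUnitTest {

    // Stand-ins for JAXB2-annotated request and response payload classes
    static class CustomerCountRequest {}

    static class CustomerCountResponse {
        private final int customerCount;
        CustomerCountResponse(int customerCount) { this.customerCount = customerCount; }
        int getCustomerCount() { return customerCount; }
    }

    // Invented client class that depends only on the WebServiceOperations interface
    static class SimpleCustomerClient {
        private final WebServiceOperations operations;
        SimpleCustomerClient(WebServiceOperations operations) { this.operations = operations; }
        int getCustomerCount() {
            CustomerCountResponse response = (CustomerCountResponse)
                    operations.marshalSendAndReceive(new CustomerCountRequest());
            return response.getCustomerCount();
        }
    }

    @Test
    public void returnsCustomerCountFromResponse() {
        // Stub the template behind its interface; no XML is created or parsed
        WebServiceOperations operations = mock(WebServiceOperations.class);
        when(operations.marshalSendAndReceive(any(CustomerCountRequest.class)))
                .thenReturn(new CustomerCountResponse(10));

        assertEquals(10, new SimpleCustomerClient(operations).getCustomerCount());
    }
}
```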
The next section focuses on writing integration tests, using the test features introduced in Spring Web Services 2.0.

#### 6.2.1. Writing Client-side Integration Tests

Spring Web Services 2.0 introduced support for creating Web service client integration tests. In this context, a client is a class that uses the `WebServiceTemplate` to access a web service.

The integration test support lives in the `org.springframework.ws.test.client` package. The core class in that package is the `MockWebServiceServer`. The underlying idea is that the web service template connects to this mock server and sends it a request message, which the mock server then verifies against the registered expectations. If the expectations are met, the mock server then prepares a response message, which is sent back to the template.

The typical usage of the `MockWebServiceServer` is as follows:

1. Create a `MockWebServiceServer` instance by calling `MockWebServiceServer.createServer(WebServiceTemplate)`, `MockWebServiceServer.createServer(WebServiceGatewaySupport)`, or `MockWebServiceServer.createServer(ApplicationContext)`.

2. Set up request expectations by calling `expect(RequestMatcher)`, possibly by using the default `RequestMatcher` implementations provided in `RequestMatchers` (which can be statically imported). Multiple expectations can be set up by chaining `andExpect(RequestMatcher)` calls.

3. Create an appropriate response message by calling `andRespond(ResponseCreator)`, possibly by using the default `ResponseCreator` implementations provided in `ResponseCreators` (which can be statically imported).

4. Use the `WebServiceTemplate` as normal, either directly or through client code.

5. Call `MockWebServiceServer.verify()` to make sure that all expectations have been met.

| |Note that the `MockWebServiceServer` (and related classes) offers a “fluent” API, so you can typically use the code-completion features in your IDE to guide you through the process of setting up the mock server.|
|---|---|

| |Also note that you can rely on the standard logging features available in Spring Web Services in your unit tests. Sometimes, it might be useful to inspect the request or response message to find out why a particular test failed. See [Message Logging and Tracing](#logging) for more information.|
|---|---|

Consider, for example, the following Web service client class:

```
import org.springframework.ws.client.core.support.WebServiceGatewaySupport;

public class CustomerClient extends WebServiceGatewaySupport { (1)

    public int getCustomerCount() {
        CustomerCountRequest request = new CustomerCountRequest(); (2)
        request.setCustomerName("John Doe");

        CustomerCountResponse response =
            (CustomerCountResponse) getWebServiceTemplate().marshalSendAndReceive(request); (3)

        return response.getCustomerCount();
    }

}
```

|**1**| The `CustomerClient` extends `WebServiceGatewaySupport`, which provides it with a `webServiceTemplate` property. |
|
+|-----|---|
+|**2**| `CustomerCountRequest` is an object supported by a marshaller. For instance, it can have an `@XmlRootElement` annotation to be supported by JAXB2. |
+|**3**|The `CustomerClient` uses the `WebServiceTemplate` offered by `WebServiceGatewaySupport` to marshal the request object into a SOAP message and send it to the web service. The response object is unmarshalled into a `CustomerCountResponse`.|
+
+The following example shows a typical test for `CustomerClient`:
+
+```
+import javax.xml.transform.Source;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+import org.springframework.xml.transform.StringSource;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import static org.junit.Assert.assertEquals;
+
+import org.springframework.ws.test.client.MockWebServiceServer; (1)
+import static org.springframework.ws.test.client.RequestMatchers.*; (1)
+import static org.springframework.ws.test.client.ResponseCreators.*; (1)
+
+@RunWith(SpringJUnit4ClassRunner.class) (2)
+@ContextConfiguration("integration-test.xml") (2)
+public class CustomerClientIntegrationTest {
+
+    @Autowired
+    private CustomerClient client; (3)
+
+    private MockWebServiceServer mockServer; (4)
+
+    @Before
+    public void createServer() throws Exception {
+        mockServer = MockWebServiceServer.createServer(client);
+    }
+
+    @Test
+    public void customerClient() throws Exception {
+        Source requestPayload = new StringSource(
+            "<customerCountRequest xmlns='http://springframework.org/spring-ws'>" +
+                "<customerName>John Doe</customerName>" +
+            "</customerCountRequest>");
+        Source responsePayload = new StringSource(
+            "<customerCountResponse xmlns='http://springframework.org/spring-ws'>" +
+                "<customerCount>10</customerCount>" +
+            "</customerCountResponse>");
+
+        mockServer.expect(payload(requestPayload)).andRespond(withPayload(responsePayload));(5)
+
+        int result = client.getCustomerCount(); (6)
+        assertEquals(10, result); (6)
+
+        mockServer.verify(); (7)
+    }
+
+}
+```
+
+|**1**| The `CustomerClientIntegrationTest` imports the `MockWebServiceServer` and statically imports `RequestMatchers` and `ResponseCreators`. |
+|-----|---|
+|**2**| This test uses the standard testing facilities provided in the Spring Framework. This is not required but is generally the easiest way to set up the test. |
+|**3**| The `CustomerClient` is configured in `integration-test.xml` and wired into this test using `@Autowired`. |
+|**4**| In a `@Before` method, we create a `MockWebServiceServer` by using the `createServer` factory method.
| +|**5**|We define expectations by calling `expect()` with a `payload()` `RequestMatcher` provided by the statically imported `RequestMatchers` (see [Using `RequestMatcher` and `RequestMatchers`](#client-test-request-matcher)).

We also set up a response by calling `andRespond()` with a `withPayload()` `ResponseCreator` provided by the statically imported `ResponseCreators` (see [Using `ResponseCreator` and `ResponseCreators`](#client-test-response-creator)).

This part of the test might look a bit confusing, but the code-completion features of your IDE are of great help. After you type `expect(`, your IDE can provide you with a list of possible request matching strategies, provided you statically imported `RequestMatchers`. The same applies to `andRespond(`, provided you statically imported `ResponseCreators`.| +|**6**| We call `getCustomerCount()` on the `CustomerClient`, thus using the `WebServiceTemplate`. The template has been set up for “testing mode” by now, so no real (HTTP) connection is made by this method call. We also make some JUnit assertions based on the result of the method call. | +|**7**| We call `verify()` on the `MockWebServiceServer`, verifying that the expected message was actually received. | + +#### 6.2.2. Using `RequestMatcher` and `RequestMatchers` + +To verify whether the request message meets certain expectations, the `MockWebServiceServer` uses the `RequestMatcher` strategy interface. The contract defined by this interface is as follows: + +``` +public interface RequestMatcher { + + void match(URI uri, + WebServiceMessage request) + throws IOException, + AssertionError; +} +``` + +You can write your own implementations of this interface, throwing `AssertionError` exceptions when the message does not meet your expectations, but you certainly do not have to. The `RequestMatchers` class provides standard `RequestMatcher` implementations for you to use in your tests. You typically statically import this class. + +The `RequestMatchers` class provides the following request matchers: + +|`RequestMatchers` method| Description | +|------------------------|-----------------------------------------------------------------------------------| +| `anything()` | Expects any sort of request. | +| `payload()` | Expects a given request payload. | +| `validPayload()` | Expects the request payload to validate against given XSD schemas. | +| `xpath()` |Expects a given XPath expression to exist, not exist, or evaluate to a given value.| +| `soapHeader()` | Expects a given SOAP header to exist in the request message. | +| `connectionTo()` | Expects a connection to the given URL. | + +You can set up multiple request expectations by chaining `andExpect()` calls: + +``` +mockServer.expect(connectionTo("http://example.com")). + andExpect(payload(expectedRequestPayload)). + andExpect(validPayload(schemaResource)). + andRespond(...); +``` + +For more information on the request matchers provided by `RequestMatchers`, see the [Javadoc](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/test/client/RequestMatchers.html). + +#### 6.2.3. Using `ResponseCreator` and `ResponseCreators` + +When the request message has been verified and meets the defined expectations, the `MockWebServiceServer` creates a response message for the `WebServiceTemplate` to consume. The server uses the `ResponseCreator` strategy interface for this purpose: + +``` +public interface ResponseCreator { + + WebServiceMessage createResponse(URI uri, + WebServiceMessage request, + WebServiceMessageFactory messageFactory) + throws IOException; + +} +``` + +Once again, you can write your own implementations of this interface, creating a response message by using the message factory, but you certainly do not have to, as the `ResponseCreators` class provides standard `ResponseCreator` implementations for you to use in your tests. You typically statically import this class. 
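+
+If you do roll your own, a custom `ResponseCreator` might look like the following minimal sketch, which always answers with the same payload (the class name is illustrative, not part of the API):
+
+```
+import java.io.IOException;
+import java.net.URI;
+
+import javax.xml.transform.TransformerFactory;
+
+import org.springframework.ws.WebServiceMessage;
+import org.springframework.ws.WebServiceMessageFactory;
+import org.springframework.ws.test.client.ResponseCreator;
+import org.springframework.xml.transform.StringSource;
+
+public class FixedPayloadResponseCreator implements ResponseCreator {
+
+    private final String payload;
+
+    public FixedPayloadResponseCreator(String payload) {
+        this.payload = payload;
+    }
+
+    public WebServiceMessage createResponse(URI uri, WebServiceMessage request,
+            WebServiceMessageFactory messageFactory) throws IOException {
+        try {
+            // Create an empty message and copy the fixed payload into it
+            WebServiceMessage response = messageFactory.createWebServiceMessage();
+            TransformerFactory.newInstance().newTransformer()
+                    .transform(new StringSource(payload), response.getPayloadResult());
+            return response;
+        } catch (Exception ex) {
+            throw new IOException("Could not create response", ex);
+        }
+    }
+}
+```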
+
+The `ResponseCreators` class provides the following responses:
+
+| `ResponseCreators` method | Description |
+|---|---|
+| `withPayload()` | Creates a response message with a given payload. |
+| `withError()` | Creates an error in the response connection. This method gives you the opportunity to test your error handling. |
+| `withException()` |Throws an exception when reading from the response connection. This method gives you the opportunity to test your exception handling.|
+|`withMustUnderstandFault()`, `withClientOrSenderFault()`, `withServerOrReceiverFault()`, or `withVersionMismatchFault()`| Creates a response message with a given SOAP fault. These methods give you the opportunity to test your fault handling. |
+
+For more information on the response creators provided by `ResponseCreators`, see the [Javadoc](https://docs.spring.io/spring-ws/docs/current/api/org/springframework/ws/test/client/ResponseCreators.html).
+
+## 7. Securing Your Web services with Spring-WS
+
+This chapter explains how to add WS-Security aspects to your Web services. We focus on the three different areas of WS-Security:
+
+* **Authentication**: This is the process of determining whether a principal is who they claim to be. In this context, a “principal” generally means a user, device, or some other system that can perform an action in your application.
+
+* **Digital signatures**: The digital signature of a message is a piece of information based on both the document and the signer’s private key. It is created through the use of a hash function and a private signing function (encrypting with the signer’s private key).
+
+* **Encryption and Decryption**: Encryption is the process of transforming data into a form that is impossible to read without the appropriate key. It is mainly used to keep information hidden from anyone for whom it is not intended. Decryption is the reverse of encryption. It is the process of transforming encrypted data back into a readable form.
+
+These three areas are implemented by using the `XwsSecurityInterceptor` or the `Wss4jSecurityInterceptor`, which we describe in [`XwsSecurityInterceptor`](#security-xws-security-interceptor) and [Using `Wss4jSecurityInterceptor`](#security-wss4j-security-interceptor), respectively.
+
+| |Note that WS-Security (especially encryption and signing) requires substantial amounts of memory and can decrease performance. If performance is important to you, you might want to consider not using WS-Security or using HTTP-based security.|
+|---|---|
+
+### 7.1. `XwsSecurityInterceptor`
+
+The `XwsSecurityInterceptor` is an `EndpointInterceptor` (see [Intercepting Requests — the `EndpointInterceptor` Interface](#server-endpoint-interceptor)) that is based on SUN’s XML and Web Services Security package (XWSS). This WS-Security implementation is part of the Java Web Services Developer Pack ([Java WSDP](http://java.sun.com/webservices/)).
+
+Like any other endpoint interceptor, it is defined in the endpoint mapping (see [Endpoint mappings](#server-endpoint-mapping)).
This means that you can be selective about adding WS-Security support. Some endpoint mappings require it, while others do not.
+
+| |Note that XWSS requires both a SUN 1.5 JDK and the SUN SAAJ reference implementation. The WSS4J interceptor does not have these requirements (see [Using `Wss4jSecurityInterceptor`](#security-wss4j-security-interceptor)).|
+|---|---|
+
+The `XwsSecurityInterceptor` requires a security policy file to operate. This XML file tells the interceptor what security aspects to require from incoming SOAP messages and what aspects to add to outgoing messages. The basic format of the policy file is explained in the following sections, but you can find a more in-depth tutorial [here](http://java.sun.com/webservices/docs/1.6/tutorial/doc/XWS-SecurityIntro4.html#wp564887). You can set the policy with the `policyConfiguration` property, which requires a Spring resource. The policy file can contain multiple elements — for example, require a username token on incoming messages and sign all outgoing messages. It contains a `SecurityConfiguration` element (not a `JAXRPCSecurity` element) as its root.
+
+Additionally, the security interceptor requires one or more `CallbackHandler` instances to operate. These handlers are used to retrieve certificates, private keys, validate user credentials, and so on. Spring-WS offers handlers for most common security concerns — for example, authenticating against a Spring Security authentication manager and signing outgoing messages based on an X509 certificate. The following sections indicate what callback handler to use for which security concern. You can set the callback handlers by using the `callbackHandler` or `callbackHandlers` property.
+
+The following example shows how to wire up the `XwsSecurityInterceptor` (the bean names are placeholders):
+
+```
+<bean id="wsSecurityInterceptor"
+    class="org.springframework.ws.soap.security.xwss.XwsSecurityInterceptor">
+    <property name="policyConfiguration" value="classpath:securityPolicy.xml"/>
+    <property name="callbackHandlers">
+        <list>
+            <ref bean="certificateHandler"/>
+            <ref bean="authenticationHandler"/>
+        </list>
+    </property>
+</bean>
+...
+```
+
+This interceptor is configured by using the `securityPolicy.xml` file on the classpath. It uses two callback handlers that are defined later in the file.
+
+#### 7.1.1. Keystores
+
+For most cryptographic operations, you can use the standard `java.security.KeyStore` objects. These operations include certificate verification, message signing, signature verification, and encryption. They exclude username and time-stamp verification. This section aims to give you some background knowledge on keystores and the Java tools that you can use to store keys and certificates in a keystore file. This information is mostly not related to Spring-WS but to the general cryptographic features of Java.
+
+The `java.security.KeyStore` class represents a storage facility for cryptographic keys and certificates. It can contain three different sorts of elements:
+
+* **Private Keys**: These keys are used for self-authentication. The private key is accompanied by a certificate chain for the corresponding public key. Within the field of WS-Security, this accounts for message signing and message decryption.
+
+* **Symmetric Keys**: Symmetric (or secret) keys are also used for message encryption and decryption — the difference being that both sides (sender and recipient) share the same secret key.
+
+* **Trusted certificates**: These X509 certificates are called a “trusted certificate” because the keystore owner trusts that the public key in the certificates does indeed belong to the owner of the certificate. Within WS-Security, these certificates are used for certificate validation, signature verification, and encryption.
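+
+Purely as background, the following sketch shows how such a keystore can be loaded and inspected in plain Java (the file name and password are placeholders):
+
+```
+import java.io.FileInputStream;
+import java.io.InputStream;
+import java.security.KeyStore;
+import java.util.Collections;
+
+public class KeyStoreInspector {
+
+    public static void main(String[] args) throws Exception {
+        KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType()); // usually "jks" or "pkcs12"
+        try (InputStream in = new FileInputStream("keystore.jks")) {        // placeholder path
+            keyStore.load(in, "changeit".toCharArray());                    // placeholder password
+        }
+        // List the aliases of all keys and certificates in the store
+        for (String alias : Collections.list(keyStore.aliases())) {
+            System.out.println(alias + " (certificate entry: " + keyStore.isCertificateEntry(alias) + ")");
+        }
+    }
+}
+```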
+
+##### Using `keytool`
+
+The `keytool` program, a key and certificate management utility, is supplied with your Java Virtual Machine. You can use this tool to create new keystores, add new private keys and certificates to them, and so on. It is beyond the scope of this document to provide a full reference of the `keytool` command, but you can find a reference [here](http://java.sun.com/j2se/1.5.0/docs/tooldocs/windows/keytool.html) or by using the `keytool -help` command on the command line.
+
+##### Using `KeyStoreFactoryBean`
+
+To easily load a keystore by using Spring configuration, you can use the `KeyStoreFactoryBean`. It has a resource location property, which you can set to point to the path of the keystore to load. A password may be given to check the integrity of the keystore data. If a password is not given, integrity checking is not performed. The following listing configures a `KeyStoreFactoryBean` (the location and password are placeholders):
+
+```
+<bean id="keyStore" class="org.springframework.ws.soap.security.support.KeyStoreFactoryBean">
+    <property name="password" value="password"/>
+    <property name="location" value="classpath:keystore.jks"/>
+</bean>
+```
+
+| |If you do not specify the location property, a new, empty keystore is created, which is most likely not what you want.|
+|---|---|
+
+##### KeyStoreCallbackHandler
+
+To use the keystores within an `XwsSecurityInterceptor`, you need to define a `KeyStoreCallbackHandler`. This callback handler has three properties of type `KeyStore`: `keyStore`, `trustStore`, and `symmetricStore`. The exact stores used by the handler depend on the cryptographic operations that are to be performed by this handler. For private key operations, the `keyStore` is used. For symmetric key operations, the `symmetricStore` is used. For determining trust relationships, the `trustStore` is used. The following table indicates this:
+
+| Cryptographic operation | Keystore used |
+|---|---|
+| Certificate validation |First `keyStore`, then `trustStore`|
+| Decryption based on private key | `keyStore` |
+| Decryption based on symmetric key | `symmetricStore` |
+|Encryption based on public key certificate| `trustStore` |
+| Encryption based on symmetric key | `symmetricStore` |
+| Signing | `keyStore` |
+| Signature verification | `trustStore` |
+
+Additionally, the `KeyStoreCallbackHandler` has a `privateKeyPassword` property, which should be set to unlock the private keys contained in the `keyStore`.
+
+If the `symmetricStore` is not set, it defaults to the `keyStore`. If the key or trust store is not set, the callback handler uses the standard Java mechanism to load or create it. See the JavaDoc of the `KeyStoreCallbackHandler` to know how this mechanism works.
+
+For instance, if you want to use the `KeyStoreCallbackHandler` to validate incoming certificates or signatures, you can use a trust store:
+
+```
+<bean id="keyStoreHandler" class="org.springframework.ws.soap.security.xwss.callback.KeyStoreCallbackHandler">
+    <property name="trustStore" ref="trustStore"/>
+</bean>
+
+<bean id="trustStore" class="org.springframework.ws.soap.security.support.KeyStoreFactoryBean">
+    <property name="location" value="classpath:truststore.jks"/>
+    <property name="password" value="changeit"/>
+</bean>
+```
+
+If you want to use it to decrypt incoming messages or sign outgoing messages, you can use a key store:
+
+```
+<bean id="keyStoreHandler" class="org.springframework.ws.soap.security.xwss.callback.KeyStoreCallbackHandler">
+    <property name="keyStore" ref="keyStore"/>
+    <property name="privateKeyPassword" value="changeit"/>
+</bean>
+
+<bean id="keyStore" class="org.springframework.ws.soap.security.support.KeyStoreFactoryBean">
+    <property name="location" value="classpath:keystore.jks"/>
+    <property name="password" value="changeit"/>
+</bean>
+```
+
+The following sections indicate where the `KeyStoreCallbackHandler` can be used and which properties to set for particular cryptographic operations.
+
+#### 7.1.2.
Authentication
+
+As stated in the [introduction to this chapter](#security), authentication is the task of determining whether a principal is who they claim to be. Within WS-Security, authentication can take two forms: using a username and password token (using either a plain text password or a password digest) or using an X509 certificate.
+
+##### Plain Text Username Authentication
+
+The simplest form of username authentication uses plain text passwords. In this scenario, the SOAP message contains a `UsernameToken` element, which itself contains a `Username` element and a `Password` element that contains the plain text password. Plain text authentication can be compared to the basic authentication provided by HTTP servers.
+
+| |Note that plain text passwords are not very secure. Therefore, you should always add additional security measures to your transport layer if you use them (using HTTPS instead of plain HTTP, for instance).|
+|---|---|
+
+To require that every incoming message contains a `UsernameToken` with a plain text password, the security policy file should contain a `RequireUsernameToken` element, with the `passwordDigestRequired` attribute set to `false`. You can find a reference of possible child elements [here](http://java.sun.com/webservices/docs/1.6/tutorial/doc/XWS-SecurityIntro4.html#wp567459). The following listing shows how to include a `RequireUsernameToken` element:
+
+```
+<xwss:SecurityConfiguration xmlns:xwss="http://java.sun.com/xml/ns/xwss/config">
+    ...
+    <xwss:RequireUsernameToken passwordDigestRequired="false" nonceRequired="false"/>
+    ...
+</xwss:SecurityConfiguration>
+```
+
+If the username token is not present, the `XwsSecurityInterceptor` returns a SOAP fault to the sender. If it is present, it fires a `PasswordValidationCallback` with a `PlainTextPasswordRequest` to the registered handlers. Within Spring-WS, there are three classes that handle this particular callback.
+
+* [`SimplePasswordValidationCallbackHandler`](#security-simple-password-validation-callback-handler)
+
+* [`SpringPlainTextPasswordValidationCallbackHandler`](#using-springplaintextpasswordvalidationcallbackhandler)
+
+* [`JaasPlainTextPasswordValidationCallbackHandler`](#using-jaasplaintextpasswordvalidationcallbackhandler)
+
+###### Using `SimplePasswordValidationCallbackHandler`
+
+The simplest password validation handler is the `SimplePasswordValidationCallbackHandler`. This handler validates passwords against an in-memory `Properties` object, which you can specify by using the `users` property:
+
+```
+<bean id="passwordValidationHandler"
+    class="org.springframework.ws.soap.security.xwss.callback.SimplePasswordValidationCallbackHandler">
+    <property name="users">
+        <props>
+            <prop key="Bert">Ernie</prop>
+        </props>
+    </property>
+</bean>
+```
+
+In this case, we allow only the user "Bert" to log in, by using the password "Ernie".
+
+###### Using `SpringPlainTextPasswordValidationCallbackHandler`
+
+The `SpringPlainTextPasswordValidationCallbackHandler` uses [Spring Security](https://spring.io/projects/spring-security) to authenticate users. It is beyond the scope of this document to describe Spring Security, but it is a full-fledged security framework. You can read more about it in the [Spring Security reference documentation](https://docs.spring.io/spring-security/site/docs/current/reference/htmlsingle/).
+
+The `SpringPlainTextPasswordValidationCallbackHandler` requires an `AuthenticationManager` to operate. It uses this manager to authenticate against a `UsernamePasswordAuthenticationToken` that it creates. If authentication is successful, the token is stored in the `SecurityContextHolder`.
You can set the authentication manager by using the `authenticationManager` property (the manager shown here is a placeholder — any `AuthenticationManager` will do):
+
+```
+<bean id="springSecurityHandler"
+    class="org.springframework.ws.soap.security.xwss.callback.SpringPlainTextPasswordValidationCallbackHandler">
+    <property name="authenticationManager" ref="authenticationManager"/>
+</bean>
+
+<bean id="authenticationManager" class="org.springframework.security.authentication.ProviderManager">
+    <constructor-arg>
+        <list>
+            <bean class="org.springframework.security.authentication.dao.DaoAuthenticationProvider">
+                <property name="userDetailsService" ref="userDetailsService"/>
+            </bean>
+        </list>
+    </constructor-arg>
+</bean>
+...
+```
+
+###### Using `JaasPlainTextPasswordValidationCallbackHandler`
+
+The `JaasPlainTextPasswordValidationCallbackHandler` is based on the standard [Java Authentication and Authorization Service](http://java.sun.com/products/jaas/). It is beyond the scope of this document to provide a full introduction into JAAS, but a [good tutorial](http://www.javaworld.com/javaworld/jw-09-2002/jw-0913-jaas.html) is available.
+
+The `JaasPlainTextPasswordValidationCallbackHandler` requires only a `loginContextName` to operate. It creates a new JAAS `LoginContext` by using this name and handles the standard JAAS `NameCallback` and `PasswordCallback` by using the username and password provided in the SOAP message. This means that this callback handler integrates with any JAAS `LoginModule` that fires these callbacks during the `login()` phase, which is standard behavior.
+
+You can wire up a `JaasPlainTextPasswordValidationCallbackHandler` as follows:
+
+```
+<bean id="jaasValidationHandler"
+    class="org.springframework.ws.soap.security.xwss.callback.jaas.JaasPlainTextPasswordValidationCallbackHandler">
+    <property name="loginContextName" value="MyLoginModule"/>
+</bean>
+```
+
+In this case, the callback handler uses the `LoginContext` named `MyLoginModule`. This module should be defined in your `jaas.config` file, as explained in the [tutorial mentioned earlier](http://www.javaworld.com/javaworld/jw-09-2002/jw-0913-jaas.html).
+
+##### Digest Username Authentication
+
+When using password digests, the SOAP message also contains a `UsernameToken` element, which itself contains a `Username` element and a `Password` element. The difference is that the password is not sent as plain text, but as a digest. The recipient compares this digest to the digest it calculates from the known password of the user, and, if they are the same, the user is authenticated. This method is comparable to the digest authentication provided by HTTP servers.
+
+To require that every incoming message contains a `UsernameToken` element with a password digest, the security policy file should contain a `RequireUsernameToken` element, with the `passwordDigestRequired` attribute set to `true`. Additionally, the `nonceRequired` attribute should be set to `true`. You can find a reference of possible child elements [here](http://java.sun.com/webservices/docs/1.6/tutorial/doc/XWS-SecurityIntro4.html#wp567459). The following listing shows how to define a `RequireUsernameToken` element:
+
+```
+<xwss:SecurityConfiguration xmlns:xwss="http://java.sun.com/xml/ns/xwss/config">
+    ...
+    <xwss:RequireUsernameToken passwordDigestRequired="true" nonceRequired="true"/>
+    ...
+</xwss:SecurityConfiguration>
+```
+
+If the username token is not present, the `XwsSecurityInterceptor` returns a SOAP fault to the sender. If it is present, it fires a `PasswordValidationCallback` with a `DigestPasswordRequest` to the registered handlers. Within Spring-WS, two classes handle this particular callback: `SimplePasswordValidationCallbackHandler` and `SpringDigestPasswordValidationCallbackHandler`.
+
+###### Using `SimplePasswordValidationCallbackHandler`
+
+The `SimplePasswordValidationCallbackHandler` can handle both plain text passwords as well as password digests. It is described in [Using `SimplePasswordValidationCallbackHandler`](#security-simple-password-validation-callback-handler).
+
+###### Using `SpringDigestPasswordValidationCallbackHandler`
+
+The `SpringDigestPasswordValidationCallbackHandler` requires a Spring Security `UserDetailsService` to operate. It uses this service to retrieve the password of the user specified in the token. The digest of the password contained in this details object is then compared with the digest in the message.
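+
+For reference, the UsernameToken profile defines the password digest as `Base64(SHA-1(nonce + created + password))`. A minimal sketch of that computation (assuming UTF-8 encoding) might look like this:
+
+```
+import java.nio.charset.StandardCharsets;
+import java.security.MessageDigest;
+import java.util.Base64;
+
+public final class PasswordDigest {
+
+    // Computes Base64(SHA-1(nonce + created + password)), as defined by the
+    // WS-Security UsernameToken profile
+    public static String compute(byte[] nonce, String created, String password) throws Exception {
+        MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
+        sha1.update(nonce);
+        sha1.update(created.getBytes(StandardCharsets.UTF_8));
+        sha1.update(password.getBytes(StandardCharsets.UTF_8));
+        return Base64.getEncoder().encodeToString(sha1.digest());
+    }
+}
+```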
If the two digests are equal, the user has successfully authenticated, and a `UsernamePasswordAuthenticationToken` is stored in the `SecurityContextHolder`. You can set the service by using the `userDetailsService` property. Additionally, you can set a `userCache` property, to cache loaded user details. The following example shows how to do so:
+
+```
+<bean id="springSecurityHandler"
+    class="org.springframework.ws.soap.security.xwss.callback.SpringDigestPasswordValidationCallbackHandler">
+    <property name="userDetailsService" ref="userDetailsService"/>
+    <property name="userCache" ref="userCache"/>
+</bean>
+...
+```
+
+##### Certificate Authentication
+
+A more secure way of authentication uses X509 certificates. In this scenario, the SOAP message contains a `BinarySecurityToken`, which contains a Base64-encoded version of an X509 certificate. The certificate is used by the recipient to authenticate. The certificate stored in the message is also used to sign the message (see [Verifying Signatures](#security-verifying-signatures)).
+
+To make sure that all incoming SOAP messages carry a `BinarySecurityToken`, the security policy file should contain a `RequireSignature` element. This element can further carry other elements, which are covered in [Verifying Signatures](#security-verifying-signatures). You can find a reference of possible child elements [here](http://java.sun.com/webservices/docs/1.6/tutorial/doc/XWS-SecurityIntro4.html#wp565769). The following listing shows how to define a `RequireSignature` element:
+
+```
+<xwss:SecurityConfiguration xmlns:xwss="http://java.sun.com/xml/ns/xwss/config">
+    ...
+    <xwss:RequireSignature requireTimestamp="false"/>
+    ...
+</xwss:SecurityConfiguration>
+```
+
+When a message arrives that carries no certificate, the `XwsSecurityInterceptor` returns a SOAP fault to the sender. If a certificate is present, the interceptor fires a `CertificateValidationCallback`. Three handlers within Spring-WS handle this callback for authentication purposes:
+
+* [`KeyStoreCallbackHandler`](#using-keystorecallbackhandler)
+
+* [`SpringCertificateValidationCallbackHandler`](#using-springcertificatevalidationcallbackhandler)
+
+* [`JaasCertificateValidationCallbackHandler`](#using-jaascertificatevalidationcallbackhandler)
+
+| |In most cases, certificate authentication should be preceded by certificate validation, since you want to authenticate against only valid certificates. Invalid certificates, such as certificates for which the expiration date has passed or which are not in your store of trusted certificates, should be ignored.

In Spring-WS terms, this means that the `SpringCertificateValidationCallbackHandler` or `JaasCertificateValidationCallbackHandler` should be preceded by `KeyStoreCallbackHandler`. This can be accomplished by setting the order of the `callbackHandlers` property in the configuration of the `XwsSecurityInterceptor`:

```
<bean id="wsSecurityInterceptor"
    class="org.springframework.ws.soap.security.xwss.XwsSecurityInterceptor">
    <property name="policyConfiguration" value="classpath:securityPolicy.xml"/>
    <property name="callbackHandlers">
        <list>
            <ref bean="keyStoreHandler"/>
            <ref bean="springSecurityHandler"/>
        </list>
    </property>
</bean>
```

Using this setup, the interceptor first determines whether the certificate in the message is valid by using the keystore and then authenticates against it.|
+|---|---|
+
+###### Using `KeyStoreCallbackHandler`
+
+The `KeyStoreCallbackHandler` uses a standard Java keystore to validate certificates. This certificate validation process consists of the following steps:
+
+1. The handler checks whether the certificate is in the private `keyStore`. If it is, it is valid.
+
+2. If the certificate is not in the private keystore, the handler checks whether the current date and time are within the validity period given in the certificate. If they are not, the certificate is invalid. If it is, it continues with the final step.
+
+3. A certification path for the certificate is created. This basically means that the handler determines whether the certificate has been issued by any of the certificate authorities in the `trustStore`. If a certification path can be built successfully, the certificate is valid. Otherwise, the certificate is not valid.
+
+To use the `KeyStoreCallbackHandler` for certificate validation purposes, you most likely need to set only the `trustStore` property:
+
+```
+<bean id="keyStoreHandler" class="org.springframework.ws.soap.security.xwss.callback.KeyStoreCallbackHandler">
+    <property name="trustStore" ref="trustStore"/>
+</bean>
+
+<bean id="trustStore" class="org.springframework.ws.soap.security.support.KeyStoreFactoryBean">
+    <property name="location" value="classpath:truststore.jks"/>
+    <property name="password" value="changeit"/>
+</bean>
+```
+
+Using the setup shown in the preceding example, the certificate that is to be validated must be in the trust store itself or the trust store must contain a certificate authority that issued the certificate.
+
+###### Using `SpringCertificateValidationCallbackHandler`
+
+The `SpringCertificateValidationCallbackHandler` requires a Spring Security `AuthenticationManager` to operate. It uses this manager to authenticate against a `X509AuthenticationToken` that it creates. The configured authentication manager is expected to supply a provider that can handle this token (usually an instance of `X509AuthenticationProvider`). If authentication is successful, the token is stored in the `SecurityContextHolder`. You can set the authentication manager by using the `authenticationManager` property:
+
+```
+<bean id="springSecurityCertificateHandler"
+    class="org.springframework.ws.soap.security.xwss.callback.SpringCertificateValidationCallbackHandler">
+    <property name="authenticationManager" ref="authenticationManager"/>
+</bean>
+
+<bean id="authenticationManager" class="org.springframework.security.authentication.ProviderManager">
+    <constructor-arg>
+        <list>
+            <bean class="org.springframework.security.providers.x509.X509AuthenticationProvider">
+                <property name="x509AuthoritiesPopulator">
+                    <bean class="org.springframework.security.providers.x509.populator.DaoX509AuthoritiesPopulator">
+                        <property name="userDetailsService" ref="userDetailsService"/>
+                    </bean>
+                </property>
+            </bean>
+        </list>
+    </constructor-arg>
+</bean>
+...
+```
+
+In this case, we use a custom user details service to obtain authentication details based on the certificate.
See the [Spring Security reference documentation](http://www.springframework.org/security) for more information about authentication against X509 certificates.
+
+###### Using `JaasCertificateValidationCallbackHandler`
+
+The `JaasCertificateValidationCallbackHandler` requires a `loginContextName` to operate. It creates a new JAAS `LoginContext` by using this name and the `X500Principal` of the certificate. This means that this callback handler integrates with any JAAS `LoginModule` that handles X500 principals.
+
+You can wire up a `JaasCertificateValidationCallbackHandler` as follows:
+
+```
+<bean id="jaasValidationHandler"
+    class="org.springframework.ws.soap.security.xwss.callback.jaas.JaasCertificateValidationCallbackHandler">
+    <property name="loginContextName" value="MyLoginModule"/>
+</bean>
+```
+
+In this case, the callback handler uses the `LoginContext` named `MyLoginModule`. This module should be defined in your `jaas.config` file and should be able to authenticate against X500 principals.
+
+#### 7.1.3. Digital Signatures
+
+The digital signature of a message is a piece of information based on both the document and the signer’s private key. Two main tasks are related to signatures in WS-Security: verifying signatures and signing messages.
+
+##### Verifying Signatures
+
+As with [certificate-based authentication](#security-certificate-authentication), a signed message contains a `BinarySecurityToken`, which contains the certificate used to sign the message. Additionally, it contains a `SignedInfo` block, which indicates what part of the message was signed.
+
+To make sure that all incoming SOAP messages carry a `BinarySecurityToken`, the security policy file should contain a `RequireSignature` element. It can also contain a `SignatureTarget` element, which specifies the target message part that was expected to be signed and various other subelements. You can also define the private key alias to use, whether to use a symmetric instead of a private key, and many other properties. You can find a reference of possible child elements [here](http://java.sun.com/webservices/docs/1.6/tutorial/doc/XWS-SecurityIntro4.html#wp565769). The following listing configures a `RequireSignature` element:
+
+```
+<xwss:SecurityConfiguration xmlns:xwss="http://java.sun.com/xml/ns/xwss/config">
+    <xwss:RequireSignature requireTimestamp="false"/>
+</xwss:SecurityConfiguration>
+```
+
+If the signature is not present, the `XwsSecurityInterceptor` returns a SOAP fault to the sender. If it is present, it fires a `SignatureVerificationKeyCallback` to the registered handlers. Within Spring-WS, one class handles this particular callback: `KeyStoreCallbackHandler`.
+
+###### Using `KeyStoreCallbackHandler`
+
+As described in [KeyStoreCallbackHandler](#security-key-store-callback-handler), `KeyStoreCallbackHandler` uses a `java.security.KeyStore` for handling various cryptographic callbacks, including signature verification. For signature verification, the handler uses the `trustStore` property:
+
+```
+<bean id="keyStoreHandler" class="org.springframework.ws.soap.security.xwss.callback.KeyStoreCallbackHandler">
+    <property name="trustStore" ref="trustStore"/>
+</bean>
+
+<bean id="trustStore" class="org.springframework.ws.soap.security.support.KeyStoreFactoryBean">
+    <property name="location" value="classpath:truststore.jks"/>
+    <property name="password" value="changeit"/>
+</bean>
+```
+
+##### Signing Messages
+
+When signing a message, the `XwsSecurityInterceptor` adds the `BinarySecurityToken` to the message. It also adds a `SignedInfo` block, which indicates what part of the message was signed.
+
+To sign all outgoing SOAP messages, the security policy file should contain a `Sign` element. It can also contain a `SignatureTarget` element, which specifies the target message part that was expected to be signed and various other subelements. You can also define the private key alias to use, whether to use a symmetric instead of a private key, and many other properties. You can find a reference of possible child elements [here](http://java.sun.com/webservices/docs/1.6/tutorial/doc/XWS-SecurityIntro4.html#wp565497).
The following example includes a `Sign` element:
+
+```
+<xwss:SecurityConfiguration xmlns:xwss="http://java.sun.com/xml/ns/xwss/config">
+    <xwss:Sign includeTimestamp="false"/>
+</xwss:SecurityConfiguration>
+```
+
+The `XwsSecurityInterceptor` fires a `SignatureKeyCallback` to the registered handlers. Within Spring-WS, the `KeyStoreCallbackHandler` class handles this particular callback.
+
+###### Using `KeyStoreCallbackHandler`
+
+As described in [KeyStoreCallbackHandler](#security-key-store-callback-handler), the `KeyStoreCallbackHandler` uses a `java.security.KeyStore` to handle various cryptographic callbacks, including signing messages. For adding signatures, the handler uses the `keyStore` property. Additionally, you must set the `privateKeyPassword` property to unlock the private key used for signing. The following example uses a `KeyStoreCallbackHandler`:
+
+```
+<bean id="keyStoreHandler" class="org.springframework.ws.soap.security.xwss.callback.KeyStoreCallbackHandler">
+    <property name="keyStore" ref="keyStore"/>
+    <property name="privateKeyPassword" value="changeit"/>
+</bean>
+
+<bean id="keyStore" class="org.springframework.ws.soap.security.support.KeyStoreFactoryBean">
+    <property name="location" value="classpath:keystore.jks"/>
+    <property name="password" value="changeit"/>
+</bean>
+```
+
+#### 7.1.4. Decryption and Encryption
+
+When encrypting, the message is transformed into a form that can be read only with the appropriate key. The message can be decrypted to reveal the original, readable message.
+
+##### Decryption
+
+To decrypt incoming SOAP messages, the security policy file should contain a `RequireEncryption` element. This element can further carry an `EncryptionTarget` element that indicates which part of the message should be encrypted and a `SymmetricKey` to indicate that a shared secret instead of the regular private key should be used to decrypt the message. You can read a description of the other elements [here](http://java.sun.com/webservices/docs/1.6/tutorial/doc/XWS-SecurityIntro4.html#wp565951). The following example uses a `RequireEncryption` element:
+
+```
+<xwss:SecurityConfiguration xmlns:xwss="http://java.sun.com/xml/ns/xwss/config">
+    <xwss:RequireEncryption/>
+</xwss:SecurityConfiguration>
+```
+
+If an incoming message is not encrypted, the `XwsSecurityInterceptor` returns a SOAP fault to the sender. If it is encrypted, the interceptor fires a `DecryptionKeyCallback` to the registered handlers. Within Spring-WS, the `KeyStoreCallbackHandler` class handles this particular callback.
+
+###### Using `KeyStoreCallbackHandler`
+
+As described in [KeyStoreCallbackHandler](#security-key-store-callback-handler), the `KeyStoreCallbackHandler` uses a `java.security.KeyStore` to handle various cryptographic callbacks, including decryption. For decryption, the handler uses the `keyStore` property. Additionally, you must set the `privateKeyPassword` property to unlock the private key used for decryption. For decryption based on symmetric keys, it uses the `symmetricStore`. The following example uses `KeyStoreCallbackHandler`:
+
+```
+<bean id="keyStoreHandler" class="org.springframework.ws.soap.security.xwss.callback.KeyStoreCallbackHandler">
+    <property name="keyStore" ref="keyStore"/>
+    <property name="privateKeyPassword" value="changeit"/>
+</bean>
+
+<bean id="keyStore" class="org.springframework.ws.soap.security.support.KeyStoreFactoryBean">
+    <property name="location" value="classpath:keystore.jks"/>
+    <property name="password" value="changeit"/>
+</bean>
+```
+
+##### Encryption
+
+To encrypt outgoing SOAP messages, the security policy file should contain an `Encrypt` element. This element can further carry an `EncryptionTarget` element that indicates which part of the message should be encrypted and a `SymmetricKey` to indicate that a shared secret instead of the regular public key should be used to encrypt the message. You can read a description of the other elements [here](http://java.sun.com/webservices/docs/1.6/tutorial/doc/XWS-SecurityIntro4.html#wp565951). The following example uses an `Encrypt` element:
+
+```
+<xwss:SecurityConfiguration xmlns:xwss="http://java.sun.com/xml/ns/xwss/config">
+    <xwss:Encrypt/>
+</xwss:SecurityConfiguration>
+```
+
+The `XwsSecurityInterceptor` fires an `EncryptionKeyCallback` to the registered handlers to retrieve the encryption information. Within Spring-WS, the `KeyStoreCallbackHandler` class handles this particular callback.
+
+###### Using `KeyStoreCallbackHandler`
+
+As described in [KeyStoreCallbackHandler](#security-key-store-callback-handler), the `KeyStoreCallbackHandler` uses a `java.security.KeyStore` to handle various cryptographic callbacks, including encryption.
For encryption based on public keys, the handler uses the `trustStore` property. For encryption based on symmetric keys, it uses the `symmetricStore`. The following example uses `KeyStoreCallbackHandler`:
+
+```
+<bean id="keyStoreHandler" class="org.springframework.ws.soap.security.xwss.callback.KeyStoreCallbackHandler">
+    <property name="trustStore" ref="trustStore"/>
+</bean>
+
+<bean id="trustStore" class="org.springframework.ws.soap.security.support.KeyStoreFactoryBean">
+    <property name="location" value="classpath:truststore.jks"/>
+    <property name="password" value="changeit"/>
+</bean>
+```
+
+#### 7.1.5. Security Exception Handling
+
+When a securement or validation action fails, the `XwsSecurityInterceptor` throws a `WsSecuritySecurementException` or a `WsSecurityValidationException`, respectively. These exceptions bypass the [standard exception handling mechanism](#server-endpoint-exception-resolver) but are handled by the interceptor itself.
+
+`WsSecuritySecurementException` exceptions are handled by the `handleSecurementException` method of the `XwsSecurityInterceptor`. By default, this method logs an error and stops further processing of the message.
+
+Similarly, `WsSecurityValidationException` exceptions are handled by the `handleValidationException` method of the `XwsSecurityInterceptor`. By default, this method creates a SOAP 1.1 Client or SOAP 1.2 Sender fault and sends that back as a response.
+
+| |Both `handleSecurementException` and `handleValidationException` are protected methods, which you can override to change their default behavior.|
+|---|---|
+
+### 7.2. Using `Wss4jSecurityInterceptor`
+
+The `Wss4jSecurityInterceptor` is an `EndpointInterceptor` (see [Intercepting Requests — the `EndpointInterceptor` Interface](#server-endpoint-interceptor)) that is based on [Apache’s WSS4J](https://ws.apache.org/wss4j/).
+
+WSS4J implements the following standards:
+
+* OASIS Web Services Security: SOAP Message Security 1.0 Standard 200401, March 2004
+
+* Username Token profile V1.0
+
+* X.509 Token Profile V1.0
+
+This interceptor supports messages created by the `AxiomSoapMessageFactory` and the `SaajSoapMessageFactory`.
+
+#### 7.2.1. Configuring `Wss4jSecurityInterceptor`
+
+WSS4J uses no external configuration file. The interceptor is entirely configured by properties. The validation and securement actions invoked by this interceptor are specified via the `validationActions` and `securementActions` properties, respectively. Actions are passed as a space-separated string. The following listing shows an example configuration:
+
+```
+<bean class="org.springframework.ws.soap.security.wss4j.Wss4jSecurityInterceptor">
+    <property name="validationActions" value="UsernameToken Encrypt"/>
+    ...
+    <property name="securementActions" value="Encrypt"/>
+    ...
+</bean>
+```
+
+The following table shows the available validation actions:
+
+|Validation action| Description |
+|---|---|
+| `UsernameToken` |Validates the username token|
+| `Timestamp` |Validates the timestamp |
+| `Encrypt` | Decrypts the message |
+| `Signature` |Validates the signature |
+| `NoSecurity` | No action performed |
+
+The following table shows the available securement actions:
+
+| Securement action | Description |
+|---|---|
+| `UsernameToken` | Adds a username token |
+|`UsernameTokenSignature`|Adds a username token and a signature username token secret key|
+| `Timestamp` | Adds a timestamp |
+| `Encrypt` | Encrypts the response |
+| `Signature` | Signs the response |
+| `NoSecurity` | No action performed |
+
+The order of the actions is significant and is enforced by the interceptor. The interceptor rejects an incoming SOAP message if its security actions were performed in a different order than the one specified by `validationActions`.
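+
+If you prefer Java configuration, the same interceptor can be set up programmatically through the corresponding setters. The following is a minimal sketch (the chosen actions are illustrative; a `validationCallbackHandler` must also be configured for `UsernameToken` validation, as described below):
+
+```
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.ws.soap.security.wss4j.Wss4jSecurityInterceptor;
+
+@Configuration
+public class SecurityConfig {
+
+    @Bean
+    public Wss4jSecurityInterceptor securityInterceptor() {
+        Wss4jSecurityInterceptor interceptor = new Wss4jSecurityInterceptor();
+        // Validate a timestamp and a username token on incoming messages
+        interceptor.setValidationActions("Timestamp UsernameToken");
+        // Add a timestamp to outgoing messages
+        interceptor.setSecurementActions("Timestamp");
+        return interceptor;
+    }
+}
+```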
+
+#### 7.2.2. Handling Digital Certificates
+
+For cryptographic operations that require interaction with a keystore or certificate handling (signature, encryption, and decryption operations), WSS4J requires an instance of `org.apache.ws.security.components.crypto.Crypto`.
+
+`Crypto` instances can be obtained from WSS4J’s `CryptoFactory` or, more conveniently, with the Spring-WS `CryptoFactoryBean`.
+
+##### CryptoFactoryBean
+
+Spring-WS provides a convenient factory bean, `CryptoFactoryBean`, that constructs and configures `Crypto` instances through strongly typed properties (preferred) or through a `Properties` object.
+
+By default, `CryptoFactoryBean` returns instances of `org.apache.ws.security.components.crypto.Merlin`. You can change this by setting the `cryptoProvider` property (or its equivalent `org.apache.ws.security.crypto.provider` string property).
+
+The following example configuration uses `CryptoFactoryBean` (the location and password are placeholders):
+
+```
+<bean class="org.springframework.ws.soap.security.wss4j.support.CryptoFactoryBean">
+    <property name="keyStorePassword" value="mypassword"/>
+    <property name="keyStoreLocation" value="file:/path_to_keystore/keystore.jks"/>
+</bean>
+```
+
+#### 7.2.3. Authentication
+
+This section addresses how to do authentication with the `Wss4jSecurityInterceptor`.
+
+##### Validating Username Token
+
+Spring-WS provides a set of callback handlers to integrate with Spring Security. Additionally, a simple callback handler, `SimplePasswordValidationCallbackHandler`, is provided to configure users and passwords with an in-memory `Properties` object.
+
+Callback handlers are configured through the `validationCallbackHandler` property of the `Wss4jSecurityInterceptor`.
+
+###### Using `SimplePasswordValidationCallbackHandler`
+
+`SimplePasswordValidationCallbackHandler` validates plain text and digest username tokens against an in-memory `Properties` object. You can configure it as follows:
+
+```
+<bean id="callbackHandler"
+    class="org.springframework.ws.soap.security.wss4j.callback.SimplePasswordValidationCallbackHandler">
+    <property name="users">
+        <props>
+            <prop key="Bert">Ernie</prop>
+        </props>
+    </property>
+</bean>
+```
+
+###### Using `SpringSecurityPasswordValidationCallbackHandler`
+
+The `SpringSecurityPasswordValidationCallbackHandler` validates plain text and digest passwords by using a Spring Security `UserDetailsService`. It uses this service to retrieve the password (or a digest of the password) of the user specified in the token. The password (or a digest of the password) contained in this details object is then compared with the digest in the message. If they are equal, the user has successfully authenticated, and a `UsernamePasswordAuthenticationToken` is stored in the `SecurityContextHolder`. You can set the service by using the `userDetailsService` property. Additionally, you can set a `userCache` property, to cache loaded user details, as follows:
+
+```
+<bean id="callbackHandler"
+    class="org.springframework.ws.soap.security.wss4j.callback.SpringSecurityPasswordValidationCallbackHandler">
+    <property name="userDetailsService" ref="userDetailsService"/>
+    <property name="userCache" ref="userCache"/>
+</bean>
+...
+```
+
+##### Adding Username Token
+
+Adding a username token to an outgoing message is as simple as adding `UsernameToken` to the `securementActions` property of the `Wss4jSecurityInterceptor` and specifying `securementUsername` and `securementPassword`.
+
+The password type can be set by setting the `securementPasswordType` property. Possible values are `PasswordText` for plain text passwords or `PasswordDigest` for digest passwords, which is the default.
+
+The following example generates a username token with a digest password:
+
+```
+<bean class="org.springframework.ws.soap.security.wss4j.Wss4jSecurityInterceptor">
+    <property name="securementActions" value="UsernameToken"/>
+    <property name="securementUsername" value="Ernie"/>
+    <property name="securementPassword" value="Bert"/>
+</bean>
+```
+
+If the plain text password type is chosen, it is possible to instruct the interceptor to add `Nonce` and `Created` elements by setting the `securementUsernameTokenElements` property. The value must be a list that contains the desired elements' names separated by spaces (case sensitive).
+
+The following example generates a username token with a plain text password, a `Nonce`, and a `Created` element:
+
+```
+<bean class="org.springframework.ws.soap.security.wss4j.Wss4jSecurityInterceptor">
+    <property name="securementActions" value="UsernameToken"/>
+    <property name="securementUsername" value="Ernie"/>
+    <property name="securementPassword" value="Bert"/>
+    <property name="securementPasswordType" value="PasswordText"/>
+    <property name="securementUsernameTokenElements" value="Nonce Created"/>
+</bean>
+```
+
+##### Certificate Authentication
+
+As certificate authentication is akin to digital signatures, WSS4J handles it as part of the signature validation and securement. Specifically, the `securementSignatureKeyIdentifier` property must be set to `DirectReference` in order to instruct WSS4J to generate a `BinarySecurityToken` element containing the X509 certificate and to include it in the outgoing message. The certificate’s name and password are passed through the `securementUsername` and `securementPassword` properties, respectively, as the following example shows:
+
+```
+<bean class="org.springframework.ws.soap.security.wss4j.Wss4jSecurityInterceptor">
+    <property name="securementActions" value="Signature"/>
+    <property name="securementSignatureKeyIdentifier" value="DirectReference"/>
+    <property name="securementUsername" value="mycert"/>
+    <property name="securementPassword" value="certpass"/>
+    <property name="securementSignatureCrypto" ref="crypto"/>
+</bean>
+```
+
+For the certificate validation, regular signature validation applies:
+
+```
+<bean class="org.springframework.ws.soap.security.wss4j.Wss4jSecurityInterceptor">
+    <property name="validationActions" value="Signature"/>
+    <property name="validationSignatureCrypto" ref="crypto"/>
+</bean>
+```
+
+At the end of the validation, the interceptor automatically verifies the validity of the certificate by delegating to the default WSS4J implementation. If needed, you can change this behavior by redefining the `verifyCertificateTrust` method.
+
+For more detail, see [Digital Signatures](#security-wss4j-digital-signatures).
+
+#### 7.2.4. Security Timestamps
+
+This section describes the various timestamp options available in the `Wss4jSecurityInterceptor`.
+
+##### Validating Timestamps
+
+To validate timestamps, add `Timestamp` to the `validationActions` property. You can override the timestamp semantics specified by the initiator of the SOAP message by setting `timestampStrict` to `true` and specifying a server-side time-to-live in seconds (default: 300) by setting the `timeToLive` property. The interceptor always rejects already expired timestamps, whatever the value of `timeToLive` is.
+
+In the following example, the interceptor limits the timestamp validity window to 10 seconds, rejecting any valid timestamp token outside that window:
+
+```
+<bean class="org.springframework.ws.soap.security.wss4j.Wss4jSecurityInterceptor">
+    <property name="validationActions" value="Timestamp"/>
+    <property name="timestampStrict" value="true"/>
+    <property name="timeToLive" value="10"/>
+</bean>
+```
+
+##### Adding Timestamps
+
+Adding `Timestamp` to the `securementActions` property generates a timestamp header in outgoing messages. The `timestampPrecisionInMilliseconds` property specifies whether the precision of the generated timestamp is in milliseconds. The default value is `true`. The following listing adds a timestamp:
+
+```
+<bean class="org.springframework.ws.soap.security.wss4j.Wss4jSecurityInterceptor">
+    <property name="securementActions" value="Timestamp"/>
+    <property name="timestampPrecisionInMilliseconds" value="true"/>
+</bean>
+```
+
+#### 7.2.5. Digital Signatures
+
+This section describes the various signature options available in the `Wss4jSecurityInterceptor`.
+
+##### Verifying Signatures
+
+To instruct the `Wss4jSecurityInterceptor` to verify incoming signatures, the `validationActions` property must contain the `Signature` action. Additionally, the `validationSignatureCrypto` property must point to the keystore containing the public certificates of the initiator:
+
+```
+<bean id="wsSecurityInterceptor" class="org.springframework.ws.soap.security.wss4j.Wss4jSecurityInterceptor">
+    <property name="validationActions" value="Signature"/>
+    <property name="validationSignatureCrypto">
+        <bean class="org.springframework.ws.soap.security.wss4j.support.CryptoFactoryBean">
+            <property name="keyStorePassword" value="changeit"/>
+            <property name="keyStoreLocation" value="classpath:keystore.jks"/>
+        </bean>
+    </property>
+</bean>
+```
+
+##### Signing Messages
+
+Signing outgoing messages is enabled by adding the `Signature` action to the `securementActions`. The alias and the password of the private key to use are specified by the `securementUsername` and `securementPassword` properties, respectively. `securementSignatureCrypto` must point to the keystore that contains the private key:
+
+```
+<bean id="wsSecurityInterceptor" class="org.springframework.ws.soap.security.wss4j.Wss4jSecurityInterceptor">
+    <property name="securementActions" value="Signature"/>
+    <property name="securementUsername" value="mykey"/>
+    <property name="securementPassword" value="123456"/>
+    <property name="securementSignatureCrypto">
+        <bean class="org.springframework.ws.soap.security.wss4j.support.CryptoFactoryBean">
+            <property name="keyStorePassword" value="123456"/>
+            <property name="keyStoreLocation" value="classpath:keystore.jks"/>
+        </bean>
+    </property>
+</bean>
+```
+
+Furthermore, you can define the signature algorithm by setting the `securementSignatureAlgorithm` property.
+
+You can customize the key identifier type to use by setting the `securementSignatureKeyIdentifier` property. Only `IssuerSerial` and `DirectReference` are valid for the signature.
+
+The `securementSignatureParts` property controls which part of the message is signed.
The value of this property is a list of semicolon-separated element names that identify the elements to sign. The general form of a signature part is `{}{namespace}Element`. Note that the first empty brackets are used for encryption parts only. The default behavior is to sign the SOAP body.
+
+The following example shows how to sign the `echoResponse` element in the Spring Web Services echo sample:
+
+```
+<property name="securementSignatureParts"
+    value="{}{http://www.springframework.org/spring-ws/samples/echo}echoResponse"/>
+```
+
+To specify an element without a namespace, use the string, `Null` (case sensitive), as the namespace name.
+
+If no other element in the request has a local name of `Body`, the SOAP namespace identifier can be empty (`{}`).
+
+##### Signature Confirmation
+
+Signature confirmation is enabled by setting `enableSignatureConfirmation` to `true`. Note that the signature confirmation action spans over the request and the response. This implies that `secureResponse` and `validateRequest` must be set to `true` (which is the default value) even if there are no corresponding security actions. The following example sets the `enableSignatureConfirmation` property to `true`:
+
+```
+<bean class="org.springframework.ws.soap.security.wss4j.Wss4jSecurityInterceptor">
+    <property name="validationActions" value="Signature"/>
+    <property name="enableSignatureConfirmation" value="true"/>
+    <property name="validationSignatureCrypto" ref="crypto"/>
+</bean>
+```
+
+#### 7.2.6. Decryption and Encryption
+
+This section describes the various decryption and encryption options available in the `Wss4jSecurityInterceptor`.
+
+##### Decryption
+
+Decryption of incoming SOAP messages requires that the `Encrypt` action be added to the `validationActions` property. The rest of the configuration depends on the key information that appears in the message. (This is because WSS4J needs only a Crypto for encrypted keys, whereas embedded key name validation is delegated to a callback handler.)
+
+To decrypt messages with an embedded encrypted symmetric key (the `xenc:EncryptedKey` element), `validationDecryptionCrypto` needs to point to a keystore that contains the decryption private key. Additionally, `validationCallbackHandler` has to be injected with an `org.springframework.ws.soap.security.wss4j.callback.KeyStoreCallbackHandler` that specifies the key’s password:
+
+```
+<bean class="org.springframework.ws.soap.security.wss4j.Wss4jSecurityInterceptor">
+    <property name="validationActions" value="Encrypt"/>
+    <property name="validationDecryptionCrypto">
+        <bean class="org.springframework.ws.soap.security.wss4j.support.CryptoFactoryBean">
+            <property name="keyStorePassword" value="123456"/>
+            <property name="keyStoreLocation" value="classpath:keystore.jks"/>
+        </bean>
+    </property>
+    <property name="validationCallbackHandler">
+        <bean class="org.springframework.ws.soap.security.wss4j.callback.KeyStoreCallbackHandler">
+            <property name="privateKeyPassword" value="mykeypass"/>
+        </bean>
+    </property>
+</bean>
+```
+
+To support decryption of messages with an embedded key name (a `ds:KeyName` element), you can configure a `KeyStoreCallbackHandler` that points to the keystore with the symmetric secret key. The `symmetricKeyPassword` property indicates the key’s password, the key name being the one specified by the `ds:KeyName` element:
+
+```
+<bean class="org.springframework.ws.soap.security.wss4j.Wss4jSecurityInterceptor">
+    <property name="validationActions" value="Encrypt"/>
+    <property name="validationCallbackHandler">
+        <bean class="org.springframework.ws.soap.security.wss4j.callback.KeyStoreCallbackHandler">
+            <property name="keyStore">
+                <bean class="org.springframework.ws.soap.security.support.KeyStoreFactoryBean">
+                    <property name="location" value="classpath:keystore.jceks"/>
+                    <property name="type" value="JCEKS"/>
+                    <property name="password" value="123456"/>
+                </bean>
+            </property>
+            <property name="symmetricKeyPassword" value="mykeypass"/>
+        </bean>
+    </property>
+</bean>
+```
+
+##### Encryption
+
+Adding `Encrypt` to the `securementActions` enables encryption of outgoing messages. You can set the certificate’s alias to use for the encryption by setting the `securementEncryptionUser` property. The keystore where the certificate resides is accessed through the `securementEncryptionCrypto` property. As encryption relies on public certificates, no password needs to be passed. The following example uses the `securementEncryptionCrypto` property:
+
+```
+<bean class="org.springframework.ws.soap.security.wss4j.Wss4jSecurityInterceptor">
+    <property name="securementActions" value="Encrypt"/>
+    <property name="securementEncryptionUser" value="mycert"/>
+    <property name="securementEncryptionCrypto">
+        <bean class="org.springframework.ws.soap.security.wss4j.support.CryptoFactoryBean">
+            <property name="keyStorePassword" value="123456"/>
+            <property name="keyStoreLocation" value="classpath:keystore.jks"/>
+        </bean>
+    </property>
+</bean>
+```
+
+You can customize encryption in several ways: The key identifier type to use is defined by the `securementEncryptionKeyIdentifier` property. Possible values are `IssuerSerial`, `X509KeyIdentifier`, `DirectReference`, `Thumbprint`, `SKIKeyIdentifier`, and `EmbeddedKeyName`.
+
+If you choose the `EmbeddedKeyName` type, you need to specify the secret key to use for the encryption. The alias of the key is set in the `securementEncryptionUser` property, as for the other key identifier types. However, WSS4J requires a callback handler to fetch the secret key.
Thus, you must provide `securementCallbackHandler` with a `KeyStoreCallbackHandler` that points to the appropriate keystore. By default, the `ds:KeyName` element in the resulting WS-Security header takes the value of the `securementEncryptionUser` property. To indicate a different name, you can set the `securementEncryptionEmbeddedKeyName` property with the desired value. In the next example, the outgoing message is encrypted with a key aliased `secretKey`, whereas `myKey` appears in the `ds:KeyName` element:
+
+```
+<bean class="org.springframework.ws.soap.security.wss4j.Wss4jSecurityInterceptor">
+    <property name="securementActions" value="Encrypt"/>
+    <property name="securementEncryptionKeyIdentifier" value="EmbeddedKeyName"/>
+    <property name="securementEncryptionUser" value="secretKey"/>
+    <property name="securementEncryptionEmbeddedKeyName" value="myKey"/>
+    <property name="securementCallbackHandler">
+        <bean class="org.springframework.ws.soap.security.wss4j.callback.KeyStoreCallbackHandler">
+            <property name="symmetricKeyPassword" value="keypass"/>
+            <property name="keyStore">
+                <bean class="org.springframework.ws.soap.security.support.KeyStoreFactoryBean">
+                    <property name="location" value="classpath:keystore.jceks"/>
+                    <property name="type" value="JCEKS"/>
+                    <property name="password" value="123456"/>
+                </bean>
+            </property>
+        </bean>
+    </property>
+</bean>
+```
+
+The `securementEncryptionKeyTransportAlgorithm` property defines which algorithm to use to encrypt the generated symmetric key. Supported values are `http://www.w3.org/2001/04/xmlenc#rsa-1_5`, which is the default, and `http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p`.
+
+You can set the symmetric encryption algorithm to use by setting the `securementEncryptionSymAlgorithm` property. Supported values are `http://www.w3.org/2001/04/xmlenc#aes128-cbc` (default), `http://www.w3.org/2001/04/xmlenc#tripledes-cbc`, `http://www.w3.org/2001/04/xmlenc#aes256-cbc`, and `http://www.w3.org/2001/04/xmlenc#aes192-cbc`.
+
+Finally, the `securementEncryptionParts` property defines which parts of the message are encrypted. The value of this property is a list of semicolon-separated element names that identify the elements to encrypt. An encryption mode specifier and a namespace identification, each inside a pair of curly brackets, may precede each element name. The encryption mode specifier is either `{Content}` or `{Element}`. See the W3C XML Encryption specification for the differences between Element and Content encryption. The following example identifies the `echoResponse` element from the echo sample:
+
+```
+<property name="securementEncryptionParts"
+    value="{Content}{http://www.springframework.org/spring-ws/samples/echo}echoResponse"/>
+```
+
+Be aware that the element name, the namespace identifier, and the encryption modifier are case-sensitive. You can omit the encryption modifier and the namespace identifier. If you do, the encryption mode defaults to `Content`, and the namespace is set to the SOAP namespace.
+
+To specify an element without a namespace, use the value, `Null` (case sensitive), as the namespace name. If no list is specified, the handler encrypts the SOAP Body in `Content` mode by default.
+
+#### 7.2.7. Security Exception Handling
+
+The exception handling of the `Wss4jSecurityInterceptor` is identical to that of the `XwsSecurityInterceptor`. See [Security Exception Handling](#security-xws-exception-handling) for more information.
+
+# III. Other Resources
+
+In addition to this reference documentation, a number of other resources may help you learn how to use Spring Web Services. These additional, third-party resources are enumerated in this section.
+
+## Bibliography
+
+* [waldo-94] Jim Waldo, Ann Wollrath, and Sam Kendall. *A Note on Distributed Computing*. Springer Verlag. 1994
+
+* [alpine] Steve Loughran & Edmund Smith. *Rethinking the Java SOAP Stack*. May 17, 2005. © 2005 IEEE Telephone Laboratories, Inc.
+
+* [effective-enterprise-java] Ted Neward. Scott Meyers. *Effective Enterprise Java*. Addison-Wesley. 2004
+
+* [effective-xml] Elliotte Rusty Harold. Scott Meyers. *Effective XML*. Addison-Wesley.
2004
\ No newline at end of file
diff --git a/docs/spring-amqp/spring-amqp.md b/docs/spring-amqp/spring-amqp.md
index ce8eca66fc30c246c4b033aadbf173741cfbfb9b..61bd42ea6d5e1ea0d08f5d577d0bb1457e090ea0 100644
--- a/docs/spring-amqp/spring-amqp.md
+++ b/docs/spring-amqp/spring-amqp.md
@@ -799,7 +799,7 @@ public interface ChannelListener {
 It also includes the `cacheMode` property (`CHANNEL` or `CONNECTION`).
 
-![cacheStats](images/cacheStats.png)
+![cacheStats](https://docs.spring.io/spring-amqp/docs/current/reference/html/images/cacheStats.png)
 
 Figure 1. JVisualVM example
@@ -4382,7 +4382,7 @@ public RabbitTransactionManager rabbitTransactionManager() {
 | | | | | | |
 | | | | | | |
-| (group) |This is available only when the namespace is used.<br/>When specified, a bean of type `Collection` is registered with that name, and the container for each `<listener/>` element is added to the collection.<br/>This allows, for example, starting and stopping groups of containers by iterating over the collection.<br/>If multiple `<listener-container/>` elements have the same group value, the containers in the collection are the aggregate of all containers so designated.|![tickmark](images/tickmark.png)|![tickmark](images/tickmark.png)| |
+| (group) |This is available only when the namespace is used.<br/>When specified, a bean of type `Collection` is registered with that name, and the container for each `<listener/>` element is added to the collection.<br/>This allows, for example, starting and stopping groups of containers by iterating over the collection.<br/>If multiple `<listener-container/>` elements have the same group value, the containers in the collection are the aggregate of all containers so designated.|![tickmark](https://docs.spring.io/spring-amqp/docs/current/reference/html/images/tickmark.png)|![tickmark](https://docs.spring.io/spring-amqp/docs/current/reference/html/images/tickmark.png)| |
 | | | | | | |
 | | | | | | |
diff --git a/docs/spring-hateoas/spring-hateoas.md b/docs/spring-hateoas/spring-hateoas.md
index f8ff1091fe65701d88060b9c79b321374e3502a7..7f6ef1162606ed141ef22139595dce33c414c5a8 100644
--- a/docs/spring-hateoas/spring-hateoas.md
+++ b/docs/spring-hateoas/spring-hateoas.md
@@ -159,7 +159,7 @@ To make it convenient to create hypermedia-rich representations, Spring HATEOAS provides a set of
 
 Example 7. The `RepresentationModel` class hierarchy
 
-diagram classes
+diagram classes
 
 The default way to use `RepresentationModel` is to create a subclass of it that contains all the properties the representation is supposed to have, create instances of that class, populate the properties, and enrich it with links.